# -*- coding: utf-8 -*-
import json
import os
import re
import time
from urllib.parse import quote

# NOTE(review): this module also uses the third-party names ``requests``,
# ``pq`` (pyquery) and ``webdriver`` (selenium), which are referenced below
# but never imported in the visible portion of the file — confirm how they
# are brought into scope in the full source.
class weixin_spider:
def __init__(self, kw):
    """Initialize the spider for search keyword *kw*.

    Builds the Sogou WeChat-account search URL, a spoofed desktop
    browser User-Agent for basic anti-bot evasion, and a shared
    HTTP session.
    """
    self.kw = kw
    # Sogou WeChat-account search link.
    # BUGFIX(review): the scraped original had this URL corrupted
    # ("買粉絲" had overwritten "http" and ".com"); reconstructed below.
    self.sogou_search_url = (
        'http://weixin.sogou.com/weixin'
        '?type=1&query=%s&ie=utf8&s_from=input&_sug_=n&_sug_type_='
        % quote(self.kw)
    )
    # Spoofed desktop Firefox User-Agent. BUGFIX(review): the original
    # string was garbled ("FirePHP/0refox/47.0"); reconstructed as
    # Firefox 47 with the FirePHP extension token.
    self.headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) '
                      'Gecko/20100101 Firefox/47.0 FirePHP/0.7.4.1'
    }
    # Per-request timeout, in seconds.
    self.timeout = 5
    self.s = requests.Session()
def get_search_result_by_kw(self):
    """Fetch the raw Sogou search-result page for ``self.kw``.

    Returns the response body as bytes.
    """
    self.log('搜索地址為:%s' % self.sogou_search_url)
    # BUGFIX(review): the scraped source read ``.買粉絲ntent``; a requests
    # Response exposes its body via ``.content``.
    return self.s.get(
        self.sogou_search_url, headers=self.headers, timeout=self.timeout
    ).content
def get_wx_url_by_sougou_search_買粉絲(self, sougou_search_買粉絲):
    """Extract the account-homepage link from a Sogou result page.

    Parses the page with pyquery (jQuery-like selectors, similar in
    spirit to BeautifulSoup) and returns the ``href`` of the anchor
    inside the title paragraph of the first result box.
    """
    page = pq(sougou_search_買粉絲)
    title_anchor = page('div[class=txt-box]')('p[class=tit]')('a')
    return title_anchor.attr('href')
def get_selenium_js_買粉絲(self, wx_url):
    """Render *wx_url* in a headless browser and return the final markup.

    NOTE(review): PhantomJS is long deprecated in Selenium — confirm the
    project still pins a Selenium version that ships this driver.
    """
    driver = webdriver.PhantomJS()
    driver.get(wx_url)
    # Give client-side scripts a moment to populate the DOM.
    time.sleep(3)
    # Serialize the fully rendered document out of the browser.
    rendered = driver.execute_script("return document.documentElement.outerHTML")
    return rendered
def parse_wx_articles_by_買粉絲(self, selenium_買粉絲):
    """Select the article-card nodes from the rendered account page.

    Each article is wrapped in a ``weui_msg_card`` div.
    """
    return pq(selenium_買粉絲)('div[class="weui_msg_card"]')
def switch_arctiles_to_list(self, articles):
' 把articles轉換成數據字典 '
articles_list = []
i = 1
if articles:
for article in articles.items():
self.log(u'開始整合(%d/%d)' % (i, len(articles)))
articles_list.append(self.parse_one_article(article))
i += 1
# break
return articles_list
def parse_one_article(self, article):
' 解析單篇文章 '
article_dict = { }
article = article('.weui_media_box[id]')
title = article('h4[class="weui_media_title"]').text()
self.log('標題是: %s' % title)
url = '買粉絲://mp.weixin.qq.買粉絲' + article('h4[class="weui_media_title"]').attr('hrefs')
self.log('地址為: %s' % url)
summary = article('.weui_media_desc').text()
self.log('文章簡述: %s' % summary)
date = article('.weui_media_extra_info').text()
self.log('發表時間為: %s' % date)
pic = self.parse_買粉絲ver_pic(article)
買粉絲ntent = self.parse_買粉絲ntent_by_url(url).買粉絲()
買粉絲ntentfiletitle=self.kw+'/'+title+'_'+date+'.買粉絲'
self.save_買粉絲ntent_file(買粉絲ntentfiletitle,買粉絲ntent)
return {
'title': title,
'url': url,
'summary': summary,
'date': date,
'pic': pic,