search_url, headers=self.headers, timeout=self.timeout).買粉絲ntent
def get_wx_url_by_sougou_search_買粉絲(self, sougou_search_買粉絲):
    """Extract the account's homepage link from a Sogou search result page."""
    # pyquery gives jQuery-like selection over raw markup (same idea as
    # BeautifulSoup, but with jQuery-style selectors). The homepage anchor
    # sits at div.txt-box > p.tit > a.
    page = pq(sougou_search_買粉絲)
    anchor = page('div[class=txt-box]')('p[class=tit]')('a')
    return anchor.attr('href')
def get_selenium_js_買粉絲(self, wx_url):
    """Render *wx_url* in a headless browser and return the resulting markup.

    The page is loaded in PhantomJS so client-side JavaScript executes
    before the DOM is serialized.

    :param wx_url: URL to load.
    :return: the rendered page's outer HTML as a string.
    """
    # NOTE(review): webdriver.PhantomJS is deprecated/removed in Selenium 4 —
    # confirm the pinned selenium version still ships it.
    browser = webdriver.PhantomJS()
    try:
        browser.get(wx_url)
        # Fixed wait so client-side rendering can finish before reading the DOM.
        time.sleep(3)
        return browser.execute_script("return document.documentElement.outerHTML")
    finally:
        # Fix: the original never closed the driver, leaking one PhantomJS
        # process per call; always shut it down.
        browser.quit()
def parse_wx_articles_by_買粉絲(self, selenium_買粉絲):
    """Select the article-card nodes from the rendered account page."""
    # Each article on the page is wrapped in a div.weui_msg_card element.
    document = pq(selenium_買粉絲)
    return document('div[class="weui_msg_card"]')
def switch_arctiles_to_list(self, articles):
    """Convert the selected article nodes into a list of article dicts."""
    articles_list = []
    if not articles:
        return articles_list
    total = len(articles)
    # enumerate replaces the original hand-maintained counter.
    for idx, article in enumerate(articles.items(), start=1):
        self.log(u'開始整合(%d/%d)' % (idx, total))
        articles_list.append(self.parse_one_article(article))
    return articles_list
def parse_one_article(self, article):
    """Parse one article card, save its rendered body to disk, and return
    the article's metadata.

    :param article: pyquery selection for one article card.
    :return: dict with keys title, url, summary, date, pic, 買粉絲ntent.
    """
    # Narrow to the card body (removed the original's unused empty dict).
    article = article('.weui_media_box[id]')
    title = article('h4[class="weui_media_title"]').text()
    self.log('標題是: %s' % title)
    # The card stores a relative link in a non-standard 'hrefs' attribute
    # (WeChat markup quirk). Fix: the host prefix was garbled
    # ('買粉絲://mp.weixin.qq.買粉絲') — restore the real host.
    url = 'http://mp.weixin.qq.com' + article('h4[class="weui_media_title"]').attr('hrefs')
    self.log('地址為: %s' % url)
    summary = article('.weui_media_desc').text()
    self.log('文章簡述: %s' % summary)
    date = article('.weui_media_extra_info').text()
    self.log('發表時間為: %s' % date)
    pic = self.parse_買粉絲ver_pic(article)
    # Fix: '.買粉絲()' was a garbled method name that would raise
    # AttributeError; pyquery serializes a selection with .html().
    買粉絲ntent = self.parse_買粉絲ntent_by_url(url).html()
    # Save body under <keyword>/<title>_<date>.html (extension was garbled).
    買粉絲ntentfiletitle = self.kw + '/' + title + '_' + date + '.html'
    self.save_買粉絲ntent_file(買粉絲ntentfiletitle, 買粉絲ntent)
    return {
        'title': title,
        'url': url,
        'summary': summary,
        'date': date,
        'pic': pic,
        '買粉絲ntent': 買粉絲ntent,
    }
def parse_買粉絲ver_pic(self, article):
    """Extract the cover-image URL from the article card's inline style.

    :param article: pyquery selection for one article card.
    :return: the first url(...) value in the style attribute, or '' if none.
    """
    pic = article('.weui_media_hd').attr('style')
    # Fix: 're.買粉絲pile' was a garbled 're.compile' (AttributeError at
    # runtime), and the pattern used a Unicode '∗' plus unescaped parens so
    # it could never capture the URL. Restore the intended url(...) capture.
    p = re.compile(r'background-image:url\((.*?)\)')
    # Guard: .attr() returns None when the node or attribute is missing;
    # findall(None) would raise TypeError.
    rs = p.findall(pic) if pic else []
    cover = rs[0] if rs else ''
    self.log('封面圖片是:%s ' % cover if rs else '')
    return cover
def parse_買粉絲ntent_by_url(self, url):
    """Render the article page at *url* and return its body node.

    :param url: full article URL.
    :return: pyquery selection of the article's content container.
    """
    page_買粉絲 = self.get_selenium_js_買粉絲(url)
    # Fix: the selector was garbled ('#js_買粉絲ntent'); WeChat article
    # bodies live in the #js_content element.
    return pq(page_買粉絲)('#js_content')
def save_買粉絲ntent_file(self, title, 買粉絲ntent):
    """Write the article markup in *買粉絲ntent* to the file path *title*."""
    import io  # local import: the file's import block is outside this view
    # Explicit UTF-8 so the Chinese article text is written correctly
    # regardless of the platform's locale default encoding.
    with io.open(title, 'w', encoding='utf-8') as f:
        f.write(買粉絲ntent)
def save_file(self, 買粉絲ntent):
' 數據寫入文件 '
with open(