import requests
from lxml import etree
import json
# Parse the channel page and build the catId for each channel.
def get_channel_info(url):
    """Fetch the huxiu front page and list its channels.

    Returns a list of dicts, each with 'channel_name' (the link text)
    and 'catId' (the numeric id pulled out of the /channel/<id>.html href).
    """
    page_html = requests.get(url, headers=headers).text
    doc = etree.HTML(page_html)
    links = doc.xpath('//ul[@class="header-column header-column1 header-column-zx menu-box"]/li/a')
    return [
        {
            'channel_name': a.xpath('text()')[0],
            # href looks like /channel/<catId>.html — strip the fixed parts
            'catId': a.xpath('@href')[0].replace('/channel/', '').replace('.html', ''),
        }
        for a in links
    ]
def get_total_page(item):
    """POST page 1 of a channel's ajax feed to learn its page count.

    *item* must carry 'catId' and 'channel_name'.  Returns a new dict
    with 'channel_name', 'total_page' (as int) and 'catId'.
    Relies on the module-level `post_url` and `headers`.
    """
    payload = {
        'huxiu_hash_code': '18f3ca29452154dfe46055ecb6304b4e',
        'page': '1',
        'catId': item['catId'],
    }
    response_text = requests.post(post_url, data=payload, headers=headers).text
    data_section = json.loads(response_text)['data']
    return {
        'channel_name': item['channel_name'],
        'total_page': int(data_section['total_page']),
        'catId': item['catId'],
    }
def get_all_article_url(channel_name, post_url, post_data):
    """POST one page of a channel's ajax feed and collect article URLs.

    The endpoint returns JSON whose 'data'->'data' field is an HTML
    fragment; links whose href starts with '/article' are articles.
    Each match is printed and appended as a dict with 'channel_name'
    and the absolute 'article_url' (prefixed with module-level root_url).

    Fix: the original also extracted 'total_page' from the response and
    never used it — that dead local is removed.
    """
    lit_article_url = []
    html = requests.post(post_url, data=post_data, headers=headers).text
    dict_data = json.loads(html)
    parse_data = dict_data['data']
    # The inner 'data' field is an HTML fragment, not JSON — parse with lxml.
    selector = etree.HTML(parse_data['data'])
    for article_url in selector.xpath('//a/@href'):
        if article_url.startswith('/article'):
            article_url = root_url + article_url
            print(channel_name, article_url)
            item3 = {}
            item3['channel_name'] = channel_name
            item3['article_url'] = article_url
            lit_article_url.append(item3)
    return lit_article_url
def get_all_article_content(item):
    """Download one article and extract its paragraph text.

    *item* carries 'article_url' and 'channel_name'.

    Fix: the original computed the joined <p> text and silently discarded
    it (the prints were commented out).  The extracted text is now
    returned as (channel_name, article_url, text) so callers can store
    it — backward-compatible, since the existing caller ignores the
    return value.
    """
    article_url = item['article_url']
    channel_name = item['channel_name']
    html = requests.get(article_url, headers=headers).text
    selector = etree.HTML(html)
    # Join every <p> text node; crude, but matches the original extraction.
    infos = '\n'.join(selector.xpath('//p/text()'))
    return channel_name, article_url, infos
if __name__ == '__main__':
    # Module-level constants the scraper functions read as globals.
    root_url = 'https://www.huxiu.com'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
    }
    items = get_channel_info(root_url)
    post_url = 'https://www.huxiu.com/channel/ajaxGetMore'

    # Resolve how many ajax pages each channel exposes.
    total_pages = [get_total_page(item) for item in items]
    print(total_pages)

    # Walk every page of every channel, then fetch each article body.
    for channel in total_pages:
        for page in range(1, channel['total_page'] + 1):
            post_data = {
                'huxiu_hash_code': '18f3ca29452154dfe46055ecb6304b4e',
                'page': page,
                'catId': channel['catId'],
            }
            for entry in get_all_article_url(channel['channel_name'], post_url, post_data):
                get_all_article_content(entry)
# --- Non-code residue pasted from the hosting blog page; commented out so
# --- the file stays valid Python. Original text preserved below.
# 【Python爬虫】get和post请求解析虎嗅网封装
# 最后编辑于 :
# ©著作权归作者所有,转载或内容合作请联系作者
# - 文/潘晓璐 我一进店门,熙熙楼的掌柜王于贵愁眉苦脸地迎上来,“玉大人,你说我怎么就摊上这事。” “怎么了?”我有些...
# - 文/花漫 我一把揭开白布。 她就那样静静地躺着,像睡着了一般。 火红的嫁衣衬着肌肤如雪。 梳的纹丝不乱的头发上,一...
# - 文/苍兰香墨 我猛地睁开眼,长吁一口气:“原来是场噩梦啊……” “哼!你这毒妇竟也来了?” 一声冷哼从身侧响起,我...
# 推荐阅读更多精彩内容
# - 本篇将介绍urllib2的Get和Post方法,更多内容请参考:python学习指南 urllib2默认只支持HT...
# - 前言 ReactiveCocoa相信大家一定不会陌生,针对于MVVM架构而已,简直是如虎添翼,用过的人自然会明白有...