Python Scraper Assignment (Cohort 4)

[Python Scraper] Parsing Huxiu.com with Wrapped GET and POST Requests

2017-09-12
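The script below scrapes huxiu.com in three stages: a GET request to the home page yields each channel's name and catId; a POST to the channel/ajaxGetMore endpoint with page 1 reveals how many pages the channel has; further POSTs then walk every page of every channel, collecting article URLs whose body text is finally fetched with plain GET requests.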
import requests
from lxml import etree
import json
# Parse the channel page and build a (channel_name, catId) item for every channel

def get_channel_info(url):
    html=requests.get(url,headers=headers).text
    selector=etree.HTML(html)
    zixun_infos=selector.xpath('//ul[@class="header-column header-column1 header-column-zx menu-box"]/li/a')
    items=[]
    for info in zixun_infos:
        item={}
        channel_name=info.xpath('text()')[0]
        catId=info.xpath('@href')[0].replace('/channel/','').replace('.html','')
        item['channel_name']=channel_name
        item['catId']=catId
        items.append(item)
    return items

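# POST the channel's catId (page 1) to the ajax endpoint to learn how many pages it has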
def get_total_page(item):
    catId = item['catId']
    # huxiu_hash_code appears to be a fixed site token, captured from a browser request
    post_data={
        'huxiu_hash_code':'18f3ca29452154dfe46055ecb6304b4e',
        'page':'1',
        'catId':catId
    }
    html=requests.post(post_url,data=post_data,headers=headers).text
    dict_data=json.loads(html)
    parse_data=dict_data['data']
    total_page=int(parse_data['total_page'])
    item2={}
    item2['channel_name']=item['channel_name']
    item2['total_page'] = total_page
    item2['catId'] = item['catId']
    return item2

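# POST one page of a channel; the JSON response's 'data' field is an HTML fragment containing the article links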
def get_all_article_url(channel_name,post_url,post_data):
    lit_article_url=[]
    html=requests.post(post_url,data=post_data,headers=headers).text
    dict_data=json.loads(html)
    parse_data=dict_data['data']
    data=parse_data['data']
    selector=etree.HTML(data)
    article_urls=selector.xpath('//a/@href')
    for article_url in article_urls:
        if article_url.startswith('/article'):
            article_url=root_url+article_url
            print(channel_name,article_url)
            item3={}
            item3['channel_name']=channel_name
            item3['article_url']=article_url
            lit_article_url.append(item3)
    return lit_article_url


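# GET a single article page and extract its paragraph text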
def get_all_article_content(item):
    article_url=item['article_url']
    channel_name=item['channel_name']
    html=requests.get(article_url,headers=headers).text
    selector=etree.HTML(html)
    # Join the text of every <p> node into the article body
    infos='\n'.join(selector.xpath('//p/text()'))
    print(channel_name,article_url)
    return infos


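# Entry point; root_url, headers and post_url are module-level globals used by the functions above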
if __name__ == '__main__':
    root_url = 'https://www.huxiu.com'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
    }
    items=get_channel_info(root_url)
    post_url = 'https://www.huxiu.com/channel/ajaxGetMore'
    total_pages=[]
    for item in items:
        item2=get_total_page(item)
        total_pages.append(item2)
    print(total_pages)
    for item2 in total_pages:
        catId = item2['catId']
        total_page=item2['total_page']
        for page in range(1,total_page+1):
            post_data = {
                'huxiu_hash_code': '18f3ca29452154dfe46055ecb6304b4e',
                'page': page,
                'catId': catId
            }
            channel_name=item2['channel_name']
            lit_article_url=get_all_article_url(channel_name,post_url,post_data)
            for item3 in lit_article_url:
                get_all_article_content(item3)
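As written, the main loop discards each article's text and fires requests back-to-back. Below is a minimal, untested sketch of how the innermost loop could instead persist every article to a JSON-lines file and pause between requests; it assumes get_all_article_content returns the text (as in the version above), and the filename and one-second delay are my own choices, not from the original post:

import time

# replaces the innermost loop; mode 'a' appends across pages and channels
with open('huxiu_articles.jl','a',encoding='utf-8') as f:
    for item3 in lit_article_url:
        content=get_all_article_content(item3)
        record={
            'channel_name':item3['channel_name'],
            'article_url':item3['article_url'],
            'content':content
        }
        f.write(json.dumps(record,ensure_ascii=False)+'\n')
        time.sleep(1)  # assumed pause so the requests do not hammer the site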