# [Python Crawler] Assignment 17
# Scraped from a blog article published 2017-09-10 by Mango907.
import requests
import os
from lxml import etree
from multiprocessing import Pool
def get_menu_urls(url):
    """Collect paginated category-listing URLs from the movie site's front page.

    Scrapes the first navigation list on ``url``, skips anchors without text
    and the '经典影片' (classic films) category, reads each remaining
    category's total page count from its pager, and returns a list of dicts:
    ``{'menu': <category name>, 'page': <listing-page URL>}``.
    """
    req = requests.get(url)
    req.encoding = 'gb2312'  # the site serves GB2312-encoded pages
    selector = etree.HTML(req.text)
    infos = selector.xpath('//div[@class="contain"][1]/ul/li[position()<10]/a')
    url_list = []
    for info in infos:
        a_text = info.xpath('text()')
        a_href = info.xpath('@href')
        # Guard clause: skip text-less anchors and the classics category.
        if not a_text or a_text[0] == '经典影片':
            continue
        menu_url = url + a_href[0]
        print(a_text[0], menu_url)
        req2 = requests.get(menu_url)
        req2.encoding = 'gb2312'
        selector2 = etree.HTML(req2.text)
        # Pager text looks like '共123页/...'; strip the decorations to get
        # the total page count.
        page_total = selector2.xpath(
            '//div[@class="co_content8"]/div[@class="x"]//text()'
        )[1].split('/')[0].replace('共', '').replace('页', '')
        print(page_total)
        # The first pager link is '<list_id>2.html'; removing '2.html'
        # leaves the per-page URL prefix.
        list_id = selector2.xpath(
            '//div[@class="co_content8"]/div[@class="x"]//a/@href'
        )[0].replace('2.html', '')
        print(list_id)
        for page_no in range(1, int(page_total) + 1):
            page_url = menu_url.replace('index', list_id + str(page_no))
            url_list.append({'menu': a_text[0], 'page': page_url})
    return url_list
def get_source(item):
    """Save the download link(s) of every movie on one listing page.

    ``item`` is ``{'menu': <category name>, 'page': <listing-page URL>}``.
    Creates a directory named after the category next to this script, then
    for each movie linked on the listing page writes ``<title>.txt``
    containing the listing URL, the movie name, and each download link.
    Designed to run as a multiprocessing.Pool worker.
    """
    page_url = item['page']
    menu_name = item['menu']
    parent_dir = os.path.dirname(os.path.abspath(__file__))
    menu_dir = os.path.join(parent_dir, menu_name)
    print(menu_dir)
    # exist_ok avoids a race when several pool workers hit the same category.
    # (The original also called os.chdir here; dropped — it is a process-wide
    # side effect and file_name below is already absolute.)
    os.makedirs(menu_dir, exist_ok=True)
    req3 = requests.get(page_url)
    req3.encoding = 'gb2312'  # the site serves GB2312-encoded pages
    selector3 = etree.HTML(req3.text)
    infos3 = selector3.xpath('//div[@class="co_content8"]//a[@class="ulink"]/@href')
    # BUG FIX: the detail-page fetch originally sat OUTSIDE this loop, so only
    # the LAST movie on each listing page was ever saved (and an empty listing
    # raised NameError on an unbound movie_url).
    for info3 in infos3:
        movie_url = 'http://www.ygdy8.com' + info3
        req4 = requests.get(movie_url)
        req4.encoding = 'gb2312'
        selector4 = etree.HTML(req4.text)
        titles = selector4.xpath('//div[@class="title_all"]/h1/font/text()')
        if not titles:
            continue  # unexpected page layout; skip rather than crash the worker
        movie_name = titles[0]
        try:
            # The title is usually '... 《Name》 ...'; '/' is illegal in filenames.
            short_name = movie_name.split('《')[1].split('》')[0].replace('/', '-')
        except IndexError:
            continue  # title without 《…》 brackets; no usable filename
        # BUG FIX: the original wrote '<name>txt' — the '.' was missing.
        file_name = os.path.join(menu_dir, short_name + '.txt')
        movie_sources = selector4.xpath('//td[@style="WORD-WRAP: break-word"]/a/@href')
        if not movie_sources:
            continue  # nothing to record; avoid creating an empty file
        with open(file_name, 'w', encoding='utf-8') as file:
            for movie_source in movie_sources:
                print(page_url, movie_url, movie_name, movie_source)
                file.write(page_url + '\n')
                file.write(movie_name + '\n')
                file.write(movie_source + '\n')
if __name__ == '__main__':
    root_url = 'http://www.ygdy8.com'
    url_list = get_menu_urls(root_url)
    # BUG FIX: the original never closed/joined the pool; the context manager
    # terminates worker processes cleanly on exit.
    with Pool(4) as pool:
        pool.map(get_source, url_list)
# (scraped article-page navigation removed: previous/next links,
#  "you may also like", "trending reads")