
Web Scraping for Beginners (Part 4): Scraping Download Links for New English Python Books

2016-11-11 · treelake

This post is the code-implementation companion to the earlier article listing download links for new Python-related English books. Although its author had already built a scraper, I re-implemented it myself with Scrapy; the overall approach is much the same.


The Scrapy Spider

I named the file book1.py and run it with scrapy runspider book1.py -o 2.json.
Selenium is used to drive a Chrome browser; download ChromeDriver and point the script at the executable's location.
Change the search_what variable (line 7 of the script) to fetch every related book link for another topic; here I chose python.
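
The positional driver path used below matches Selenium 3, which this post was written against. On Selenium 4 or later the path is passed through a Service object instead; a minimal sketch, assuming the same chromedriver location:

# Minimal sketch for Selenium 4+; the chromedriver path is the same one used in the spider below
from selenium import webdriver
from selenium.webdriver.chrome.service import Service

service = Service(r'E:\python\Scrapy2\book\book\spiders\chromedriver')
browser = webdriver.Chrome(service=service)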

# -*- coding: utf-8 -*-
import scrapy

from selenium import webdriver
import re

search_what = 'python'
browser = webdriver.Chrome(r'E:\python\Scrapy2\book\book\spiders\chromedriver') # point this at your chromedriver executable

class Book1Spider(scrapy.Spider):
    name = "book1"
    allowed_domains = ["foxebook.net", "zippyshare.com"]
    
    down_base_url = ".zippyshare.com" # the full host is www{xy}.zippyshare.com, where xy is a number selecting a mirror server
    search_url = "http://www.foxebook.net/search/{}/page/{}"
    book_base_url = "http://www.foxebook.net"
    wd = browser # the shared browser instance
    reg = re.compile('<a id="dlbutton" href="(.*)">')
    
    headers = {
        "User-Agent":"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36",
    }
    
    num = 0 # current page number

    def start_requests(self): # default entry point that yields the initial requests to crawl
        yield scrapy.Request(self.search_url.format(search_what, 0),
                             headers = self.headers,
                             callback = self.first_parse)
                
    def first_parse(self, response):
        # read the total number of result pages from the pagination's "next" link
        max_page = response.css('.next a::attr("href")').extract_first().split('/')[-2]
        print(int(max_page))
        while self.num < int(max_page):
            self.num += 1
            yield scrapy.Request(self.search_url.format(search_what, self.num),
                                 headers = self.headers,
                                 callback = self.list_parse)
                             
    def list_parse(self, response):
        # pull title, cover image, download-page link and publication info from each search result
        title = response.css('h3 a::text').extract()
        img = response.css('.img-responsive::attr("src")').extract()
        download = response.css('.btn-info::attr("href")').extract()
        info = response.css('.info~ .info+ .info i::text').extract()
        for t, im, d, inf in zip(title, img, download, info):
            meta = {}
            # pass the scraped fields along via the meta parameter
            meta['title'] = t
            meta['img'] = im
            meta['info'] = inf
            yield scrapy.Request(self.book_base_url + d,
                                 meta = meta,
                                 headers = self.headers,
                                 callback = self.book_parse)
                                 
    def book_parse(self, response):                                
        book_url_list = response.css('.table-hover a::attr("href")').extract()
        for i in book_url_list:
            book_url = 'http' + i.split('http')[-1] # the href wraps the real target; keep everything from the last 'http'
            if 'zip' in book_url: # the zippyshare host name contains "zip"; the other file hosts work poorly, so they are skipped
                self.wd.get(book_url) # open the page in Chrome; self.wd.page_source then holds the rendered HTML
                try:
                    down_url = book_url.split('.')[0] + self.down_base_url + re.findall(self.reg, self.wd.page_source)[0]
                except IndexError: # no download button found on the page
                    break
                yield {
                    'title':response.meta['title'], 
                    'img':response.meta['img'], 
                    'info':response.meta['info'],
                    'down_url':down_url, 
                    'web_pan':book_url,
                }
            else:
                pass

# Debug
                # from scrapy.shell import inspect_response
                # inspect_response(response, self)
# scrapy runspider book1.py -o 2.json
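
To make the string handling in book_parse concrete, here is a tiny worked example; the host number and file path are made up:

# Hypothetical zippyshare values, only to show how down_url is assembled
book_url = 'http://www42.zippyshare.com/v/abc123/file.html'   # extracted redirect target
dlbutton_href = '/d/abc123/12345/Some.Python.Book.pdf'        # what the dlbutton regex captures

down_url = book_url.split('.')[0] + '.zippyshare.com' + dlbutton_href
print(down_url)  # http://www42.zippyshare.com/d/abc123/12345/Some.Python.Book.pdf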

Post-processing the Data

# -*- coding: utf-8 -*-
import json

with open('2.json','r') as f:
    data = json.load(f)
    data = sorted(data, key=lambda x: x['info'][2:12], reverse=True)
    # sort the books by publication date (a slice of the info string)
    
    with open("1.txt", 'w') as t: 
    # 生成便于迅雷下载的链接文件
    # 先打开迅雷,再复制该文件内容,迅雷会自动弹出询问是否下载所有链接
        for i in data:
            t.write(i['down_url']+'\n')
            
    with open('1.md', 'w') as m: # a book may appear more than once, but each link is distinct and points to a different server
        m.write('### Download links for English Python ebooks:\n')
        for i in data:
            m.write('- **[{title}]({down_url})** \n *{info}*\n ![](http:{img}) \n --- \n'.format(**i))
            # **i unpacks the dict into keyword arguments for format()

#import datetime
#datetime.datetime.strptime(x, '%Y-%m-%d')
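
The commented strptime hint above suggests a sturdier sort key than the raw string slice; a sketch, assuming info[2:12] really holds a YYYY-MM-DD date, which fails loudly on malformed entries instead of silently mis-sorting them:

# Sketch: parse the date slice into a real datetime before sorting
import datetime

data = sorted(data,
              key=lambda x: datetime.datetime.strptime(x['info'][2:12], '%Y-%m-%d'),
              reverse=True)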