day7: Scrapy, creating a CrawlSpider with scrapy genspider -t crawl

2018-08-22  是东东

Previously, on the command line:
Create a spider: scrapy genspider xxx xxx.com
Run the spider: scrapy crawl xxx

Now, on the command line:
Create a spider: scrapy genspider -t crawl xxx xxx.com
cd into the spiders folder and run the spider: scrapy runspider xxx.py
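
Applied to this demo, the full sequence would look roughly like this (a sketch; the project name seven_carwlSpiderDemo is taken from the import in dushu.py below, and the directory layout is the standard one scrapy startproject generates):

scrapy startproject seven_carwlSpiderDemo
cd seven_carwlSpiderDemo
scrapy genspider -t crawl dushu dushu.com
cd seven_carwlSpiderDemo/spiders
scrapy runspider dushu.py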

dushu.py

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from seven_carwlSpiderDemo.items import SevenCarwlspiderdemoItem
# The Scrapy framework ships two kinds of spiders: the basic Spider and CrawlSpider.
# CrawlSpider is a subclass of Spider. Spider is designed to crawl only the pages in
# start_urls; CrawlSpider adds rules that describe how to follow links, so it can
# crawl a whole set of URLs.


class DushuSpider(CrawlSpider):
    name = 'dushu'
    allowed_domains = ['dushu.com']
    start_urls = ['http://www.dushu.com/book/1163.html']
    # URL matching rules: each rule selects which links on the site to follow.
    # rules holds Rule objects. Each Rule describes one crawling action: its
    # LinkExtractor matches URLs, every match is fetched, and the response is
    # passed to the named callback.
    # Match with a regex: LinkExtractor(allow=r"some regex")
    # Match with XPath (e.g. for second-level pages): LinkExtractor(restrict_xpaths="some xpath")
    # Match with CSS: LinkExtractor(restrict_css="some css selector")
    rules = (
        Rule(LinkExtractor(allow=r'/book/1163_\d+\.html'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        # print(response)
        # Extract every book block on the page
        books = response.xpath("//div[@class='book-info']")
        for book in books:
            item = SevenCarwlspiderdemoItem()
            item["title"] = book.xpath(".//h3/a/text()").extract_first()
            item["author"] = book.xpath("./p[1]/a/text()").extract_first()
            item["info"] = book.xpath("./p[2]/text()").extract_first()
            item["img_url"] = book.xpath(".//img/@data-original").extract_first()
            # Build the absolute URL of the book's detail page (second-level page)
            book_url = "https://www.dushu.com" + book.xpath(".//h3/a/@href").extract_first()
            # Hand the partly filled item to parse_two via the request's meta dict
            yield scrapy.Request(url=book_url, callback=self.parse_two, meta={'book_item': item})

    def parse_two(self, response):
        # Retrieve the partially filled item attached by parse_item
        item = response.meta["book_item"]
        item['price'] = response.xpath("//p[@class='price']/span/text()").extract_first()
        # The summary div contains several text nodes; on dushu.com's layout the
        # third one (index 2) holds the book description
        item["summary"] = response.xpath("//div[starts-with(@class,'text txtsummary')]/text()").extract()[2]
        yield item
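
To see what the Rule above actually matches, the LinkExtractor can be exercised on its own against a fabricated response (a minimal sketch; the HTML snippet and link URLs here are made up for illustration):

from scrapy.http import HtmlResponse
from scrapy.linkextractors import LinkExtractor

# Fabricated page: the first link matches the rule's pattern, the second does not
html = b'<a href="/book/1163_2.html">next page</a><a href="/book/1200_2.html">other category</a>'
response = HtmlResponse(url="https://www.dushu.com/book/1163.html", body=html, encoding="utf-8")

extractor = LinkExtractor(allow=r'/book/1163_\d+\.html')
for link in extractor.extract_links(response):
    print(link.url)  # prints only https://www.dushu.com/book/1163_2.html

Relative hrefs are resolved against response.url, which is why the spider's rule can match site-internal paths directly.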

items.py

import scrapy


class SevenCarwlspiderdemoItem(scrapy.Item):
    title = scrapy.Field()
    author = scrapy.Field()
    info = scrapy.Field()
    img_url = scrapy.Field()
    price = scrapy.Field()
    summary = scrapy.Field()
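
Item subclasses behave like dicts, but only keys declared as Field are accepted; assigning anything else raises a KeyError. A quick sketch (the isbn key is made up to show the failure):

from seven_carwlSpiderDemo.items import SevenCarwlspiderdemoItem

item = SevenCarwlspiderdemoItem(title="Example")
item["price"] = "10.00"  # fine: price is a declared Field
item["isbn"] = "x"       # KeyError: SevenCarwlspiderdemoItem does not support field: isbn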

pipelines.py

import pymysql


class SevenCarwlspiderdemoPipeline(object):
    # def open_spider(self, spider):
    #     self.conn = pymysql.connect(
    #         host="127.0.0.1",
    #         port=3306,
    #         user="root",
    #         password="9998",
    #         db="bookdb",
    #         charset="utf8",
    #     )
    #     self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        # # Build the INSERT statement; %s placeholders let pymysql do the
        # # escaping instead of string formatting (which breaks on quotes)
        # sql = "INSERT INTO books VALUES(NULL, %s, %s, %s, %s, %s, %s)"
        # self.cursor.execute(sql, (item["title"], item["author"], item["info"],
        #                           item["img_url"], item["price"], item["summary"]))
        # self.conn.commit()

        return item

    # def close_spider(self, spider):
    #     # Close the cursor before the connection it belongs to
    #     self.cursor.close()
    #     self.conn.close()
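
For the pipeline to receive items at all, it has to be registered in settings.py (a minimal sketch; the priority 300 is a conventional choice, any value from 0 to 1000 works):

settings.py

ITEM_PIPELINES = {
    "seven_carwlSpiderDemo.pipelines.SevenCarwlspiderdemoPipeline": 300,
}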