Crawling book information from the 读远 (readcolor.com) site with Scrapy

2016-12-03  蜗牛仔

Here is the spider code:


# -*- coding: utf-8 -*-
import scrapy
from scrapy_redis.spiders import RedisSpider
from Program.items import ProgramItem

class ReadcolorSpider(RedisSpider):
    name = "readcolorredis"
    allowed_domains = ["readcolor.com"]
    redis_key = 'readcolorredis:star_urls'
    start_urls = ['http://readcolor.com/lists']  # this line must stay enabled, otherwise the spider cannot read the URLs even after they have been pushed to Redis
    url = 'http://readcolor.com'

    def parse(self, response):
        book_list_group = response.xpath('//article[@style="margin:10px 0 20px;"]')
        for book_list in book_list_group:
            item = ProgramItem()
            item['book_list_title'] = book_list.xpath('header/h3/a/text()').extract()[0]
            item['book_number'] = book_list.xpath('p/a/text()').extract()[0]
            book_list_url = book_list.xpath('header/h3/a/@href').extract()[0]
            # for every book-list link found, request it and let the callback below extract the list's details
            yield scrapy.Request(self.url + book_list_url, callback=self.parse_book_list_detail, meta={'item': item})

        next_page = response.xpath('//li[@class="next"]/a/@href').extract()
        if next_page:
            yield scrapy.Request(next_page[0], callback=self.parse)  # let the spider crawl the next page automatically
                
    def parse_book_list_detail(self, response):
        item = response.meta['item']
        summary = response.xpath('//div[@id="list-description"]/p/text()').extract()
        item['book_list_summary'] = '\n'.join(summary)
        #item['book_url']=response.xpath('//div[@id="body"]/div/div[3]/div[1]/ul[2]/li[1]/div/article/h3/a/@href').extract()[0]
        item['book_list_author'] = response.xpath('//div[@id="body"]/div/div[3]/div[1]/article/header/div/div/a/text()').extract()[0]
        item['book_list_date'] = response.xpath('//div[@id="body"]/div/div[3]/div[1]/article/header/div/div/text()').extract()[0]
        #item['book_name']=response.xpath('//div[@id="body"]/div/div[3]/div[1]/ul[2]/li[1]/div/article/h3/a/text()').extract()[0]
        #item['book_author'] =response.xpath('//div[@id="body"]/div/div[3]/div[1]/ul[2]/li[1]/div/article/h3/small/text()').extract()[0]
        #item['book_summary']=response.xpath('//div[@id="body"]/div/div[3]/div[1]/ul[2]/li[1]/div/article/p/text()').extract()[0]
        book_list = response.xpath('//li[@class="lists-book"]/div[@class="media"]/article')
        for book in book_list:
            item_book = item.copy()  # copy the item, otherwise later books would overwrite the earlier results
            item_book['book_name'] = book.xpath('h3/a/text()').extract()[0]
            item_book['book_url'] = book.xpath('h3/a/@href').extract()[0]
            book_author = book.xpath('h3/small/text()').extract()
            item_book['book_author'] = book_author[0] if book_author else 'N/A'  # the author and the summary may be empty, so guard against missing values
            book_summary = book.xpath('p/text()').extract()
            item_book['book_summary'] = book_summary[0] if book_summary else 'N/A'

            yield item_book

        yield item
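
The spider imports ProgramItem from Program.items; the items file itself is not shown in the post. A minimal items.py matching the fields the spider fills could look like this (the field names are taken from the spider above, everything else is an assumption):

# -*- coding: utf-8 -*-
# Program/items.py -- sketched from the fields used in the spider, not the original file
import scrapy

class ProgramItem(scrapy.Item):
    # fields describing a book list
    book_list_title = scrapy.Field()
    book_number = scrapy.Field()
    book_list_summary = scrapy.Field()
    book_list_author = scrapy.Field()
    book_list_date = scrapy.Field()
    # fields describing a single book inside the list
    book_name = scrapy.Field()
    book_url = scrapy.Field()
    book_author = scrapy.Field()
    book_summary = scrapy.Field()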


Here is the pipelines file:

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from scrapy.conf import settings

class ProgramPipeline(object):
    '''def __init__(self):
        client = pymongo.MongoClient()
        db = client[settings['MONGODB_DBNAME']]
        self.post = db[settings['MONGODB_DOCNAME']]'''

    def __init__(self):
        host = settings['MONGODB_HOST']
        port = settings['MONGODB_PORT']
        db_name = settings['MONGODB_DBNAME']
        client = pymongo.MongoClient(host=host, port=port)  # connect to MongoDB
        db = client[db_name]
        self.post = db[settings['MONGODB_DOCNAME']]

    def process_item(self, item, spider):
        book_info=dict(item)
        self.post.insert(book_info)
        return item
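
To confirm that the items actually reach MongoDB, a quick pymongo query can be run after a crawl. A minimal sketch (the database name comes from MONGODB_DBNAME in the settings below, while 'book' only stands in for whatever MONGODB_DOCNAME is set to):

import pymongo

client = pymongo.MongoClient('127.0.0.1', 27017)
collection = client['jikexueyuan']['book']      # 'book' is a placeholder collection name
print(collection.count())                       # how many items were stored
for doc in collection.find().limit(3):          # peek at a few stored documents
    print(doc.get('book_list_title'), '-', doc.get('book_name'))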

Here is the settings file. It holds the MongoDB host/port information as well as the Redis-related settings. Because Redis runs on the local machine, its IP does not have to be written out; if the spider should read URLs from Redis on another machine, the Redis address must be specified explicitly. That is the idea behind running the crawl distributed.

# -*- coding: utf-8 -*-

# Scrapy settings for Program project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'Program'

SPIDER_MODULES = ['Program.spiders']
NEWSPIDER_MODULE = 'Program.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

DOWNLOAD_DELAY = 1  # wait 1 second between requests

# Disable cookies (enabled by default)
COOKIES_ENABLED = False

# Enables scheduling storing requests queue in redis.
SCHEDULER = "scrapy_redis.scheduler.Scheduler"  # use the scrapy-redis scheduler

# Ensure all spiders share same duplicates filter through redis.
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'Program.pipelines.ProgramPipeline': 300,
}

# Don't cleanup redis queues, allows to pause/resume crawls.
SCHEDULER_PERSIST = True

SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderPriorityQueue"  # order of the request queue (priority queue)

MONGODB_HOST = '127.0.0.1'
MONGODB_PORT = 27017
MONGODB_DBNAME = 'jikexueyuan'
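
Two small additions are worth noting. The pipeline above also reads settings['MONGODB_DOCNAME'], so the collection name has to be defined here as well (the actual value is not visible in the text; 'book' below is only a placeholder). And because Redis runs locally, no Redis address is configured; for a distributed run across several machines, scrapy-redis reads it from REDIS_HOST / REDIS_PORT. A hedged sketch:

MONGODB_DOCNAME = 'book'  # placeholder -- must match the collection the pipeline should write to

# Only needed when Redis is NOT on the local machine (distributed crawl):
# REDIS_HOST = '192.168.1.100'   # example IP of the machine running Redis
# REDIS_PORT = 6379

Since the spider is a RedisSpider listening on redis_key = 'readcolorredis:star_urls', the start URL can also be pushed into Redis by hand, for example with redis-py:

import redis

r = redis.StrictRedis(host='127.0.0.1', port=6379)
r.lpush('readcolorredis:star_urls', 'http://readcolor.com/lists')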

Crawl results
