Python Crawler --- Scraping Company Information from Huangye88 with Scrapy

2019-03-12  成长之路丶

Goal:

Crawl all company information on huangye88.com (黄页88) with Scrapy and store everything in a MySQL database.

Analysis:

After inspecting the site with the F12 developer tools, it turns out there is no real anti-crawling mechanism; the data is just nested several levels deep (there are a lot of company categories). So I decided to use a CrawlSpider for the whole project, which greatly cuts down the amount of code to write. The crawl order breaks down as follows:
1. Start from the industry categories on the homepage (there are 36 of them):



2. Enter each industry category; inside there are many more sub-categories, and we crawl by the province sub-categories:



3. Finally, crawl the individual company data inside each province listing. Note that the company links on these pages are not the real detail pages; to avoid writing extra code, we can rewrite them in the process_links hook of a CrawlSpider Rule:

Writing the crawler files

1. Create the Scrapy project and spider:

E:\>scrapy startproject Huangye
E:\>cd Huangye
E:\Huangye>scrapy genspider -t crawl huangye88 huangye88.com
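
After these two commands the project has the standard Scrapy layout; the files edited below (items.py, the spider, pipelines.py, settings.py, and the middlewares referenced later) all live here:

Huangye/
├── scrapy.cfg
└── Huangye/
    ├── __init__.py
    ├── items.py
    ├── middlewares.py
    ├── pipelines.py
    ├── settings.py
    └── spiders/
        ├── __init__.py
        └── huangye88.py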

2. Write items.py:

import scrapy


class HuangyeItem(scrapy.Item):
    # company name
    company_name = scrapy.Field()
    # company introduction
    company_introduction = scrapy.Field()
    # basic company information (merged into one field)
    base_info = scrapy.Field()
    # detailed company information (merged into one field)
    detail_info = scrapy.Field()

3. Write the spider:

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from Huangye.items import HuangyeItem


class HuangyeSpider(CrawlSpider):
    name = 'huangye88'
    allowed_domains = ['huangye88.com']
    start_urls = ['http://www.huangye88.com/']
   
    rules = (
        # industry categories on the homepage
        Rule(LinkExtractor(restrict_xpaths='//div[@class="tit_list"]/ul[@id="list2_3"]/li/a')),
        # province sub-categories inside each industry page
        Rule(LinkExtractor(restrict_xpaths='//div[@class="box"][1]/div[@class="ad_list"]/a')),
        # company links; rewritten to real detail pages via process_links
        Rule(LinkExtractor(restrict_xpaths='//dt/h4/a'), callback='parse_item', process_links="deal_links"),
        # pagination ("下一页" is the "next page" link)
        Rule(LinkExtractor(restrict_xpaths='//div[@class="page_tagBaidu_paging_indicator"]/a[text()="下一页"]'), follow=True),
    )

    def deal_links(self, links):
        # rewrite each extracted company URL into its detail-page URL
        # before it is handed to the downloader
        for link in links:
            link.url = link.url + "company_detail.html"
        return links

    def parse_item(self, response):
        item = HuangyeItem()
        # company name
        item['company_name'] = response.xpath('//div[@class="data"]/p/text()').extract_first()
        # company introduction
        item['company_introduction'] = "".join(response.xpath('//div[@class="r-content"]/p[@class="txt"]//text()').extract())
        """
        The basic and detailed information blocks differ from company to company:
        some detail pages have 8 fields, others only 5 or 6, and the source HTML has no
        styling that tells them apart. Storing each piece in its own database column would
        require a lot of conditional code, so everything is merged into a single field.
        """
        info = response.xpath('//div[@class="data"]/ul[@class="con-txt"]/li')
        # the table is hard to target directly, so find its sibling <p class="txt">
        # first and step over to the table with following-sibling::table
        data = response.xpath('//p[@class="txt"]/following-sibling::table[1]/tr')
        # collect the basic information
        base_info = []
        for i in info:
            base_info.append("".join(i.xpath('.//text()').extract()))
        # join the pieces into one comma-separated string
        item['base_info'] = ",".join(base_info)
        # collect the detailed information
        detail_info = []
        for j in data:
            detail_info.append(":".join(j.xpath('./td//text()').extract()))
        # join the pieces into one comma-separated string
        item['detail_info'] = ",".join(detail_info)
        yield item
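
The XPath expressions in parse_item can be sanity-checked interactively with scrapy shell before running the full crawl; the URL below is only a placeholder for a real company detail page:

E:\Huangye>scrapy shell "http://xxx.huangye88.com/company_detail.html"
>>> response.xpath('//div[@class="data"]/p/text()').extract_first()
>>> response.xpath('//div[@class="data"]/ul[@class="con-txt"]/li//text()').extract()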

4. Write the pipeline:

from Huangye.settings import DATABASE_DB, DATABASE_HOST, DATABASE_PORT, DATABASE_PWD, DATABASE_USER
import pymysql

class HuangyePipeline(object):
    def __init__(self):
        host = DATABASE_HOST
        port = DATABASE_PORT
        user = DATABASE_USER
        passwd = DATABASE_PWD
        db = DATABASE_DB
        try:
            self.conn = pymysql.Connect(host=host, port=port, user=user, passwd=passwd, db=db, charset='utf8')
        except Exception as e:
            print("连接数据库出错,错误原因%s"%e)
        self.cur = self.conn.cursor()

    def process_item(self, item, spider):
        params = [item['company_name'], item['company_introduction'], item['base_info'], item['detail_info']]
        try:
            com = self.cur.execute(
                'insert into huangye88(company_name, company_introduction, base_info, detail_info) values (%s, %s, %s, %s)', params)
            self.conn.commit()
        except Exception as e:
            print("插入数据出错,错误原因%s" % e)
        return item

    def close_spider(self, spider):
        self.cur.close()
        self.conn.close()
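
The pipeline assumes a huangye88 table already exists in the target database. A one-off script along these lines can create it (the column names match the INSERT above; the column types and connection values are my own assumptions):

# create_table.py -- one-off helper to create the table the pipeline writes to
import pymysql

conn = pymysql.connect(host='database host/IP', port=3306, user='database user',
                       passwd='database password', db='database name', charset='utf8')
try:
    with conn.cursor() as cur:
        cur.execute("""
            CREATE TABLE IF NOT EXISTS huangye88 (
                id INT AUTO_INCREMENT PRIMARY KEY,
                company_name VARCHAR(255),
                company_introduction TEXT,
                base_info TEXT,
                detail_info TEXT
            ) DEFAULT CHARSET=utf8
        """)
    conn.commit()
finally:
    conn.close()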

5. Write settings.py:

BOT_NAME = 'Huangye'

SPIDER_MODULES = ['Huangye.spiders']
NEWSPIDER_MODULE = 'Huangye.spiders'

ROBOTSTXT_OBEY = False

DOWNLOAD_DELAY = 3

COOKIES_ENABLED = False

DOWNLOADER_MIDDLEWARES = {
   # 'Huangye.middlewares.HuangyeDownloaderMiddleware': 543,
   'Huangye.middlewares.RandomUserAgent': 105,
   'Huangye.middlewares.RandomProxy': 106,
}


ITEM_PIPELINES = {
    'Huangye.pipelines.HuangyePipeline': 100,
}

DATABASE_HOST = 'database host/IP'
DATABASE_PORT = 3306
DATABASE_USER = 'database user'
DATABASE_PWD = 'database password'
DATABASE_DB = 'database name'


USER_AGENTS = [
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5"
    ]


PROXIES = [
    {'ip_port': 'proxy IP:proxy port', 'user_passwd': 'proxy username:proxy password'},
    {'ip_port': 'proxy IP:proxy port', 'user_passwd': 'proxy username:proxy password'},
    {'ip_port': 'proxy IP:proxy port', 'user_passwd': 'proxy username:proxy password'},
]
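
DOWNLOADER_MIDDLEWARES above references two custom middlewares in Huangye/middlewares.py that are not shown in this post. A minimal sketch, assuming the USER_AGENTS and PROXIES lists defined in settings.py, might look like this:

# Huangye/middlewares.py -- minimal sketch of the two custom middlewares
import base64
import random

from Huangye.settings import USER_AGENTS, PROXIES


class RandomUserAgent(object):
    # attach a random User-Agent header to every outgoing request
    def process_request(self, request, spider):
        request.headers['User-Agent'] = random.choice(USER_AGENTS)


class RandomProxy(object):
    # route every request through a random authenticated HTTP proxy
    def process_request(self, request, spider):
        proxy = random.choice(PROXIES)
        request.meta['proxy'] = "http://" + proxy['ip_port']
        if proxy.get('user_passwd'):
            # standard Proxy-Authorization header with basic auth
            auth = base64.b64encode(proxy['user_passwd'].encode()).decode()
            request.headers['Proxy-Authorization'] = 'Basic ' + auth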

6. Run the project:

E:\>scrapy crawl huangye88 

7. Results:



The crawler does collect the data, but because there are so many categories and levels the data volume is large and crawling is slow. Rewriting it as a distributed scrapy-redis crawler lets several machines crawl at once, which is much faster and also provides some protection against anti-crawling measures.

Converting to a distributed scrapy-redis crawler

Converting a Scrapy crawler into a scrapy-redis distributed crawler is simple: in the spider file, import scrapy-redis and change the spider's parent class to the scrapy-redis one, replace start_urls with redis_key, and then update settings.py to switch to the scrapy-redis scheduler and configure the Redis connection.

Rewriting the spider

from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_redis.spiders import RedisCrawlSpider
from Huangye.items import HuangyeItem


class HuangyeSpider(RedisCrawlSpider):
    name = 'huangye88'
    allowed_domains = ['huangye88.com']
    # start_urls = ['http://www.huangye88.com/']
    redis_key = 'huangye88:start_urls'
   
    rules = (
        # industry categories on the homepage
        Rule(LinkExtractor(restrict_xpaths='//div[@class="tit_list"]/ul[@id="list2_3"]/li/a')),
        # province sub-categories inside each industry page
        Rule(LinkExtractor(restrict_xpaths='//div[@class="box"][1]/div[@class="ad_list"]/a')),
        # company links; rewritten to real detail pages via process_links
        Rule(LinkExtractor(restrict_xpaths='//dt/h4/a'), callback='parse_item', process_links="deal_links"),
        # pagination ("下一页" is the "next page" link)
        Rule(LinkExtractor(restrict_xpaths='//div[@class="page_tagBaidu_paging_indicator"]/a[text()="下一页"]'), follow=True),
    )

    def deal_links(self, links):
        # rewrite each extracted company URL into its detail-page URL
        # before it is handed to the downloader
        for link in links:
            link.url = link.url + "company_detail.html"
        return links

    def parse_item(self, response):
        item = HuangyeItem()
        # company name
        item['company_name'] = response.xpath('//div[@class="data"]/p/text()').extract_first()
        # company introduction
        item['company_introduction'] = "".join(response.xpath('//div[@class="r-content"]/p[@class="txt"]//text()').extract())
        """
        The basic and detailed information blocks differ from company to company:
        some detail pages have 8 fields, others only 5 or 6, and the source HTML has no
        styling that tells them apart. Storing each piece in its own database column would
        require a lot of conditional code, so everything is merged into a single field.
        """
        info = response.xpath('//div[@class="data"]/ul[@class="con-txt"]/li')
        # the table is hard to target directly, so find its sibling <p class="txt">
        # first and step over to the table with following-sibling::table
        data = response.xpath('//p[@class="txt"]/following-sibling::table[1]/tr')
        # collect the basic information
        base_info = []
        for i in info:
            base_info.append("".join(i.xpath('.//text()').extract()))
        # join the pieces into one comma-separated string
        item['base_info'] = ",".join(base_info)
        # collect the detailed information
        detail_info = []
        for j in data:
            detail_info.append(":".join(j.xpath('./td//text()').extract()))
        # join the pieces into one comma-separated string
        item['detail_info'] = ",".join(detail_info)
        yield item

Rewriting settings.py

BOT_NAME = 'Huangye'

SPIDER_MODULES = ['Huangye.spiders']
NEWSPIDER_MODULE = 'Huangye.spiders'

SCHEDULER = "scrapy_redis.scheduler.Scheduler"
DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'
SCHEDULER_PERSIST = True
# Redis connection (replace user, pass, hostname and port with your own)
REDIS_URL = 'redis://user:pass@hostname:port'
# By default RFPDupeFilter only logs the first duplicate request; set DUPEFILTER_DEBUG to True to log all of them.
DUPEFILTER_DEBUG = True
LOG_LEVEL = 'DEBUG'


ROBOTSTXT_OBEY = False

DOWNLOAD_DELAY = 3

COOKIES_ENABLED = False

DOWNLOADER_MIDDLEWARES = {
   # 'Huangye.middlewares.HuangyeDownloaderMiddleware': 543,
   'Huangye.middlewares.RandomUserAgent': 105,
   'Huangye.middlewares.RandomProxy': 106,
}


ITEM_PIPELINES = {
    'Huangye.pipelines.HuangyePipeline': 100,
}

DATABASE_HOST = 'database host/IP'
DATABASE_PORT = 3306
DATABASE_USER = 'database user'
DATABASE_PWD = 'database password'
DATABASE_DB = 'database name'


USER_AGENTS = [
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5"
    ]


PROXIES = [
    {'ip_port': 'proxy IP:proxy port', 'user_passwd': 'proxy username:proxy password'},
    {'ip_port': 'proxy IP:proxy port', 'user_passwd': 'proxy username:proxy password'},
    {'ip_port': 'proxy IP:proxy port', 'user_passwd': 'proxy username:proxy password'},
]
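
With redis_key in place, every worker that runs scrapy crawl huangye88 sits idle until a start URL appears in Redis, so the crawl is kicked off by pushing the homepage URL onto the huangye88:start_urls list once. A small sketch using the redis client that scrapy-redis already depends on (host and port are assumptions for a local instance):

# push_start_url.py -- seed the shared queue once to start all workers
import redis

r = redis.StrictRedis(host='localhost', port=6379, db=0)
r.lpush('huangye88:start_urls', 'http://www.huangye88.com/')

The same thing can be done from the command line with redis-cli lpush huangye88:start_urls http://www.huangye88.com/. After that, every machine running the spider against the same Redis instance pulls requests from the shared queue.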