
Scrapy Crawlers: A Complete Breakdown of Strategies for Beating Anti-Crawler Measures

2017-09-28  youyuge

If you can afford it, please support the genuine iMooc (慕课网) hands-on course; this blog post is only my own summary of it, for personal use.

1. Basic concepts: crawlers and anti-crawlers

(Figure: basic concepts of crawlers and anti-crawlers)

2. Why sites implement anti-crawling

(Figure: the goals of anti-crawler measures)

3. A short history of the crawler vs. anti-crawler arms race

(Figure: the crawler / anti-crawler arms race)

4. Scrapy architecture

(Figure: the new official Scrapy architecture diagram)

5. Strategies for beating anti-crawler measures

5.1 Randomly rotating the User-Agent

Referring to the architecture diagram above: Scrapy has two kinds of middleware, and the one we need to customize here is the Downloader Middleware on the right.

pip install fake-useragent

Note that fake-useragent keeps its list of user-agent strings on an online page, so an old version of the library may depend on a list page that now returns 403; remember to keep fake-useragent up to date (pip install -U fake-useragent).
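
A quick sanity check of the library outside Scrapy (nothing project-specific here, just the public UserAgent API):

from fake_useragent import UserAgent

ua = UserAgent()
print(ua.random)    # a random browser User-Agent string
print(ua.firefox)   # a random Firefox User-Agent string

For reference, this is Scrapy's built-in UserAgentMiddleware from scrapy/downloadermiddlewares/useragent.py; the custom middleware further below imitates and replaces it: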

"""Set User-Agent header per spider or use a default value from settings"""

from scrapy import signals


class UserAgentMiddleware(object):
    """This middleware allows spiders to override the user_agent"""
    
    # by default the user agent is just 'Scrapy', which is a dead giveaway
    def __init__(self, user_agent='Scrapy'):
        self.user_agent = user_agent

    @classmethod
    def from_crawler(cls, crawler):
        o = cls(crawler.settings['USER_AGENT'])
        crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
        return o

    def spider_opened(self, spider):
        self.user_agent = getattr(spider, 'user_agent', self.user_agent)
    
    # this method is the key: it attaches the default user-agent header to every request
    def process_request(self, request, spider):
        if self.user_agent:
            request.headers.setdefault(b'User-Agent', self.user_agent)

In the project's middlewares.py, we imitate the class above but pull the UA string from fake-useragent:

from fake_useragent import UserAgent

class RandomUserAgentMiddlware(object):
    '''
    Randomly rotate the user-agent.
    Modeled on, and meant to replace, the UserAgentMiddleware class in
    site-packages/scrapy/downloadermiddlewares/useragent.py.
    '''

    def __init__(self, crawler):
        super(RandomUserAgentMiddlware, self).__init__()
        self.ua = UserAgent()
        # read from the settings file which fake-useragent attribute to call; the default is 'random', but 'ie', 'firefox' etc. also work
        self.ua_type = crawler.settings.get("RANDOM_UA_TYPE", "random")

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler)
    
    # the user-agent is swapped in here, once per request
    def process_request(self, request, spider):
        def get_ua():
            return getattr(self.ua, self.ua_type)

        ua = get_ua()
        print ua  # debug output: the UA actually set on this request
        request.headers.setdefault('User-Agent', ua)

Then enable the new middleware in settings.py and disable Scrapy's built-in UserAgentMiddleware:

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
   'JobSpider.middlewares.RandomUserAgentMiddlware': 543,
   'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
}
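
The RANDOM_UA_TYPE value the middleware reads can also be set in settings.py; a minimal sketch (any attribute exposed by fake-useragent's UserAgent, such as 'random', 'ie', 'firefox' or 'chrome', is a valid value):

# settings.py
# which fake-useragent attribute RandomUserAgentMiddlware should call;
# 'random' picks any browser, the others restrict it to one browser family
RANDOM_UA_TYPE = 'random'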

5.2 Randomly rotating proxy IPs

(Figure: checking the machine's outbound IP)

5.2.1 How IP proxies work

(Figures: direct access vs. access relayed through a proxy server)

In Scrapy, a request is routed through a proxy by setting the proxy key in its meta dict:

request.meta['proxy'] = 'xxx.xx.xx.xxx:xx'

High-anonymity (elite) proxies hide your real IP completely; an ordinary proxy may still pass your real IP along to the target server.
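
A quick way to see the difference is to compare the outbound IP that a server sees with and without a proxy. A minimal sketch, assuming httpbin.org/ip as the echo service and reusing the placeholder proxy address from above (swap in a real proxy before uncommenting the last line):

import requests

# direct request: the echoed address is your real outbound IP
print(requests.get('http://httpbin.org/ip').text)

# the same request relayed through a proxy: the echoed address should then
# be the proxy's IP rather than yours
proxies = {'http': 'http://xxx.xx.xx.xxx:xx'}
# print(requests.get('http://httpbin.org/ip', proxies=proxies).text)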

5.2.2 A script to crawl the xici (西刺) proxy list

#!/usr/bin/env python
# encoding: utf-8
"""
@author: yousheng
@contact: 1197993367@qq.com
@site: http://youyuge.cn

@version: 1.0
@license: Apache Licence
@file: crawl_ip.py
@time: 17/9/27 3:06 PM

"""

import requests  # plain requests is enough for these simple HTTP calls
import MySQLdb
from scrapy.selector import Selector
# reuse the database settings defined in the Scrapy project's settings.py
from JobSpider.settings import MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DBNAME

conn = MySQLdb.connect(host=MYSQL_HOST, user=MYSQL_USER, passwd=MYSQL_PASSWORD,
                       db=MYSQL_DBNAME, charset='utf8')
cursor = conn.cursor()


def clear_table():
    # empty the table before re-crawling
    cursor.execute('truncate table proxy_ip')
    conn.commit()


def crawl_xici_ip(pages):
    '''
    Crawl all proxy IPs on the given number of list pages; after each page
    is parsed, its rows are written to the database.
    :return:
    '''
    clear_table()
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0"}
    for i in range(1, pages + 1):  # page numbers on xicidaili start at 1
        response = requests.get(url='http://www.xicidaili.com/nn/{0}'.format(i), headers=headers)

        all_trs = Selector(text=response.text).css('#ip_list tr')

        ip_list = []
        for tr in all_trs[1:]:
            ip = tr.xpath('td[2]/text()').extract_first().encode('utf8')
            port = tr.xpath('td[3]/text()').extract_first().encode('utf8')
            ip_type = tr.xpath('td[6]/text()').extract_first().encode('utf8')
            ip_speed = tr.xpath('td[7]/div/@title').extract_first()
            if ip_speed:
                ip_speed = float(ip_speed.split(u'秒')[0])
            ip_alive = tr.xpath('td[9]/text()').extract_first().encode('utf8')

            ip_list.append((ip, port, ip_type, ip_speed, ip_alive))

        # write each page's rows to the database as soon as the page is parsed
        for ip_info in ip_list:
            cursor.execute(
                "insert proxy_ip(ip, port, type, speed, alive) VALUES('{0}', '{1}', '{2}', {3}, '{4}')".format(
                    ip_info[0], ip_info[1], ip_info[2], ip_info[3], ip_info[4]
                )
            )

            conn.commit()


# helper class for managing proxy IPs
class IPUtil(object):
    # noinspection SqlDialectInspection
    def get_random_ip(self):
        # fetch one random usable IP from the database
        random_sql = """
            SELECT ip, port, type FROM proxy_ip
            ORDER BY RAND()
            LIMIT 1
            """

        result = cursor.execute(random_sql)
        for ip_info in cursor.fetchall():
            ip = ip_info[0]
            port = ip_info[1]
            ip_type = ip_info[2]

            judge_re = self.judge_ip(ip, port, ip_type)
            if judge_re:
                return "{2}://{0}:{1}".format(ip, port, str(ip_type).lower())
            else:
                return self.get_random_ip()

    def judge_ip(self, ip, port, ip_type):
        # check whether the proxy works: if Baidu can be reached through it
        # and returns a 2xx status code, the IP is usable;
        # otherwise it is removed from the database
        print 'begin judging ---->', ip, port, ip_type
        http_url = "https://www.baidu.com"
        proxy_url = "{2}://{0}:{1}".format(ip, port, str(ip_type).lower())
        try:
            proxy_dict = {
                "http": proxy_url,
                "https": proxy_url,  # needed because the test URL uses https
            }
            response = requests.get(http_url, proxies=proxy_dict)
        except Exception as e:
            print "invalid ip and port,cannot connect baidu"
            self.delete_ip(ip)
            return False
        else:
            code = response.status_code
            if 200 <= code < 300:
                print "effective ip"
                return True
            else:
                print "invalid ip and port, code is " + str(code)
                self.delete_ip(ip)
                return False

    # noinspection SqlDialectInspection
    def delete_ip(self, ip):
        # remove an invalid IP from the database
        delete_sql = """
            delete from proxy_ip where ip='{0}'
        """.format(ip)
        cursor.execute(delete_sql)
        conn.commit()
        return True

if __name__ == '__main__':
    crawl_xici_ip(pages=3)
    # ip = IPUtil()
    # for i in range(20):
    #     print ip.get_random_ip()
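
The script above assumes a proxy_ip table already exists in MySQL. The original post never shows its definition, so the schema below is only a guess that matches the columns the script reads and writes (the connection parameters are placeholders):

import MySQLdb

conn = MySQLdb.connect(host='localhost', user='root', passwd='root',
                       db='jobspider', charset='utf8')
cursor = conn.cursor()
# one row per proxy; ip is the key that delete_ip() filters on
cursor.execute("""
    CREATE TABLE IF NOT EXISTS proxy_ip (
        ip     VARCHAR(20) NOT NULL PRIMARY KEY,
        port   VARCHAR(10) NOT NULL,
        `type` VARCHAR(20),
        speed  FLOAT,
        alive  VARCHAR(20)
    ) DEFAULT CHARSET=utf8
""")
conn.commit()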

Back in middlewares.py, add a middleware that attaches a random, validated proxy IP to every outgoing request:

from tools.crawl_ip import IPUtil

class RandomProxyMiddleware(object):
    # dynamically assign a proxy to each outgoing request
    def process_request(self, request, spider):
        ip_util = IPUtil()
        proxy_ip = ip_util.get_random_ip()
        print 'using ip proxy:', proxy_ip
        request.meta["proxy"] = proxy_ip

6. Recommended third-party libraries

scrapy-proxies: a ready-made IP-proxy rotation tool for Scrapy
crawlera: the official IP proxy plugin; you have to buy a key on its website; reliable, stable and powerful
Tor (the onion network): routes traffic anonymously; from mainland China it requires getting past the GFW; very stable
