
A Coroutine Example: Writing an Async Crawler with aiohttp Instead of requests

2017-11-26  碎冰op

There isn't much material online about writing crawlers with aiohttp, and the official documentation is in English, which makes it a hassle to read, so here is my own summary, written partly as a loose translation.

A function that fetches HTML with requests basically looks like this:

import requests


def func(url: str) -> str:
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
    cookies = {'Cookie': ''}
    # not bothering with a Session for now; verify=False skips SSL certificate verification for https pages
    r = requests.get(url, headers=headers, timeout=10, cookies=cookies, verify=False)
    r.encoding = r.apparent_encoding  # auto-detect the page encoding to avoid garbled Chinese, though it slows the program down
    return r.text  # or r.content


func('http://www.sina.com')  # the scheme is required; a bare 'www.sina.com' raises MissingSchema

Rewritten with aiohttp:

import asyncio

import aiohttp


async def html(url: str) -> str:
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
    async with aiohttp.ClientSession() as session:
        # old aiohttp versions lack this parameter; if it raises an error, uninstall and reinstall the latest version
        async with session.get(url, headers=headers, timeout=10, verify_ssl=False) as r:
            # text() is the counterpart of r.text in requests; r.read() is the counterpart of r.content
            return await r.text()


loop = asyncio.get_event_loop()
loop.run_until_complete(html('http://www.sina.com'))  # the scheme is required here as well
# pages that go through SSL verification need roughly 250ms for the underlying connections to close
loop.run_until_complete(asyncio.sleep(0.25))
loop.close()

That's basically the rewrite. The concept of coroutines itself isn't the point here, and I won't go over their advantages (single-threaded, low overhead, and so on) either; this post only covers a few pitfalls and things to watch out for. See the reference documentation.

# requests
return r.text, r.content
# aiohttp
return await r.text(), await r.read()  # don't drop the trailing await: every coroutine has to be awaited
return await r.text(errors='ignore')  # simply ignore decode errors; the default is strict mode, which raises an exception and kills the program on the first error

Note a question here: how does r.encoding = r.apparent_encoding actually work, and why is there no equivalent line in the aiohttp version?
First, take a look at the source of r.apparent_encoding:

(screenshot of the apparent_encoding property from the requests source)
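From memory, that property is roughly the following (paraphrased, so check the requests version you have installed for the exact code):

@property
def apparent_encoding(self):
    """The apparent encoding, provided by the chardet library."""
    return chardet.detect(self.content)['encoding']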
As you can see, it essentially amounts to:

import chardet  # already installed if you have the requests module


code = chardet.detect(content)['encoding']  # content is the raw response body, i.e. r.content

In other words, ported over to the aiohttp code, you would in principle have to write it like this:

import asyncio

import aiohttp
import chardet


async def html(url: str) -> str:
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
    async with aiohttp.ClientSession() as session:
        # old aiohttp versions lack this parameter; if it raises an error, uninstall and reinstall the latest version
        async with session.get(url, headers=headers, timeout=10, verify_ssl=False) as r:
            content = await r.read()
            code = chardet.detect(content)['encoding']
            # text() is the counterpart of r.text in requests; with no arguments it auto-detects the page
            # encoding, which likewise slows things down. r.read() is the counterpart of r.content
            return await r.text(encoding=code, errors='ignore')

In practice, though, r.text() with encoding=None (the default) already performs this step, so there is no need to bother with chardet yourself; if decode errors show up, ignore them first and then look into the problem pages one by one, or simply let it go.
See the documentation for this part:

If encoding is None content encoding is autocalculated using Content-Type HTTP header and chardet tool if the header is not provided by server.
cchardet is used with fallback to chardet if cchardet is not available.
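If a specific page still comes back garbled, one option (my own sketch; the gb18030 value is only an illustration) is to force a known encoding for that page:

import aiohttp


async def html_forced(url: str) -> str:
    # force a specific encoding instead of relying on auto-detection;
    # gb18030 is just an example value, substitute whatever the page actually uses
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as r:
            return await r.text(encoding='gb18030', errors='ignore')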

On timeouts, the documentation gives this example:

import async_timeout

with async_timeout.timeout(0.001):
    async with session.get('https://github.com') as r:
        await r.text()

Even inside the with block, a timeout exception will still be raised... so set the time a bit longer, say 10s, and catch the timeout exception. Besides that, this style of writing avoids the concurrent.futures._base.CancelledError exception; that exception means a task that hasn't completed when the timeout is reached has been cancelled by the event loop.

The event loop will ensure to cancel the waiting task when that timeout is reached and the task hasn't completed yet.
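A minimal sketch of the advice above (the fetch name and the 10-second value are mine, not from the docs): use a longer timeout and catch asyncio.TimeoutError.

import asyncio

import aiohttp
import async_timeout


async def fetch(session: aiohttp.ClientSession, url: str) -> str:
    try:
        with async_timeout.timeout(10):  # 10s instead of 0.001s
            async with session.get(url) as r:
                return await r.text(errors='ignore')
    except asyncio.TimeoutError:
        # async_timeout raises asyncio.TimeoutError when the time limit is hit
        return ''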

Below are two pieces of code that do exactly the same thing (heavily simplified, just enough to run correctly), comparing aiohttp with multithreading.
Their job is to read the title and body text of each page.

aiohttp

import asyncio
from random import random

import aiohttp
# install with: pip install readability-lxml
from readability import Document


def title_summary(content: bytes, url: str):
    doc = Document(content, url)
    print(doc.short_title(), doc.summary())


async def read_one(id_: int, url: str):
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
    async with aiohttp.ClientSession() as session:
        try:
            async with session.get(
                    url, headers=headers, timeout=1, verify_ssl=False) as r:
                await asyncio.sleep(1 + random())  # a random 1-2 second pause between requests
                content, text = await r.read(), await r.text(
                    encoding=None, errors='ignore')
                if text:
                    title_summary(content, url)
        except:
            pass


def read_many(links: list):
    loop = asyncio.get_event_loop()
    to_do = [read_one(id_, url) for id_, url in links]
    loop.run_until_complete(asyncio.wait(to_do))
    # or loop.run_until_complete(asyncio.gather(*to_do)); the two lines seem to behave the same here
    loop.close()


def main():
    links = [...]  # the full list of links to crawl
    read_many(links)


if __name__ == '__main__':
    main()

Multithreading

from concurrent import futures


import requests
from readability import Document


def title_summary(content: bytes, url: str):
    doc = Document(content, url)
    print(doc.short_title(), doc.summary())


def read_one(url: str):
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
    try:
        r = requests.get(url, headers=headers, timeout=1, verify=False)
        r.encoding = r.apparent_encoding
        content, text = r.content, r.text  # no await here: requests is synchronous and r.text is a plain property
        if text:
            title_summary(content, url)
    except:
        pass


def read_many(links: list) -> int:
    workers = min(100, len(links))  # number of worker threads
    with futures.ThreadPoolExecutor(workers) as e:
        res = e.map(read_one, links)
    return len(list(res))


def main():
    links = [...]
    read_many(links)


if __name__ == '__main__':
    main()

That's basically how the coroutine and thread versions are used. However, when the number of tasks runs into the thousands, asyncio may raise: ValueError: too many file descriptors in select()
This happens because asyncio uses select() internally, and the number of open file descriptors select() can watch is limited; for the background, see Computer Systems: A Programmer's Perspective.
In that situation you can't write it the way shown below; a callback may come into play, though you can also do without one.

def read_many(links: list):
    loop = asyncio.get_event_loop()
    to_do = [read_one(id_, url) for id_, url in links]
    loop.run_until_complete(asyncio.wait(to_do))
    # or loop.run_until_complete(asyncio.gather(*to_do)); the two lines seem to behave the same here
    loop.close()

Change the code above like this:

def read_many(links: list):
    loop = asyncio.get_event_loop()
    for id_, url in links:
        task = asyncio.ensure_future(read_one(id_, url))
        loop.run_until_complete(task)
    loop.close()

That's all it takes.
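A variant of the same idea (my own sketch, not from the original article): hand the links to the event loop in fixed-size batches, so that only a bounded number of connections, and hence file descriptors, are open at any one time.

def read_many_batched(links: list, batch_size: int = 100):
    loop = asyncio.get_event_loop()
    # run batch_size links at a time instead of all at once or one by one
    for i in range(0, len(links), batch_size):
        to_do = [read_one(id_, url) for id_, url in links[i:i + batch_size]]
        loop.run_until_complete(asyncio.wait(to_do))
    loop.close()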

If you do want to use a callback, it's more cumbersome and quite a few places need changes, as shown below; the main thing to pay attention to is how parameters get passed.
There's really no need for a callback here: splitting things up this way may look more orderly, and the fetch step can be reused when you need to request other pages, but it is also quite restrictive.

import asyncio
from random import random

import aiohttp
# install with: pip install readability-lxml
from readability import Document


def title_summary(fut):
    res = fut.result()  # inside a callback, result() is how you get the wrapped coroutine's actual return value
    if res:
        content, url = res
        doc = Document(content, url)
        print(doc.short_title(), doc.summary())


async def read_one(id_: int, url: str):
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
    async with aiohttp.ClientSession() as session:
        try:
            async with session.get(
                    url, headers=headers, timeout=1, verify_ssl=False) as r:
                await asyncio.sleep(1 + random())  # a random 1-2 second pause between requests
                return await r.read(), await r.text(encoding=None, errors='ignore')
        except:
            pass


def read_many(links: list):
    loop = asyncio.get_event_loop()
    for id_, url in links:
        task = asyncio.ensure_future(read_one(id_, url))
        # Note the parameters: add_done_callback only hands the callback the future itself, so to pass extra
        # arguments use functools.partial, pack them into a tuple, or use a lambda; the official docs lean
        # towards functools.partial, not shown here (see the sketch after this example)
        task.add_done_callback(title_summary)
        loop.run_until_complete(task)
    loop.close()


def main():
    links = [...]  # the full list of links to crawl
    read_many(links)


if __name__ == '__main__':
    main()
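A minimal sketch of the functools.partial option mentioned in the comment above (the title_summary_with_url name and the extra url parameter are my own illustration):

import functools

from readability import Document


def title_summary_with_url(url: str, fut):
    # partial binds url as the first argument; add_done_callback still passes the future itself as the last one
    res = fut.result()
    if res:
        content, _text = res
        doc = Document(content, url)
        print(doc.short_title(), doc.summary())


# inside read_many(), instead of task.add_done_callback(title_summary):
# task.add_done_callback(functools.partial(title_summary_with_url, url))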