[Original] Python Web Crawler

[Original] Python Crawler: Downloading the Latest Memes from Doutula (doutula.com)

2020-04-14  复苏的兵马俑

A. Features:
  1. Download the latest meme images from Doutula and save them to a specified directory;
  2. Rename each image file to its Chinese caption while keeping the original file extension (see the short sketch below);
  3. Provide both a single-threaded and a multi-threaded implementation.

B. Modules used: requests, re, os, lxml, and urllib (the multi-threaded version additionally uses queue and threading).
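Before the full scripts, here is a minimal sketch of the renaming rule in point 2: the image's alt text (its Chinese caption) becomes the file name, punctuation is stripped, and os.path.splitext keeps the extension from the URL. The URL and caption below are placeholder values, not taken from the site.

import os
import re

# Placeholder values for illustration only
img_url = 'https://example.com/demo.jpg'
alt = '示例表情?!'

# Strip punctuation that is awkward in a file name, then keep the URL's suffix
alt = re.sub(r'[\??\.,。!!\*]', '', alt)
suffix = os.path.splitext(img_url)[1]   # '.jpg'
filename = alt + suffix                 # '示例表情.jpg'
print(filename)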

1. Single-threaded implementation

import requests
from lxml import etree
from urllib import request
import os
import re


def parse_page(url):
    """Download every meme image on one list page."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.9 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    text = response.text
    html = etree.HTML(text)
    # Skip the lazy-load placeholder (class "gif"); the real address is in data-original.
    imgs = html.xpath('//div[@class="page-content text-center"]//img[@class!="gif"]')
    for img in imgs:
        img_url = img.get('data-original')
        alt = img.get('alt')
        # Use the caption as the file name, minus awkward punctuation,
        # and keep the original extension from the URL.
        alt = re.sub(r'[\??\.,。!!]', '', alt)
        suffix = os.path.splitext(img_url)[1]
        filename = alt + suffix
        request.urlretrieve(img_url, 'images/' + filename)


def main():
    # urlretrieve does not create directories, so make sure images/ exists first.
    os.makedirs('images', exist_ok=True)
    for i in range(1, 101):
        url = 'https://www.doutula.com/photo/list/?page={}'.format(i)
        parse_page(url)


if __name__ == '__main__':
    main()

2. Multi-threaded implementation

import requests
from lxml import etree
from urllib import request
import os
import re
from queue import Queue
import threading


class Producer(threading.Thread):
    """Parses list pages and pushes (image URL, file name) pairs onto img_queue."""

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.9 Safari/537.36',
        'Referer': 'https://www.doutula.com/photo/list/'
    }

    def __init__(self, page_queue, img_queue, *args, **kwargs):
        super(Producer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.img_queue = img_queue

    def run(self):
        # Stop once every list page URL has been claimed.
        while True:
            if self.page_queue.empty():
                break
            url = self.page_queue.get()
            self.parse_page(url)

    def parse_page(self, url):
        response = requests.get(url, headers=self.headers)
        text = response.text
        html = etree.HTML(text)
        # Skip the lazy-load placeholder (class "gif"); the real address is in data-original.
        imgs = html.xpath('//div[@class="page-content text-center"]//img[@class!="gif"]')
        for img in imgs:
            img_url = img.get('data-original')
            alt = img.get('alt')
            alt = re.sub(r'[\??\.,。!!\*]', '', alt)
            suffix = os.path.splitext(img_url)[1]
            filename = alt + suffix
            self.img_queue.put((img_url, filename))


class Consumer(threading.Thread):
    """Takes (image URL, file name) pairs off img_queue and downloads them."""

    def __init__(self, page_queue, img_queue, *args, **kwargs):
        super(Consumer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.img_queue = img_queue

    def run(self):
        # Stop when there are no pages left to parse and no images left to download.
        while True:
            if self.img_queue.empty() and self.page_queue.empty():
                break
            img_url, filename = self.img_queue.get()
            request.urlretrieve(img_url, 'images/' + filename)
            print(filename + '  downloaded!')


def main():
    # urlretrieve does not create directories, so make sure images/ exists first.
    os.makedirs('images', exist_ok=True)
    page_queue = Queue(100)
    img_queue = Queue(1000)
    for i in range(1, 101):
        url = 'https://www.doutula.com/photo/list/?page={}'.format(i)
        page_queue.put(url)

    # Five threads parse list pages, five threads download images.
    for i in range(5):
        t = Producer(page_queue, img_queue)
        t.start()

    for i in range(5):
        t = Consumer(page_queue, img_queue)
        t.start()


if __name__ == '__main__':
    main()
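A note on the shutdown logic: a consumer exits when both queues look empty, so it can break while the last producer is still parsing its page, or block in get() if another consumer grabs the last item right after the check. A common hardening, not part of the original post, is to join the producers and then push one sentinel per consumer; the SentinelConsumer class and the producers/consumers lists below are hypothetical names used only for this sketch.

import threading
from queue import Queue
from urllib import request


class SentinelConsumer(threading.Thread):
    """Hypothetical variant: exits on a None sentinel instead of polling the queues."""

    def __init__(self, img_queue, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.img_queue = img_queue

    def run(self):
        while True:
            item = self.img_queue.get()   # blocks until work (or a sentinel) arrives
            if item is None:              # sentinel: producers are done, exit cleanly
                break
            img_url, filename = item
            request.urlretrieve(img_url, 'images/' + filename)
            print(filename + '  downloaded!')


# In main(), after starting the threads:
#     for p in producers:        # wait for every Producer to finish parsing
#         p.join()
#     for _ in consumers:        # one sentinel per consumer thread
#         img_queue.put(None)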