Python 3 Study Notes

First Steps with Python Scraping, Practice 3: Scraping Grad-School Advisor Profiles from kaoyan.com

2020-03-16  一个王二不小

1 Basic Version

1.1 Scraping the front-page links and visiting each one

This isn't much different from the previous two exercises, so this time I tried modular programming instead of the script-style code I wrote before. I'm writing this post after finishing the code, so I'll just briefly note the problems I hit while coding and how I solved them.

The first problem: the page text came back with garbled Chinese, even though the page header declares the document as Chinese:

 <html lang="zh">
  <head>
    ...

My guess was that it was an encoding issue again, so I set the encoding on the requests response and cleaned up the text before returning it:

# Fix for garbled Chinese characters
resp.encoding = 'utf-8'
resp = resp.text.replace('\t', '') \
    .replace('\r', '') \
    .replace('\n', '') \
    .replace('\u3000', '')
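If the page's charset isn't known up front, requests can also guess it from the response body itself; a minimal alternative sketch (slightly slower, since it runs charset detection):

resp.encoding = resp.apparent_encoding  # let requests detect the charset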

Final code for the basic front-page version:

import time
import requests
from lxml import etree

start_url = 'http://www.kaoyan.com/daoshi/'
titles = []
context = []
data = {}


# Fetch a URL and return the cleaned page text
def get_resp(url):
    ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' \
         'AppleWebKit/537.36 (KHTML, like Gecko) ' \
         'Chrome/80.0.3987.116 Safari/537.36'
    header = {'User-Agent': ua}
    resp = requests.get(url, headers=header)
    # Fix for garbled Chinese characters
    resp.encoding = 'utf-8'
    if resp.status_code == 200:
        # return resp.text
        resp = resp.text.replace('\t', '').replace('\r', '').replace('\n', '').replace('\u3000', '')
        return resp
    else:
        print(url + ' failed to load')


def parse_teacher(resp_page):
    # Guard: get_resp returns None on failure, and etree.HTML(None) would raise
    if resp_page is None:
        return

    et = etree.HTML(resp_page)
    if et is not None:
        # Extract the title
        selectors = et.xpath('//h1')
        titles.append(selectors[0].text)

        # Extract the profile body
        selectors2 = et.xpath("//div[@class='articleCon']//p/text()")
        text = ''
        for s in selectors2:
            text += s
        context.append(text)

    else:
        print('Hit a malformed page, skipped it')


# Parse out the links and visit each one in turn
def link_parse(resp):
    et = etree.HTML(resp)
    links = et.xpath("//ul[@class='list areaZslist']/li//a/@href")
    for link in links:
        resp_page = get_resp(link)
        parse_teacher(resp_page)


def make_and_print():
    data.update(zip(titles, context))
    print(data)


if __name__ == '__main__':
    start_time = time.time()

    # Fetch the start URL
    resp = get_resp(start_url)
    # Parse the links and download each page
    link_parse(resp)
    # Assemble the final data and print it
    make_and_print()

    last_time = time.time() - start_time
    print(last_time)

The output looks like this:

(Screenshot: V1 output)

1.2 Fetching the next page

Scraping just one page of this data is obviously pointless. The next step is to check whether a next page exists and, if so, load it as well. The core code (a note on the selector follows the snippet):

# Handle pagination
next_url = et.xpath('//div[4]/a[11]/@href')
if next_url:
    print('Next page:', next_url[0])
    r = get_resp(next_url[0])
    link_parse(r)
else:
    print('All pages found; now downloading each advisor profile, please wait...')
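One note on that selector: //div[4]/a[11] is purely positional, so it breaks as soon as the pager gains or loses a link. A sketch of a text-based alternative, assuming the pager link is labelled 「下一页」 (my assumption; untested against the live page):

# Hypothetical pager selector keyed on the link text rather than position
next_url = et.xpath('//a[contains(text(), "下一页")]/@href')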

1.3 Saving to text files

Using each title as the filename, save every advisor's profile to its own text file.

def save_data(path, dicta):
    # Create the output folder on first run, then write one file per advisor
    if not os.path.exists(path):
        os.mkdir(path)
    os.chdir(path)
    for k, v in dicta.items():
        filename = k + '.txt'
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(v)
        print(k, 'profile saved!')
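One caveat: the title goes straight into the filename, and characters such as / * ? are illegal in filenames on Windows. A hypothetical helper to sanitize the name first (safe_filename is my own addition, not part of the original script):

import re

def safe_filename(title):
    # Replace characters Windows forbids in filenames with underscores
    return re.sub(r'[\\/:*?"<>|]', '_', title)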

The result:

(Screenshots: final output 1, 2 and 3)

That meets the requirement nicely. End of part one. Here's the complete final code for part one:

import time
import os
import requests
from lxml import etree

start_url = 'http://www.kaoyan.com/daoshi/'
titles = []
context = []
data = {}


# Fetch a URL and return the cleaned page text
def get_resp(url):
    ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' \
         'AppleWebKit/537.36 (KHTML, like Gecko) ' \
         'Chrome/80.0.3987.116 Safari/537.36'
    header = {'User-Agent': ua}
    resp = requests.get(url, headers=header)
    # Fix for garbled Chinese characters
    resp.encoding = 'utf-8'
    if resp.status_code == 200:
        # return resp.text
        resp = resp.text.replace('\t', '') \
            .replace('\r', '') \
            .replace('\n', '') \
            .replace('\u3000', '') \
            .replace('\xa0', '')
        return resp
    else:
        print(url + ' failed to load')


def parse_teacher(resp_page):
    # Guard: get_resp returns None on failure, and etree.HTML(None) would raise
    if resp_page is None:
        return

    et = etree.HTML(resp_page)
    if et is not None:
        # Extract the title
        selectors = et.xpath('//h1')
        titles.append(selectors[0].text)

        # Extract the profile body
        selectors2 = et.xpath("//div[@class='articleCon']//p/text()")
        text = ''
        for s in selectors2:
            text += s
        context.append(text)

    else:
        print('Hit a malformed page, skipped it')


# Parse out the links and visit each one in turn
def link_parse(resp):
    et = etree.HTML(resp)
    links = et.xpath("//ul[@class='list areaZslist']/li//a/@href")
    # Handle pagination
    next_url = et.xpath('//div[4]/a[11]/@href')
    if next_url:
        print('Next page:', next_url[0])
        r = get_resp(next_url[0])
        link_parse(r)
    else:
        print('All pages found; now downloading each advisor profile, please wait...')
    for link in links:
        resp_page = get_resp(link)
        parse_teacher(resp_page)


def make_and_print():
    data.update(zip(titles, context))
    print(data)


def save_data(path, dicta):
    # Create the output folder on first run, then write one file per advisor
    if not os.path.exists(path):
        os.mkdir(path)
    os.chdir(path)
    for k, v in dicta.items():
        filename = k + '.txt'
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(v)
        print(k, 'profile saved!')


if __name__ == '__main__':
    start_time = time.time()
    # save_data('./data_out', data)

    # Fetch the start URL
    resp = get_resp(start_url)
    # Parse the links and download each page
    link_parse(resp)
    # Assemble the final data and print it
    make_and_print()
    save_data('./data_out', data)

    print(len(titles))
    last_time = time.time() - start_time
    print(last_time)

2 Multithreaded Version

The data set is small and the run doesn't take long anyway, but I felt I still hadn't really understood the earlier multithreading material, so let's write another demo.
First, import the threading module from the standard library (mind the '-ing' in the name), and import Queue to hold the queue of links.

import threading
from queue import Queue

Define a variable to control the number of threads, a list to act as the thread pool, and a queue for the links (queue.Queue is thread-safe, so the workers can share it without extra locking). I also define a lock here to keep the two result lists paired when several threads append at once:

thread_num = 10
threads = []
links_queue = Queue()
# Lock to keep titles and context paired when threads append concurrently
results_lock = threading.Lock()

In the link-handling function, put every link into the queue instead of fetching it right away:

for link in links:
    links_queue.put(link)

Create the threads in the main block:

for _ in range(thread_num):
    t = threading.Thread(target=download)
    t.start()
    threads.append(t)

Write the download() worker:

def download():
    while True:
        link = links_queue.get()
        # A None in the queue is the signal to shut down
        if link is None:
            break
        resp_page = get_resp(link)
        parse_teacher(resp_page)
        print('Active download threads: %s, %s links left to parse' %
              (len(threading.enumerate()) - 1, links_queue.qsize()))
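A more defensive variant of the same worker, in case you'd rather log a failing link than let an exception kill the whole thread; a sketch, not the original code:

def download_safe():
    while True:
        link = links_queue.get()
        if link is None:
            break
        try:
            # One broken page no longer takes the worker down with it
            parse_teacher(get_resp(link))
        except Exception as e:
            print(link, 'failed:', e)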

Then shut the threads down: put one None sentinel into the queue for each thread, and join them all:

for i in range(thread_num):
    links_queue.put(None)
for t in threads:
    t.join()
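The standard library also supports a sentinel-free shutdown: have each worker call links_queue.task_done() after handling a link, start the threads as daemons, and block on links_queue.join() until the queue drains. A sketch of that variant:

def download_v2():
    # Worker for the task_done()/join() style: no None sentinels needed
    while True:
        link = links_queue.get()
        parse_teacher(get_resp(link))
        links_queue.task_done()

# In the main block:
#     t = threading.Thread(target=download_v2, daemon=True)
#     t.start()
#     ...
#     links_queue.join()  # returns once every queued link is processed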

In practice it is noticeably faster:

(Screenshots: output 1, 2 and 3)

Compared with the single-threaded version, more than double the speed!
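For what it's worth, concurrent.futures wraps this whole create/queue/join pattern in a few lines; a minimal sketch of the same fan-out, where all_links is assumed to be a plain list of the links gathered by link_parse (all_links is my own name, not in the original):

from concurrent.futures import ThreadPoolExecutor

# ThreadPoolExecutor handles thread creation, work distribution and
# shutdown; the with-block waits until every link has been processed.
with ThreadPoolExecutor(max_workers=thread_num) as pool:
    pool.map(lambda link: parse_teacher(get_resp(link)), all_links)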

Here is the complete multithreaded code:

# Scrape advisor profiles with multiple threads
import time
import os
import threading
from queue import Queue
import requests
from lxml import etree

start_url = 'http://www.kaoyan.com/daoshi/'
titles = []
context = []
data = {}

thread_num = 10
threads = []
links_queue = Queue()
# Lock to keep titles and context paired when threads append concurrently
results_lock = threading.Lock()


# Fetch a URL and return the cleaned page text
def get_resp(url):
    ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' \
         'AppleWebKit/537.36 (KHTML, like Gecko) ' \
         'Chrome/80.0.3987.116 Safari/537.36'
    header = {'User-Agent': ua}
    resp = requests.get(url, headers=header)
    # Fix for garbled Chinese characters
    resp.encoding = 'utf-8'
    if resp.status_code == 200:
        # return resp.text
        resp = resp.text.replace('\t', '') \
            .replace('\r', '') \
            .replace('\n', '') \
            .replace('\u3000', '') \
            .replace('\xa0', '')
        return resp
    else:
        print(url + ' failed to load')


def parse_teacher(resp_page):
    # Guard: get_resp returns None on failure, and etree.HTML(None) would raise
    if resp_page is None:
        return

    et = etree.HTML(resp_page)
    if et is not None:
        # Extract the title
        title = et.xpath('//h1')[0].text

        # Extract the profile body
        selectors2 = et.xpath("//div[@class='articleCon']//p/text()")
        text = ''
        for s in selectors2:
            text += s

        # Append both under one lock so title/text pairs from different
        # threads cannot interleave and end up mismatched
        with results_lock:
            titles.append(title)
            context.append(text)

    else:
        print('Hit a malformed page, skipped it')


# Parse out the links and queue them for the worker threads
def link_parse(resp):
    et = etree.HTML(resp)
    links = et.xpath("//ul[@class='list areaZslist']/li//a/@href")
    # Handle pagination
    next_url = et.xpath('//div[4]/a[11]/@href')
    if next_url:
        print('Next page:', next_url[0])
        r = get_resp(next_url[0])
        link_parse(r)
    else:
        print('All pages found; now downloading each advisor profile, please wait...')
    for link in links:
        # Hand the link to the worker threads via the queue
        links_queue.put(link)


def make_and_print():
    data.update(zip(titles, context))
    print(data)


def save_data(path, dicta):
    # Create the output folder on first run, then write one file per advisor
    if not os.path.exists(path):
        os.mkdir(path)
    os.chdir(path)
    for k, v in dicta.items():
        filename = k + '.txt'
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(v)
        print(k, 'profile saved!')


def download():
    while True:
        link = links_queue.get()
        # A None in the queue is the signal to shut down
        if link is None:
            break
        resp_page = get_resp(link)
        parse_teacher(resp_page)
        print('Active download threads: %s, %s links left to parse' %
              (len(threading.enumerate()) - 1, links_queue.qsize()))


if __name__ == '__main__':
    start_time = time.time()
    # Fetch the start URL
    resp = get_resp(start_url)
    # Parse the links and fill the queue
    link_parse(resp)

    # Spin up the worker threads
    for _ in range(thread_num):
        t = threading.Thread(target=download)
        t.start()
        threads.append(t)

    # One None sentinel per thread, then wait for every worker to finish
    for i in range(thread_num):
        links_queue.put(None)
    for t in threads:
        t.join()

    # Assemble the final data and print it
    make_and_print()
    save_data('./data_out', data)

    last_time = time.time() - start_time
    print('Downloaded %s advisor profiles in %s seconds' % (len(titles), last_time))