python爬虫大数据 爬虫Python AI Sql

[Python]融资募集说明书爬虫

2018-07-18  本文已影响91人  3inchtime

为公司机器学习提供基础资料,爬取中国货币网上的募资说明书,并将下载的pdf文档转换为txt文档。

1. 准备

本爬虫基于 Python 3.6.4
本爬虫主要需要pdf转txt的Python库pdfminer
首先pip install pdfminer.six

2. 分析API

爬取数据来自于http://www.chinamoney.com.cn/r/cms/chinese/chinamoney/html/ses/search-adv.html?searchValue=%E5%8B%9F%E9%9B%86%E8%AF%B4%E6%98%8E%E4%B9%A6&isInfoPublishLend=false&infoLevels=1,2,3,4,5&verify=&key=&oc1=/fe/Channel/28595&oc2=/fe/Channel/28352&entyTypeCode=&entyFullName=&code=&name=&searchPublishFlag=&searchBondFlag=&bondTypeCode=

通过Chrome开发者工具可以发现所有的数据全部通过POST请求从API获取。


http://www.chinamoney.com.cn/ses/rest/cm-u-notice-ses-cn/query?sort=date&date=all&text=%E5%8B%9F%E9%9B%86%E8%AF%B4%E6%98%8E%E4%B9%A6&channelIdStr=2496,2916,2833,2632,2589,2663,2556,2850,2884,2900,2496,2916,2833,2632,2589,2663,2556,2850,2884,2900,0000&method=queryByChannels&infoLevel=1,2,3,4,5&field=title&start=&end=&siteId=15&pageIndex=1&pageSize=15&isFuzzy=true&nodeLevel=1&isInfoPublishLend=false

只要修改相应的pageIndex就可以跳转页码。

3. 详解

建立循环,暂设为爬取1000页,使用requests库向需要爬取页码的API发送POST请求。

req = requests.post(url=url.format(i), headers=headers).text

使用json.loads()方法将返回的JSON数据解析为dict,从而获取到pdf的下载地址。

pdf_basic_url = 'http://www.chinamoney.com.cn/dqs/cm-s-notice-query/fileDownLoad.do?mode=open&contentId={}&priority=0'

data = json.loads(req)
for n in range(len(data['data']['pageResult']['entries'])):
    company_name = data['data']['pageResult']['entries'][n]['title']
    pdf_name = data['data']['pageResult']['entries'][n]['title'] + '.pdf'
    txt_name = data['data']['pageResult']['entries'][n]['title'] + '.txt'

    pdf_id = data['data']['pageResult']['entries'][n]['id']
    pdf_url = pdf_basic_url.format(pdf_id)

之后调用下载pdf的方法download_file()下载pdf

下载成功后调用parse_pdf()方法就可以了。

parse_pdf()方法借鉴http://www.voidcn.com/article/p-nxhbbaqq-hh.html

4. 运行

在服务器上同时运行五个进程,同时进行处理,考虑到这一点在逻辑中加入了一些去重的方法。


CPU占用率一直100%,大约三分钟可以转换一篇。

5. 源码

# -*- coding: utf-8 -*-

import json
import os
import time

import requests

from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LAParams, LTTextBoxHorizontal
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfpage import PDFTextExtractionNotAllowed
# Bug fix: PDFParser lives in pdfminer.pdfparser, not pdfminer.pdfpage —
# the original import would raise ImportError before the script could run.
from pdfminer.pdfparser import PDFParser


def recruitement_spider():
    """Crawl fundraising-prospectus PDFs from chinamoney.com.cn.

    Pages 1000-1999 of the search API are fetched via POST (15 entries per
    page). For each entry: if its ``<title>.txt`` already exists, another
    worker process has claimed it and it is skipped; otherwise the .txt is
    created immediately as a claim marker, the PDF is downloaded, converted
    to text, and the PDF is deleted.
    """
    headers = {
        # The API appears to require the search page as Referer — kept
        # byte-identical to the working request captured from the browser.
        "Referer": "http://www.chinamoney.com.cn/r/cms/chinese/chinamoney/html/ses/search-adv.html?searchValue=%E5%8B%9F%E9%9B%86%E8%AF%B4%E6%98%8E%E4%B9%A6&isInfoPublishLend=false&infoLevels=1,2,3,4,5&verify=&key=&oc1=/fe/Channel/28595&oc2=/fe/Channel/28352&entyTypeCode=&entyFullName=&code=&name=&searchPublishFlag=&searchBondFlag=&bondTypeCode="
    }
    url = 'http://www.chinamoney.com.cn/ses/rest/cm-u-notice-ses-cn/query?sort=date&date=all&text=%E5%8B%9F%E9%9B%86%E8%AF%B4%E6%98%8E%E4%B9%A6&channelIdStr=2496,2916,2833,2632,2589,2663,2556,2850,2884,2900,2496,2916,2833,2632,2589,2663,2556,2850,2884,2900,0000&method=queryByChannels&infoLevel=1,2,3,4,5&field=title&start=&end=&siteId=15&pageIndex={}&pageSize=15&isFuzzy=true&nodeLevel=1&isInfoPublishLend=false'
    pdf_basic_url = 'http://www.chinamoney.com.cn/dqs/cm-s-notice-query/fileDownLoad.do?mode=open&contentId={}&priority=0'
    for i in range(1000, 2000):
        time.sleep(2)  # throttle: be polite to the server
        print('第{}页'.format(i))
        # Pre-bind so the except handler below can never hit a NameError
        # when the POST itself fails before any entry was processed.
        company_name = ''
        try:
            req = requests.post(url=url.format(i), headers=headers).text
            data = json.loads(req)
            # Iterate the entries directly instead of range(len(...)) with
            # repeated deep indexing.
            for entry in data['data']['pageResult']['entries']:
                company_name = entry['title']
                pdf_name = company_name + '.pdf'
                txt_name = company_name + '.txt'
                pdf_id = entry['id']

                # Dedup across the five parallel worker processes: an
                # existing .txt means the document is already claimed.
                if os.path.isfile(txt_name):
                    print("Already Existed")
                    continue
                # Create the .txt right away as a claim marker so other
                # workers skip this document while we process it.
                with open(txt_name, 'a') as f:
                    f.write('\n')
                print('Created-{}'.format(txt_name))

                pdf_url = pdf_basic_url.format(pdf_id)
                print(pdf_name, pdf_url)
                # Only parse/remove when the download actually succeeded;
                # the original attempted both even after a failed download.
                if download_file(pdf_url, pdf_name):
                    parse_pdf(pdf_name, txt_name)
                    os.remove(pdf_name)
        except Exception:
            # Narrowed from a bare except (which also caught SystemExit
            # and KeyboardInterrupt); log the failure and keep crawling.
            print('{} Error'.format(company_name))


def parse_pdf(pdf_name, txt_name):
    """Extract horizontal text boxes from *pdf_name* and append them to *txt_name*.

    Each LTTextBoxHorizontal's text is written followed by a newline,
    matching the original output format.

    Raises:
        PDFTextExtractionNotAllowed: if the PDF forbids text extraction.
    """
    # Context manager guarantees the file handle is closed — the original
    # leaked ``fp`` on every call (and on the raise path).
    with open(pdf_name, 'rb') as fp:
        parser = PDFParser(fp)
        document = PDFDocument(parser)
        if not document.is_extractable:
            raise PDFTextExtractionNotAllowed
        rsrcmgr = PDFResourceManager()
        laparams = LAParams()
        device = PDFPageAggregator(rsrcmgr, laparams=laparams)
        interpreter = PDFPageInterpreter(rsrcmgr, device)
        chunks = []
        for page in PDFPage.create_pages(document):
            interpreter.process_page(page)
            layout = device.get_result()
            for obj in layout:
                if isinstance(obj, LTTextBoxHorizontal):
                    chunks.append(obj.get_text() + '\n')
        # Open the output once and batch the writes, instead of re-opening
        # the txt file for every single text box as the original did.
        with open(txt_name, 'a') as f:
            f.writelines(chunks)


def download_file(download_url, file_name, timeout=60):
    """Download *download_url* to *file_name*.

    Args:
        download_url: URL of the file to fetch.
        file_name: local path to write the response body to.
        timeout: seconds before the request is aborted (new, backward-
            compatible keyword; the original had no timeout, so a single
            stalled download could hang a worker process forever).

    Returns:
        True if the server answered 200 and the file was written,
        False otherwise.
    """
    r = requests.get(download_url, timeout=timeout)
    if r.status_code == 200:
        with open(file_name, 'wb') as f:
            f.write(r.content)
        print("File Download Completed!")
        return True
    else:
        print("File Download Failed!")
        return False


# Script entry point: run the crawler only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    recruitement_spider()

上一篇 下一篇

猜你喜欢

热点阅读