Python网络爬虫05——实例一

2018-09-11  本文已影响0人  远航天下

代码如下:

import json
import re
import requests
import time
from requests.exceptions import RequestException
from multiprocessing import Pool

author = 'damao'  # original author's tag (module-level constant)

"""爬取猫眼电影TOP100"""  # bare-string note ("Scrape Maoyan movies TOP100"); no runtime effect


def get_one_page(url):
    """Fetch one page of the Maoyan TOP100 board.

    Returns the response body as text on HTTP 200, or ``None`` on any
    other status code or on a request failure.
    """
    # Browser-like User-Agent: the site rejects the default requests UA.
    header = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3"}
    try:
        # Explicit timeout so a stalled connection cannot hang a pool worker forever.
        response = requests.get(url=url, headers=header, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        # Network-level failure (DNS, connection, timeout, ...): signal with None.
        return None

def parse_one_page(html):
    """Parse one board page and yield a dict per movie entry.

    Each yielded dict has string values under the keys:
    index, image, name, actor, time, score.
    """
    # Raw strings avoid invalid-escape warnings (e.g. \d in a non-raw
    # literal); re.S lets '.' span the newlines inside each <dd> entry.
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a'
        r'.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
        r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
    items = re.findall(pattern=pattern, string=html)
    for item in items:
        yield {
            "index": item[0],
            "image": item[1],
            "name": item[2],
            # Key typo fixed ("acyor" -> "actor").  [3:] drops the
            # leading "主演:" label; [5:] drops "上映时间:".
            "actor": item[3].strip()[3:],
            "time": item[4].strip()[5:],
            # Integer and fractional score halves are concatenated, e.g. "9." + "6".
            "score": item[5] + item[6],
        }

def write_to_file(content, path="test.txt"):
    """Append *content* as one JSON line to *path* (UTF-8).

    ``ensure_ascii=False`` keeps Chinese titles human-readable in the
    file instead of being escaped to \\uXXXX sequences; the file is
    already opened with an explicit UTF-8 encoding.  *path* defaults to
    the original hard-coded "test.txt" for backward compatibility.
    """
    with open(file=path, mode='a', encoding="utf-8") as f:
        f.write(json.dumps(content, ensure_ascii=False) + "\n")

def main(offset):
    """Scrape one board page at the given pagination *offset* and
    print/persist each parsed movie entry."""
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url=url)
    # get_one_page returns None on failure; skip this page instead of
    # crashing inside parse_one_page (re.findall rejects None).
    if html is None:
        return
    for item in parse_one_page(html):
        print(item)
        write_to_file(item)


if __name__ == '__main__':
    # One task per board page (offsets 0, 10, ..., 90).  The with-block
    # closes and joins the pool so worker processes are reaped cleanly
    # (the original never called pool.close()/pool.join()).
    with Pool() as pool:
        pool.map(main, [i * 10 for i in range(10)])
上一篇下一篇

猜你喜欢

热点阅读