requests+pyquery+csv爬取最好大学排行榜

2020-05-29  本文已影响0人  不吃唐僧肉的妖怪
import requests

from pyquery import PyQuery as pq
from requests.exceptions import RequestException
import csv

# Browser-style User-Agent sent with every request (see requests.get below);
# presumably needed so the site serves the normal desktop page — TODO confirm.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'
}


def get_link_detail(url):
    """Fetch the ranking page at *url* and parse each table row into a dict.

    Each row of ``.news-text table tbody tr`` yields a dict with keys
    ``sort``, ``name``, ``city``, ``score`` and ``indicator5``-``indicator9``.

    :param url: page URL to scrape.
    :return: list of row dicts; an EMPTY list on request failure, so the
        result is always safe to pass to ``write_data()``.
    """
    try:
        # timeout prevents the script from hanging forever on a dead host.
        resp = requests.get(url, headers=HEADERS, timeout=10)
        resp.raise_for_status()  # treat HTTP 4xx/5xx as a failure, not as HTML
        # Sniff the charset from the body; the declared one may be unreliable.
        text = resp.content.decode(resp.apparent_encoding, 'ignore')
        html = pq(text)
        trs = html(".news-text table tbody tr")
        list_data = []
        for tr in trs.items():
            dict_data = {
                'sort': tr('td:nth-child(1)').text(),
                'name': tr('td:nth-child(2) div').text(),
                'city': tr('td:nth-child(3)').text(),
                'score': tr('td:nth-child(4)').text(),
            }
            # Indicator columns carry stable CSS classes rather than positions.
            for i in range(5, 10):
                dict_data['indicator%d' % i] = tr('td.indicator%d' % i).text()
            list_data.append(dict_data)
        return list_data
    except RequestException as e:
        # Report the actual cause instead of a bare "an error occurred",
        # and keep the return type consistent (original returned None here,
        # which crashed write_data's writerows()).
        print("发生错误了: %s" % e)
        return []


def write_data(data, filename='data.csv'):
    """Write ranking rows to *filename* as a UTF-8 CSV with a header row.

    :param data: iterable of dicts keyed by the columns listed below.
        ``None`` (e.g. from a failed fetch) is treated as "no rows" instead
        of raising inside ``writerows``.
    :param filename: output path; defaults to ``'data.csv'`` as before,
        so existing callers are unaffected.
    """
    headers = ['sort', 'name', 'city', 'score', 'indicator5', 'indicator6',
               'indicator7', 'indicator8', 'indicator9']
    with open(filename, 'w', encoding="utf-8", newline='') as fp:
        writer = csv.DictWriter(fp, headers)
        writer.writeheader()
        writer.writerows(data or [])


if __name__ == '__main__':
    # 2019 edition of the ranking page on zuihaodaxue.com.
    url = 'http://zuihaodaxue.com/zuihaodaxuepaiming2019.html'
    # Scrape the table rows, then dump them to data.csv in the cwd.
    data = get_link_detail(url)
    write_data(data)
上一篇 下一篇

猜你喜欢

热点阅读