人生苦短

爬取糗事百科文字版

2018-11-13  本文已影响9人  dongger

主要收获

import requests
from bs4 import BeautifulSoup
import lxml
import time
import pymongo
# MongoDB setup: local server, database "donger", collection "sheet_2"
# (every scraped post is inserted as one document).
client=pymongo.MongoClient('localhost',27017)
donger=client['donger']
sheet_2=donger['sheet_2']
# Listing pages 1..13 of the qiushibaike "text" section.
urls=["https://www.qiushibaike.com/text/page/{}/".format(str(i)) for i in range(1,14)]
def getone_url(url):
    """Scrape one qiushibaike text-listing page and store each post in MongoDB.

    For every post on the page, extract the author name, post text, vote
    count and comment count, print the record, and insert it into the
    module-level ``sheet_2`` collection.

    :param url: listing-page URL, e.g. https://www.qiushibaike.com/text/page/3/
    :raises requests.HTTPError: if the server returns an error status.
    """
    # A browser-like User-Agent is required — the site tends to reject the
    # default python-requests UA; a timeout prevents an indefinite hang.
    headers = {
        "User-Agent": ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                       "AppleWebKit/537.36 (KHTML, like Gecko) "
                       "Chrome/70.0.3538.77 Safari/537.36"),
    }
    web_data = requests.get(url, headers=headers, timeout=10)
    web_data.raise_for_status()  # fail loudly on 4xx/5xx instead of parsing an error page
    soup = BeautifulSoup(web_data.text, 'lxml')
    # Parse each post container individually instead of zipping four parallel
    # page-wide lists: the original filtered "查看全文" spans out of the content
    # list but not the author/vote/comment lists, which could misalign fields
    # across posts. NOTE(review): container selector assumed to be div.article
    # from the site's markup — confirm against a live page.
    for article in soup.select('div.article'):
        author = article.select_one('div > a > h2')
        contents = [span for span in article.select('a.contentHerf > div > span')
                    if span.get_text() != "查看全文"]
        number = article.select_one('div.stats span.stats-vote i')
        discuss_one = article.select_one('div.stats span.stats-comments i')
        if not (author and contents and number and discuss_one):
            # Skip ads / malformed entries rather than crash mid-page.
            continue
        data = {
            "author": author.get_text().strip(),
            "neirong": contents[0].get_text().strip(),
            "number": int(number.get_text()),
            "discuss": discuss_one.get_text(),
        }
        print(data)
        sheet_2.insert_one(data)

# Crawl every listing page, pausing between requests to avoid hammering the site.
for page_url in urls:
    getone_url(page_url)
    time.sleep(2)

# Example follow-up query: print the text of posts with >= 3000 votes.
# for item in sheet_2.find({'number': {'$gte': 3000}}):
#     print(item['neirong'])
上一篇 下一篇

猜你喜欢

热点阅读