
Python Crawler Series (7): Douban Book Rankings (Storing the Data in a Database)

2017-04-30  致Great
Every day, Douban users rate the books they have marked as "read", from "very poor" to "highly recommended". Combining the number of readers of each book with the ratings it has received, Douban's algorithm produces the Douban Books Top 250 list.

URL: Douban Books Top 250 (https://book.douban.com/top250)

Data to scrape: each book's title, author, rating, and featured quote. The lxml library is used for parsing (along with requests for fetching and pymysql for storage), and some familiarity with XPath syntax is assumed.
1. Where the data sits on the page

(Screenshot: 网站数据位置.png)

2. Fields of the book table in the database

(Screenshot: book表字段.png)
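
Since the screenshot of the book table is not reproduced here, below is a minimal sketch of creating an equivalent table with pymysql. The column names match the INSERT statements used later in this article; the column types and lengths are my own assumptions, not taken from the original screenshot.

import pymysql

# Assumed schema: names match the INSERT statements below; types/lengths are guesses
conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='', db='test')
cursor = conn.cursor()
cursor.execute("""
    CREATE TABLE IF NOT EXISTS book (
        id INT AUTO_INCREMENT PRIMARY KEY,
        book_name VARCHAR(255),
        book_author VARCHAR(255),
        book_quote VARCHAR(255),
        book_radio VARCHAR(16)
    ) DEFAULT CHARSET=utf8
""")
conn.commit()
conn.close()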

Part 1: Analyzing the site structure to locate the data

(Screenshot: site structure)

The following code extracts the title, author, rating, and quote:

import requests
from lxml import etree

# Fetch the first page of the Douban Books Top 250 list
res = requests.get('https://book.douban.com/top250?start=0')
html = etree.HTML(res.text)

# Pull out the title, raw author line, rating, and quote with XPath
book_names = html.xpath('//div[@class="pl2"]/a/@title')
book_authors = html.xpath('//p[@class="pl"]/text()')
book_ratios = html.xpath('//span[@class="rating_nums"]/text()')
book_quotes = html.xpath('//span[@class="inq"]/text()')

# The author line also contains the translator, publisher, date and price,
# so keep only the part before the first "/" (the original author)
authors = []
for i in book_authors:
    authors.append(i.split('/')[0])

# Pair the four lists up and print one line per book
for i in zip(book_names, authors, book_quotes, book_ratios):
    a, b, c, d = i
    print(a, b, c, d)
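
One thing to be aware of: the four XPath queries return flat lists, so a book without a quote would shift the zip() pairing by one. A more robust sketch extracts the fields row by row instead (this assumes each book sits inside a <tr class="item"> row, which is how the page was laid out when this was written and may have changed since):

rows = html.xpath('//tr[@class="item"]')
books = []
for row in rows:
    name = row.xpath('.//div[@class="pl2"]/a/@title')[0]
    author = row.xpath('.//p[@class="pl"]/text()')[0].split('/')[0].strip()
    ratio = row.xpath('.//span[@class="rating_nums"]/text()')[0]
    quote = row.xpath('.//span[@class="inq"]/text()')  # may be empty for some books
    books.append((name, author, quote[0] if quote else '', ratio))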

The specific lines that pull each field are:

book_names=html.xpath('//div[@class="pl2"]/a/@title')
book_authors=html.xpath('//p[@class="pl"]/text()')
book_ratios=html.xpath('//span[@class="rating_nums"]/text()')
book_quotes=html.xpath('//span[@class="inq"]/text()')

A note on the author extraction: each raw entry returned for the author line looks like this:
book_authors = "[美] 卡勒德·胡赛尼 / 李继宏 / 上海人民出版社 / 2006-5 / 29.00元"
Splitting it on "/" and taking the first element gives the original author, which is then appended to a list:

authors=[]
for i in book_authors:
    #print(i.split('/')[0])
    authors.append(i.split('/')[0])
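
For example, running the split on the sample string above keeps everything before the first "/", trailing space included, which is why the author values in the output below end with a space; calling .strip() would remove it:

info = "[美] 卡勒德·胡赛尼 / 李继宏 / 上海人民出版社 / 2006-5 / 29.00元"
print(info.split('/')[0])          # "[美] 卡勒德·胡赛尼 " (note the trailing space)
print(info.split('/')[0].strip())  # "[美] 卡勒德·胡赛尼"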

Part 2: Connecting Python to MySQL with pymysql

import pymysql

# Create the connection (a local MySQL server with an empty root password
# and a database named test)
conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='', db='test')
conn.set_charset('utf8')  # set the connection encoding
cursor = conn.cursor()    # create a cursor

for i in zip(book_names, authors, book_quotes, book_ratios):
    # Build the INSERT statement by string formatting; note that this breaks if a
    # value contains a single quote (a parameterized version is sketched after the output)
    sql = "insert into book (book_name,book_author,book_quote,book_radio) values('%s','%s','%s','%s')" % i
    print(sql)
    cursor.execute(sql)  # run the INSERT

conn.commit()  # commit the transaction
conn.close()   # close the connection and release resources

The output is:

insert into book (book_name,book_author,book_quote,book_radio) values('追风筝的人','[美] 卡勒德·胡赛尼 ','为你,千千万万遍','8.8')
insert into book (book_name,book_author,book_quote,book_radio) values('小王子','[法] 圣埃克苏佩里 ','献给长成了大人的孩子们','9.0')
insert into book (book_name,book_author,book_quote,book_radio) values('围城','钱锺书 ','对于“人艰不拆”四个字最彻底的违抗','8.9')
insert into book (book_name,book_author,book_quote,book_radio) values('活着','余华 ','活着本身就是人生最大的意义','9.1')
insert into book (book_name,book_author,book_quote,book_radio) values('解忧杂货店','[日] 东野圭吾 ','一碗精心熬制的东野牌鸡汤,拒绝很难','8.6')
insert into book (book_name,book_author,book_quote,book_radio) values('白夜行','[日] 东野圭吾 ','暗夜独行的残破灵魂,爱与恶本就难分难舍','9.1')
insert into book (book_name,book_author,book_quote,book_radio) values('挪威的森林','[日] 村上春树 ','村上之发轫,多少人的青春启蒙','8.0')
insert into book (book_name,book_author,book_quote,book_radio) values('嫌疑人X的献身','[日] 东野圭吾 ','数学好是一种极致的浪漫','8.9')
insert into book (book_name,book_author,book_quote,book_radio) values('三体','刘慈欣 ','你我不过都是虫子','8.8')
insert into book (book_name,book_author,book_quote,book_radio) values('不能承受的生命之轻','[捷克] 米兰·昆德拉 ','朝向媚俗的一次伟大的进军','8.5')
insert into book (book_name,book_author,book_quote,book_radio) values('红楼梦','[清] 曹雪芹 著 ','谁解其中味?','9.5')
insert into book (book_name,book_author,book_quote,book_radio) values('梦里花落知多少','郭敬明 ','只是青春留下的余烬','7.2')
insert into book (book_name,book_author,book_quote,book_radio) values('达·芬奇密码','[美] 丹·布朗 ','一切畅销的因素都有了','8.2')
insert into book (book_name,book_author,book_quote,book_radio) values('看见','柴静 ','在这里看见中国','8.8')
insert into book (book_name,book_author,book_quote,book_radio) values('百年孤独','[哥伦比亚] 加西亚·马尔克斯 ','尼采所谓的永劫复归,一场无始无终的梦魇','9.2')
insert into book (book_name,book_author,book_quote,book_radio) values('1988:我想和这个世界谈谈','韩寒 ','车手韩寒的公路小说','7.9')
insert into book (book_name,book_author,book_quote,book_radio) values('何以笙箫默','顾漫 ','倒追有风险,入行需谨慎','8.0')
insert into book (book_name,book_author,book_quote,book_radio) values('平凡的世界(全三部)','路遥 ','中国当代城乡生活全景','9.0')
insert into book (book_name,book_author,book_quote,book_radio) values('简爱','[英] 夏洛蒂·勃朗特 ','灰姑娘在十九世纪','8.5')
insert into book (book_name,book_author,book_quote,book_radio) values('哈利·波特与魔法石','[英] J. K. 罗琳 ','羽加迪姆勒维奥萨!','9.0')
insert into book (book_name,book_author,book_quote,book_radio) values('飘','[美国] 玛格丽特·米切尔 ','革命时期的爱情,随风而逝','9.3')
insert into book (book_name,book_author,book_quote,book_radio) values('三体Ⅱ','刘慈欣 ','无边的黑暗森林,比第一部更为恢弘壮丽','9.2')
insert into book (book_name,book_author,book_quote,book_radio) values('白夜行','东野圭吾 ','封面剧透','9.2')
insert into book (book_name,book_author,book_quote,book_radio) values('送你一颗子弹','刘瑜 ','犀利又温柔,穿过胸口隐隐作痛','8.6')
insert into book (book_name,book_author,book_quote,book_radio) values('三体Ⅲ','刘慈欣 ','终章,何去何从','9.2')
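
As noted in the code comments, building the SQL by string formatting breaks as soon as a value contains a single quote and is vulnerable to SQL injection. A safer sketch of the same insert using pymysql's parameter binding (same table and columns, values passed separately) would be:

sql = "insert into book (book_name, book_author, book_quote, book_radio) values (%s, %s, %s, %s)"
for i in zip(book_names, authors, book_quotes, book_ratios):
    cursor.execute(sql, i)  # pymysql escapes the values itself
conn.commit()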

Part 3: Crawling multiple pages and storing the data in the database

Full code:

import requests
import pymysql
from lxml import etree

# Database connection (local MySQL, empty root password, database named test)
conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='', db='test')
conn.set_charset('utf8')
cursor = conn.cursor()

url = 'https://book.douban.com/top250?start={}'

def get_data(page):
    # Fetch one page of the Top 250 list and return (title, author, quote, rating) tuples
    res = requests.get(url.format(str(page)))
    html = etree.HTML(res.text)
    book_names = html.xpath('//div[@class="pl2"]/a/@title')
    book_authors = html.xpath('//p[@class="pl"]/text()')
    book_ratios = html.xpath('//span[@class="rating_nums"]/text()')
    book_quotes = html.xpath('//span[@class="inq"]/text()')

    # Keep only the original author from the "/"-separated info line
    authors = []
    for i in book_authors:
        authors.append(i.split('/')[0])
    return zip(book_names, authors, book_quotes, book_ratios)


# 25 books per page, 10 pages in total
for i in range(10):
    data = get_data(i * 25)
    for j in data:
        sql = "insert into book (book_name,book_author,book_quote,book_radio) values('%s','%s','%s','%s')" % j
        cursor.execute(sql)
    print('Saved page %d' % (i + 1))

conn.commit()  # commit once all pages have been inserted
conn.close()   # close the connection
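
To double-check that the rows actually landed in the database, a quick count query (again with pymysql, reconnecting since the crawl closed its connection) can be run afterwards:

conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='', db='test')
cursor = conn.cursor()
cursor.execute('select count(*) from book')
print(cursor.fetchone()[0])  # number of rows currently in the book table
conn.close()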

Part 4: Results

(Screenshot: the crawled results)

Part 5: Summary

For this article I mainly searched Baidu and taught myself how to use pymysql and XPath syntax, then put together this small demo; it is written up here for future reference.
