Scraping Douban actor images with Python
2018-03-28
菜鸟平
This post shows how to download actor photos from Douban by actor ID using Python 3.
1. This class reads the actor IDs from an Excel spreadsheet
"""
-------------------------------------------------
File Name: Do_Excel
Description :
Author : 小鱼
date: 2018/3/19
-------------------------------------------------
Change Activity:
2018/3/19:
-------------------------------------------------
"""
__author__ = '小鱼'
from openpyxl import load_workbook
class DoExcel:
def __init__(self,excelpath):
self.wb = load_workbook(excelpath)
self.excelpath = excelpath
self.sh_id = self.wb.get_sheet_by_name("id")
self.sh_move = self.wb.get_sheet_by_name("move")
def get_id(self):
l=[]
for i in range(1,self.sh_id.max_row+1):
id = self.sh_id.cell(row=i,column=1).value
l.append(id)
return l
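A quick usage sketch of this class, assuming a workbook move_id.xlsx that contains sheets named "id" and "move" (the same layout the spider below expects):

    from Do_Excel import DoExcel

    do_excel = DoExcel('move_id.xlsx')   # workbook with "id" and "move" sheets
    print(do_excel.get_id())             # actor IDs from column 1 of the "id" sheet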
"""
-------------------------------------------------
File Name: douban_spider2
Description :
Author : 小鱼
date: 2018/3/20
-------------------------------------------------
Change Activity:
2018/3/20:
-------------------------------------------------
"""
import urllib
__author__ = '小鱼'
from Do_Excel import DoExcel
import requests
import re
import time
from urllib.request import urlretrieve, urlcleanup
import os
class DouBan_Spider():
def auto_down(self,url, filename):
try:
urlretrieve(url, filename)
except :
print("重新下载")
urlcleanup()
return self.auto_down(url,filename)
def judge_ban(self, id):
move_url = "https://movie.douban.com/celebrity/%s" % id
try:
response = requests.get(url=move_url, timeout=30)
except:
time.sleep(10)
print('请求多次仍然超时,请检查')
return self.judge_ban(id)
html = response.text
if html.find(r'检测到有异常请求从你的') != -1:
print("被ban了,需要等待10s继续请求")
time.sleep(10)
return self.judge_ban(id)
else:
picture = r'''title="点击看大图"
src="(.*?)">'''
imgre = re.compile(picture)
imgrelist = imgre.findall(html)
if imgrelist!=[]:
self.auto_down(imgrelist[0],os.getcwd()+'/picture/%s.jpg'%id)
print("当前演员id为:" + str(id))
print('.......................')
else:
print("该演员没有头像")
print('.......................')
if __name__ == '__main__':
m = DouBan_Spider()
do_excel = DoExcel('move_id.xlsx')
id_list = do_excel.get_id()
count = 0
for m_id in id_list:
count += 1
print('当前下载了' + str(count) + '张')
m.judge_ban(m_id)
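One practical caveat: auto_down saves into a picture/ folder under the working directory, and urlretrieve will fail if that folder is missing. A small guard (my addition, not in the original script) could be placed before the download loop:

    import os

    # create ./picture if it does not exist so urlretrieve has somewhere to write
    os.makedirs(os.path.join(os.getcwd(), 'picture'), exist_ok=True)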
Notes:
- The script downloads an actor's portrait by actor ID; if the request gets banned, it waits 10 seconds and retries.
- When reading the Excel sheet, max_row is the index of the last row, so range() needs max_row + 1, otherwise the last row is skipped (see the short sketch below).
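A minimal sketch of that off-by-one point, assuming a workbook demo.xlsx with a sheet named "id" (the file name is hypothetical, used only for illustration):

    from openpyxl import load_workbook

    wb = load_workbook("demo.xlsx")
    sh = wb["id"]
    # max_row is the 1-based index of the last used row, e.g. 5 for five rows;
    # range(1, sh.max_row) would stop at row 4, so +1 is required
    ids = [sh.cell(row=i, column=1).value for i in range(1, sh.max_row + 1)]
    print(ids)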
2. The result is shown in the screenshot below.
[image: image.png]
Notes:
import urllib.request
urlretrieve() downloads the resource at a URL straight to a local file.
Usage: urllib.request.urlretrieve(url, filename), where url is the address to fetch and filename is the local path to save it to.
urlcleanup() clears the temporary cache files that urlretrieve() may leave behind.
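A minimal sketch of the two calls together (the URL and file name here are placeholders, not from the original post):

    import urllib.request

    url = "https://example.com/avatar.jpg"          # placeholder URL
    urllib.request.urlretrieve(url, "avatar.jpg")   # save the resource to ./avatar.jpg
    urllib.request.urlcleanup()                     # remove urlretrieve's temporary cache files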