Scraping Douban TOP250 Movie Resources from PVideo with a Python Crawler
2017-01-25
石头的书桌
# Douban TOP250 movie resources
import requests
from bs4 import BeautifulSoup
import time
import codecs
import re

# Build a list of links to the movie resource pages (one sub-list per listing page)
def movie_page_list():
    pages = []
    for i in range(1, 11):
        url = 'http://www.181bt.com/movie/top250_douban?page=' + str(i)
        print('Parsing: ' + url)
        try:
            pages.append(find_movie_page(url))
        except Exception:
            # On any failure, return whatever has been collected so far
            return pages
    return pages
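
Note that movie_page_list() returns a nested structure: one sub-list of links per listing page. Here is a minimal, self-contained sketch of how that shape flattens out; the two links are illustrative placeholders:

# Illustrative sketch: 'pages' stands in for what movie_page_list() returns
pages = [['http://www.181bt.com/movie/912.html'],
         ['http://www.181bt.com/movie/201610.html']]
flat = [link for page in pages for link in page]
print(flat)  # every link in one flat list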

# From a listing page such as http://www.181bt.com/movie/top250_douban?page=1,
# find the real movie pages, e.g. http://www.181bt.com/movie/912.html
def find_movie_page(url):
    # PVideo's home URL at the time of writing
    home_link = 'http://www.181bt.com/'
    data = requests.get(url).content
    html_bs = BeautifulSoup(data, 'html.parser')
    # Collect the search-result page links from the <h4> headings
    html_list = []
    for a in html_bs.findAll('h4'):
        search_link = home_link + a.find('a')['href']
        html_list.append(search_link)
    # Follow each search page to find the real movie page
    movie_true_link_list = []
    for i in html_list:
        movie_true_data = requests.get(i).content
        movie_true_bs = BeautifulSoup(movie_true_data, 'html.parser')
        movie_true_bs_result = movie_true_bs.findAll('div', {'class': 'result-item'})
        for a in movie_true_bs_result:
            movie_true_bs_find = a.find('a', {'target': '_blank'})
            movie_true_link = movie_true_bs_find['href']
            movie_true_link_list.append(movie_true_link)
    return movie_true_link_list
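
For reference, a self-contained sketch of the <h4>/<a> extraction pattern used above, run against a hypothetical fragment of the listing page (the href value and movie title are invented for illustration):

from bs4 import BeautifulSoup
snippet = '<h4><a href="search/912.html">肖申克的救赎</a></h4>'
soup = BeautifulSoup(snippet, 'html.parser')
for h4 in soup.findAll('h4'):
    print('http://www.181bt.com/' + h4.find('a')['href'])
# -> http://www.181bt.com/search/912.html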

# Parse the download data on a movie resource page
def parse_movie_link(url):
    # The URL in the page source differs from the live site:
    #   link in the search results: http://www.id97.com/movie/201610.html
    #   actual movie page:          http://www.181bt.com/movie/201610.html
    #   XHR resource list:          http://www.181bt.com/videos/resList/202876
    pattern_url = re.compile(r'http://www.id97.com/.*?/(\d+).html')
    url_num_match = pattern_url.match(url)
    url_num_response = url_num_match.group(1)
    url_response = 'http://www.181bt.com/videos/resList/' + url_num_response
    print(url_response)
    data = requests.get(url_response).content
    html_bs = BeautifulSoup(data, 'html.parser')
    # Locate the download table
    download_blank_find = html_bs.find('table', {'class': 'table table-hover'})
    links = []
    try:
        for i in download_blank_find.findAll('tr'):
            # '网盘' rows carry a cloud-drive link plus its extraction password
            if i.find('td', {'align': 'center'}).find('span').get_text() == '网盘':
                link_find = i.find('div', {'style': 'height:1.5em;line-height:1.5;overflow:hidden;'})
                link_a = link_find.find('a', {'target': '_blank'})
                link_wangpan = (link_a['href'] + link_a.get_text()
                                + link_find.find('strong', {'style': 'color:red;'}).get_text())
                links.append(link_wangpan)
            else:
                # Other rows are direct download links
                download_link_find = i.find('a', {'rel': 'nofollow'})
                links.append(download_link_find.get_text())
                links.append(download_link_find['href'])
    except Exception:
        pass
    # Save the links to a file named after the movie
    data_movie = requests.get(url.replace('id97', '181bt')).content
    html_old_bs = BeautifulSoup(data_movie, 'html.parser')
    movie_name = html_old_bs.find('h1').get_text()
    # Keep only the part of the title before the first space
    pattern = re.compile('^(.*?) ')
    movie_name_match = pattern.match(movie_name)
    # Output directory (must already exist); strip '/' so the title is a valid filename
    movie_name_dic = 'F:\\PVideoTop250' + '\\' + movie_name_match.group().replace('/', '')
    print('Saving... ' + movie_name_match.group())
    with codecs.open(movie_name_dic, 'wb', encoding='utf-8') as f:
        for a in links:
            f.write(a + '\n')
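
The id97 -> resList URL rewrite is easy to verify in isolation. A standalone sketch using the example URLs from the comments above:

import re
pattern_url = re.compile(r'http://www.id97.com/.*?/(\d+).html')
m = pattern_url.match('http://www.id97.com/movie/201610.html')
print('http://www.181bt.com/videos/resList/' + m.group(1))
# -> http://www.181bt.com/videos/resList/201610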

def main():
    # movie_page_list() returns one list of links per listing page,
    # so two nested loops cover every movie
    for page_links in movie_page_list():
        for link in page_links:
            parse_movie_link(link)

if __name__ == '__main__':
    start = time.time()
    main()
    end = time.time()
    print(end - start)
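
To make the '网盘' (cloud-drive) branch of parse_movie_link easier to follow, here is a minimal sketch of the same checks run against a hypothetical table row; the HTML snippet, the pan.baidu.com link, and the password text are all invented for illustration:

from bs4 import BeautifulSoup
row_html = ('<tr><td align="center"><span>网盘</span></td>'
            '<td><div style="height:1.5em;line-height:1.5;overflow:hidden;">'
            '<a target="_blank" href="http://pan.baidu.com/s/xxxx">1080p</a>'
            '<strong style="color:red;">密码:abcd</strong></div></td></tr>')
row = BeautifulSoup(row_html, 'html.parser').find('tr')
if row.find('td', {'align': 'center'}).find('span').get_text() == '网盘':
    div = row.find('div', {'style': 'height:1.5em;line-height:1.5;overflow:hidden;'})
    a = div.find('a', {'target': '_blank'})
    print(a['href'] + a.get_text() + div.find('strong', {'style': 'color:red;'}).get_text())
# -> http://pan.baidu.com/s/xxxx1080p密码:abcd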
To enjoy and have fun