慕课笔记--python原生爬虫“笔记”
2018-10-08 本文已影响25人
敬楠
之所以叫“笔记”,并非原创,只是总结。原创作者是慕课老师--七月,很喜欢他的课买了他很多课(打个广告)。仅供学习交流。
爬虫分析
熊猫tv.png----需求:对一个直播网站的某个分类模块人气进行排名。
----分析:对图中红色区域“video-nickname”、“video-number”进行抓取和排序。
----代码实现:这里是对正则表达式的及python语法的一些考核。
具体代码实现
第一阶段:发起网络请求,请求到所需分析的html页面
# 引入网络框架
from urllib import request
class Spider:
    """Stage 1: fetch the raw HTML of the target category page and print it."""

    # Panda TV "League of Legends" category page.
    url = 'https://www.panda.tv/cate/lol?pdt=1.24.s1.3.4jhlkpnlco3'

    # Stage 1: issue the network request and obtain the HTML page to analyse.
    def __fetch_content(self):
        # Context manager closes the HTTP response; the original left the
        # connection dangling (resource leak).
        with request.urlopen(Spider.url) as r:
            html = r.read()
        html = str(html, encoding='utf-8')
        print(html)

    def run(self):
        self.__fetch_content()  # stage 1: fetch and print
# Instantiate the spider and start the crawl.
Spider().run()
第二阶段:分析这个html页面,提取出我们所要关注的标签
# 引入网络框架\正则框架
from urllib import request
import re
class Spider:
    """Stage 2: fetch the page and extract the per-video "video-info" blocks."""

    url = 'https://www.panda.tv/cate/lol?pdt=1.24.s1.3.4jhlkpnlco3'
    # \s matches whitespace, \S matches non-whitespace, [...] is a character
    # set, * repeats 0+ times and ? makes it non-greedy, so [\s\S]*? grabs the
    # shortest run of any character (including newlines) up to the first </div>.
    # Raw string avoids the invalid-escape DeprecationWarning the original had.
    root_pattern = r'<div class="video-info">([\s\S]*?)</div>'

    def __fetch_content(self):
        # Context manager closes the HTTP response (original leaked it).
        with request.urlopen(Spider.url) as r:
            html = r.read()
        return str(html, encoding='utf-8')

    # Stage 2: analyse the HTML page and pull out the tags we care about.
    def __analysis(self, html):
        root_html = re.findall(Spider.root_pattern, html)
        # Guard the empty case so a page-layout change doesn't raise IndexError.
        if root_html:
            print(root_html[0])

    def run(self):
        html = self.__fetch_content()
        self.__analysis(html)
# Instantiate the spider and start the crawl.
Spider().run()
获得的结果:
获得所需.png
第三阶段:获取“video-nickname”、“video-number”
# 引入网络框架\正则框架
from urllib import request
import re
class Spider:
    """Stage 3: extract each anchor's nickname ("video-nickname") and
    viewer count ("video-number") from every video-info block."""

    url = 'https://www.panda.tv/cate/lol?pdt=1.24.s1.3.4jhlkpnlco3'
    # [\s\S]*? : shortest run of any character, including newlines.
    # Raw strings avoid the invalid-escape DeprecationWarning the original had.
    root_pattern = r'<div class="video-info">([\s\S]*?)</div>'
    name_pattern = r'</i>([\s\S]*?)</span>'
    # sample target: <span class="video-number">15.1万</span>
    number_pattern = r'<span class="video-number">([\s\S]*?)</span>'

    def __fetch_content(self):
        # Context manager closes the HTTP response (original leaked it).
        with request.urlopen(Spider.url) as r:
            html = r.read()
        return str(html, encoding='utf-8')

    def __analysis(self, html):
        root_html = re.findall(Spider.root_pattern, html)
        anchors = []
        # Distinct loop variable: the original shadowed the `html` parameter.
        for fragment in root_html:
            name = re.findall(Spider.name_pattern, fragment)
            number = re.findall(Spider.number_pattern, fragment)
            anchors.append({"name": name, "number": number})
        if anchors:  # avoid IndexError on an empty page
            print(anchors[0])
        return anchors  # returned for reuse (consistent with stage 4)

    def run(self):
        html = self.__fetch_content()
        self.__analysis(html)
# Instantiate the spider and start the crawl.
Spider().run()
结果
{'name': ['\n 守卫者 ', '\n '], 'number': ['1.9万']}
继续优化:将空格删除
# 精炼我们的函数
# Refine the raw regex-match lists into clean single-value records.
def __refine(self, anchors):
    def tidy(anchor):
        # Keep the first match of each list; strip whitespace from the name.
        return {
            'name': anchor['name'][0].strip(),
            'number': anchor['number'][0],
        }
    return map(tidy, anchors)
def run(self):
    # Pipeline: fetch -> analyse -> refine, then show the cleaned records.
    raw_anchors = self.__analysis(self.__fetch_content())
    refined = list(self.__refine(raw_anchors))
    print(refined)
结果
测试结果.png
第四阶段 对抓取的数据进行排序
# 引入网络框架\正则框架
from urllib import request
import re
class Spider:
    """Stage 4: crawl the category page, extract (nickname, viewer-count)
    pairs, sort them by popularity and print the ranking."""

    url = 'https://www.panda.tv/cate/lol?pdt=1.24.s1.3.4jhlkpnlco3'
    # [\s\S]*? grabs the shortest run of any character (incl. newlines).
    # Raw strings avoid the invalid-escape DeprecationWarning the original had.
    root_pattern = r'<div class="video-info">([\s\S]*?)</div>'
    name_pattern = r'</i>([\s\S]*?)</span>'
    # sample target: <span class="video-number">15.1万</span>
    number_pattern = r'<span class="video-number">([\s\S]*?)</span>'
    # Decimal viewer count such as "15.1". The original used '\d*', which
    # dropped the fractional part ("15.1万" sorted as 150000 instead of
    # 151000) and could match an empty string, crashing float('').
    count_pattern = r'\d+(?:\.\d+)?'

    def __fetch_content(self):
        # Context manager closes the HTTP response (original leaked it).
        with request.urlopen(Spider.url) as r:
            html = r.read()
        return str(html, encoding='utf-8')

    def __analysis(self, html):
        root_html = re.findall(Spider.root_pattern, html)
        anchors = []
        # Distinct loop variable: the original shadowed the `html` parameter.
        for fragment in root_html:
            name = re.findall(Spider.name_pattern, fragment)
            number = re.findall(Spider.number_pattern, fragment)
            anchors.append({"name": name, "number": number})
        return anchors

    # Refine the raw regex-match lists into clean single-value records.
    def __refine(self, anchors):
        express = lambda anchor: {
            'name': anchor['name'][0].strip(),
            'number': anchor['number'][0]
        }
        return map(express, anchors)

    # Sort descending by the numeric value of the 'number' field.
    def __sort_anchors(self, anchors):
        return sorted(anchors, key=self.__sort_seed, reverse=True)

    def __sort_seed(self, anchor):
        # Parse "1.9万" -> 19000.0, "880" -> 880.0; unparseable -> 0.0.
        r = re.findall(Spider.count_pattern, anchor['number'])
        number = float(r[0]) if r else 0.0
        if '万' in anchor['number']:
            number *= 10000
        return number

    def __show(self, anchors):
        for anchor in anchors:
            print(anchor['name'] + '-----' + anchor['number'])

    def run(self):
        html = self.__fetch_content()
        anchors = self.__analysis(html)
        anchors = list(self.__refine(anchors))
        anchors = self.__sort_anchors(anchors)
        self.__show(anchors)
# Instantiate the spider and start the crawl.
Spider().run()
测试结果
测试结果.png
最后显示小小的优化:
def __show(self, anchors):
    # Print a 1-based ranking line per anchor, e.g. "rank1----name----1.9万".
    # enumerate replaces the range(len(...)) anti-pattern; output is identical.
    for rank, anchor in enumerate(anchors, start=1):
        print('rank' + str(rank) + "----" +
              anchor['name'] + "----" +
              anchor['number'])
测试结果:
测试结果.png
撒花。。。。花