Python Basic Crawler (HTML Parser)
2017-12-12 · 原来不语
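The HtmlParse class below is the HTML-parsing component of a simple crawler. Given a page's URL and its downloaded HTML, it uses BeautifulSoup to extract two things: a set of new candidate URLs (links whose href matches the /view/\d+\.html pattern) and a data dict holding the page URL, title, and summary.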
# -*- coding: utf-8 -*-
import re
from urllib.parse import urljoin

from bs4 import BeautifulSoup


class HtmlParse(object):
    """HTML parser: extracts candidate URLs and data from a downloaded page."""

    def parse(self, page_url, html_cont):
        """
        Parse the page content, extracting URLs and data.
        """
        if page_url is None or html_cont is None:
            return None, None
        soup = BeautifulSoup(html_cont, 'html.parser')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        """
        Extract the set of new URLs.
        """
        new_urls = set()
        # Pick out the <a> tags whose href matches the entry-page pattern
        links = soup.find_all('a', href=re.compile(r'/view/\d+\.html'))
        for link in links:
            # Pull out the href attribute
            new_url = link['href']
            # Join the relative href with the page URL to form an absolute URL
            new_full_url = urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        """
        Extract the useful data.
        """
        data = {}
        data['url'] = page_url
        title = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
        data['title'] = title.get_text()
        summary = soup.find('div', class_='lemma-summary')
        # get_text() returns all the text contained in the tag, including text
        # in descendant tags, as a single Unicode string
        data['summary'] = summary.get_text()
        return data
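Below is a minimal usage sketch, not part of the original listing. It assumes the HtmlParse class above is in scope, that the network is reachable, and that the target page still uses the lemmaWgt-lemmaTitle-title / lemma-summary markup that _get_new_data looks for; the URL is a hypothetical example of a page whose outgoing links match the /view/\d+\.html pattern.

from urllib.request import urlopen

page_url = 'http://baike.baidu.com/view/21087.html'  # hypothetical entry page
html_cont = urlopen(page_url).read()  # bytes are fine; BeautifulSoup decodes them

parser = HtmlParse()
new_urls, new_data = parser.parse(page_url, html_cont)
if new_urls is not None:
    print('found %d candidate links' % len(new_urls))
if new_data is not None:
    print(new_data['title'])
    print(new_data['summary'])

Returning the (new_urls, new_data) pair keeps the parser decoupled from the rest of the crawler: the URL set can feed the crawl frontier, while the data dict goes to whatever output stage the crawler uses.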