【Python】解析网页BeautifulSoup工具包安装及介绍

2018-08-26  本文已影响28人  Natsuka
基础回顾
BeautifulSoup
BeautifulSoup的安装
BeautifulSoup支持的解释器
解析器 使用方法 优势 劣势
Python标准库 BeautifulSoup(markup, 'html.parser') (1)Python的内置标准库(2)执行速度适中(3)文档容错能力强 (1)Python2.7.3 or 3.2.2前的版本中文档容错能力差
lxml HTML 解析器 BeautifulSoup(markup, 'lxml') (1)执行速度快(2)文档容错能力强 需要安装C语言库
lxml XML解析器 BeautifulSoup(markup, ['lxml','xml']) 或 BeautifulSoup(markup, 'xml') (1)速度快(2)唯一支持XML的解析器 需要安装C语言库
html5lib BeautifulSoup(markup, 'html5lib') (1)最好的容错性 (2)以浏览器的方式解析文档 (3)生成HTML5格式的文档 (1)速度慢 (2)不依赖外部扩展
BeautifulSoup模块的导入和基本应用
import requests
from bs4 import BeautifulSoup

# Fetch the page and inspect the raw response object, its HTML text,
# and the text's type.
url = 'https://news.qq.com/a/20170205/019837.htm'
r = requests.get(url=url)
print(r)
print(r.text)
print(type(r.text))
import requests
from bs4 import BeautifulSoup

# Parse the page with the lxml parser, then show the soup both raw
# and pretty-printed.
response = requests.get(url='https://news.qq.com/a/20170205/019837.htm')
soup = BeautifulSoup(response.text, 'lxml')

print(soup)
print(soup.prettify())
import requests
from bs4 import BeautifulSoup

# Attribute-style access returns the FIRST matching tag in the document.
response = requests.get(url='https://news.qq.com/a/20170205/019837.htm')
soup = BeautifulSoup(response.text, 'lxml')

for tag in (soup.head, soup.title, soup.a, soup.p):
    print(tag)

print(type(soup.title))
import requests
from bs4 import BeautifulSoup

# Inspect tag names (.name) and attribute dictionaries (.attrs).
response = requests.get(url='https://news.qq.com/a/20170205/019837.htm')
soup = BeautifulSoup(response.text, 'lxml')

print(soup.title.name)
print(type(soup.title.name))
for tag in (soup.a, soup.p):
    print(tag.name)

print(soup.title.attrs)
print(type(soup.title.attrs))
for tag in (soup.a, soup.p):
    print(tag.attrs)

# Look up one specific attribute value.
print(soup.a.attrs['style'])
import requests
from bs4 import BeautifulSoup

# Compare .string with .text on various tags.
response = requests.get(url='https://news.qq.com/a/20170205/019837.htm')
soup = BeautifulSoup(response.text, 'lxml')

print(soup.title.string)
print(type(soup.title.string))
for tag in (soup.a, soup.p):
    print(tag.string)

print(soup.head)
print(soup.head.string)
# .text yields a plain str and also works on tags with multiple children,
# not just a single tag.
print(soup.head.text)
分析文档树
import requests
from bs4 import BeautifulSoup

# .contents materializes a tag's direct children as a list.
response = requests.get(url='https://news.qq.com/a/20170205/019837.htm')
soup = BeautifulSoup(response.text, 'lxml')

head_children = soup.head.contents
print(head_children)
print(type(head_children))
import requests
from bs4 import BeautifulSoup

r = requests.get(url='https://news.qq.com/a/20170205/019837.htm')
soup = BeautifulSoup(r.text, 'lxml')

# .children is a generator over the tag's direct children; it must be
# iterated (printing the generator object itself is not informative).
#print(soup.head.children)
print(type(soup.head.children))
# Fixed typo: was "chlidren". BeautifulSoup resolves unknown attributes as a
# tag-name search, which returned None and made the for-loop raise TypeError.
for i in soup.head.children:
    print(i)
import requests
from bs4 import BeautifulSoup

# .descendants walks the whole subtree (all levels), yielding each node.
response = requests.get(url='https://news.qq.com/a/20170205/019837.htm')
soup = BeautifulSoup(response.text, 'lxml')

tree = soup.body.descendants
print(tree)
print(type(tree))
for node in soup.body.descendants:
    print(node)
import requests
from bs4 import BeautifulSoup

# .parent climbs one level up from a tag.
response = requests.get(url='https://news.qq.com/a/20170205/019837.htm')
soup = BeautifulSoup(response.text, 'lxml')

title_tag = soup.title
print(title_tag)
print(type(title_tag))
parent = title_tag.parent
print(parent)
print(type(parent))
print(parent.name)
print(parent.attrs)
import requests
from bs4 import BeautifulSoup

# .parents iterates every ancestor up to the document root.
response = requests.get(url='https://news.qq.com/a/20170205/019837.htm')
soup = BeautifulSoup(response.text, 'lxml')

first_anchor = soup.body.a
for ancestor in first_anchor.parents:
    print(ancestor.name)

*兄弟节点
.next_sibling .previous_sibling
兄弟节点可以理解为和本节点在同一级的节点

import requests
from bs4 import BeautifulSoup

r = requests.get(url='https://news.qq.com/a/20170205/019837.htm')
soup = BeautifulSoup(r.text, 'lxml')

# Fixed typo: was "next_silbling"/"previous_silbling". BeautifulSoup resolves
# unknown attributes as a tag-name search, so the misspelled version silently
# printed None instead of the adjacent sibling node.
print(soup.p.next_sibling)

print(soup.p.previous_sibling)
import requests
from bs4 import BeautifulSoup

r = requests.get(url='https://news.qq.com/a/20170205/019837.htm')
soup = BeautifulSoup(r.text, 'lxml')

# Fixed typo: was "next_silblings", which resolved to None (unknown attributes
# become a tag-name search) and made the for-loop raise TypeError.
for i in soup.p.next_siblings:
    print(i)
import requests
from bs4 import BeautifulSoup

# .previous_element is the node parsed immediately before this one.
response = requests.get(url='https://news.qq.com/a/20170205/019837.htm')
soup = BeautifulSoup(response.text, 'lxml')

prev = soup.head.previous_element
print(prev.name)
print(prev)
遍历所有标签的方法
import requests
from bs4 import BeautifulSoup

# find_all: search by tag name, CSS class, result limit, or attribute value.
response = requests.get(url='https://news.qq.com/a/20170205/019837.htm')
soup = BeautifulSoup(response.text, 'lxml')

for tag_name in ('title', 'meta'):
    print(soup.find_all(tag_name))

print(soup.find_all('img'))
print(soup.find_all('img', 'sspLogo'))
print(soup.find_all('img', limit=2))  # limit: cap how many results are returned
print(soup.find_all('img', height='20'))  # keyword filter: images with height="20"
import requests
import re
from bs4 import BeautifulSoup

# find_all accepts a compiled regex as an attribute filter.
response = requests.get(url='https://news.qq.com/a/20170205/019837.htm')
soup = BeautifulSoup(response.text, 'lxml')

pattern = re.compile('news.qq.com/a/201605')
for link in soup.find_all('a', href=pattern):
    print(link, type(link))
    print(link.text)
    print(link.attrs['href'])
    print('\n')
import requests
from bs4 import BeautifulSoup

# find returns only the first match (or None), unlike find_all.
response = requests.get(url='https://news.qq.com/a/20170205/019837.htm')
soup = BeautifulSoup(response.text, 'lxml')

first_link = soup.find('a')
print(first_link)
print(type(first_link))
print(first_link.text)
上一篇 下一篇

猜你喜欢

热点阅读