Python Basics Review: A Simple Crawler [Part 5]
2019-01-05 一个热爱生活的好少年
1. Page data collection: urllib
from urllib import request

# target URL
url = 'http://www.baidu.com'
# open the URL with a 1-second timeout
response = request.urlopen(url, timeout=1)
# read the body and decode it as UTF-8 (needed for Chinese pages):
# this is effectively viewing the page source
print(response.read().decode('utf-8'))
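The response object also carries the status code and response headers; a minimal sketch of inspecting them, using the standard HTTPResponse attributes:

import http
from urllib import request

response = request.urlopen('http://www.baidu.com', timeout=1)
# HTTP status code, e.g. 200
print(response.status)
# list of (name, value) header tuples sent by the server
print(response.getheaders())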
2. GET and POST
from urllib import parse
from urllib import request

# parse.urlencode turns a dict into a URL-encoded string;
# a POST body must be bytes, so encode it as UTF-8
data = bytes(parse.urlencode({'word': 'hello'}), encoding='utf8')
# print(data)
response = request.urlopen('http://httpbin.org/post', data=data)
print(response.read().decode('utf-8'))

response2 = request.urlopen('http://httpbin.org/get', timeout=1)
print(response2.read())
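httpbin echoes the request back as JSON, so the body can be parsed with the standard json module; a small sketch (the 'url' field is assumed to be part of httpbin's echo format):

import json
from urllib import request

response = request.urlopen('http://httpbin.org/get', timeout=1)
# parse the JSON body into a Python dict
body = json.loads(response.read().decode('utf-8'))
print(body['url'])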
# a 0.1-second timeout is too short for a real request and raises URLError
import urllib
import socket

try:
    response3 = urllib.request.urlopen('http://httpbin.org/get', timeout=0.1)
except urllib.error.URLError as e:
    # the underlying reason is a socket timeout
    if isinstance(e.reason, socket.timeout):
        print('TIME OUT')
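parse.urlencode also works for GET requests, where the encoded pairs are appended to the URL as a query string; a minimal sketch (the parameter names here are made up for illustration):

from urllib import parse, request

# build the query string and append it to the URL
params = parse.urlencode({'word': 'hello', 'page': '1'})
response = request.urlopen('http://httpbin.org/get?' + params, timeout=1)
print(response.read().decode('utf-8'))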
3. Simulating HTTP headers
from urllib import request, parse

url = 'http://httpbin.org/post'
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate, sdch",
    "Accept-Language": "zh-CN,zh;q=0.8",
    "Connection": "close",
    "Cookie": "_gauges_unique_hour=1; _gauges_unique_day=1; _gauges_unique_month=1; _gauges_unique_year=1; _gauges_unique=1",
    "Referer": "http://httpbin.org/",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36 LBBROWSER"
}
# form fields for the POST body (renamed from dict to avoid shadowing the builtin)
form = {
    'name': 'value'
}
data = bytes(parse.urlencode(form), encoding='utf8')
# Request lets us attach custom headers and pick the HTTP method
req = request.Request(url=url, data=data, headers=headers, method='POST')
response = request.urlopen(req)
print(response.read().decode('utf-8'))
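Headers can also be added one at a time after the Request is built; a minimal sketch using Request.add_header:

from urllib import request

req = request.Request('http://httpbin.org/get')
# attach a single header to an existing Request
req.add_header('User-Agent', 'Mozilla/5.0')
response = request.urlopen(req)
print(response.read().decode('utf-8'))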
4. Requests with the requests library
# GET request
import requests

url = 'http://httpbin.org/get'
data = {'key': 'value', 'abc': 'xyz'}
# requests.get sends a GET; a plain dict is encoded into the query string automatically
response = requests.get(url, params=data)
# print(response.text)
# POST request
import requests

url = 'http://httpbin.org/post'
data = {'key': 'value', 'abc': 'xyz'}
# requests.post sends the dict as a form-encoded POST body
response = requests.post(url, data=data)
# httpbin answers with JSON, which .json() parses into a dict
print(response.json())
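requests takes headers and a timeout directly and raises its own exception on timeout, which condenses the urllib examples above into a few lines; a brief sketch:

import requests

try:
    # custom headers and a timeout, mirroring the earlier urllib examples
    response = requests.get('http://httpbin.org/get',
                            headers={'User-Agent': 'Mozilla/5.0'},
                            timeout=0.1)
    print(response.status_code)
except requests.exceptions.Timeout:
    print('TIME OUT')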
5. Crawling a page for its links
import requests
import re

content = requests.get('http://www.cnu.cc/discoveryPage/hot-人像').text
# print(content)

# The target markup looks like:
# <div class="grid-item work-thumbnail">
#   <a href="(.*?)" ... title">(.*?)</div>
#   <div class="author">LynnWei</div>

# .*? means: zero or more non-newline characters, non-greedy
# (it stops at the first possible match).
# Without re.S, . does not match newlines, so matching is confined to a
# single line; with re.S, the string is treated as one whole and \n is
# matched like any ordinary character, so a pattern can span lines
# (a small demo follows this block).
pattern = re.compile(r'<a href="(.*?)".*?title">(.*?)</div>', re.S)
# print(pattern)

# re.findall returns every non-overlapping match of pattern in content as a list
results = re.findall(pattern, content)
print(results)

for result in results:
    url, name = result
    # strip all whitespace (\s) from the title
    print(url, re.sub(r'\s', '', name))
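To see exactly what re.S changes, here is a tiny self-contained demo (the sample string is made up):

import re

text = '<a href="/a"\ntitle">Photo</a>'
# without re.S, .*? cannot cross the newline, so there is no match
print(re.findall(r'href="(.*?)".*?title">(.*?)</a>', text))        # []
# with re.S, . also matches \n, so the pattern spans both lines
print(re.findall(r'href="(.*?)".*?title">(.*?)</a>', text, re.S))  # [('/a', 'Photo')]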
6. Matching HTML tags and text with BeautifulSoup
# a sample HTML document stored in a string
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
from bs4 import BeautifulSoup

# requires the lxml parser (pip install lxml); BeautifulSoup finds tags
# and reads their contents (the first match or all matches)
soup = BeautifulSoup(html_doc, 'lxml')
# print(soup.prettify())

# the title tag
print(soup.title)
# the text inside the title tag
print(soup.title.string)
# the first p tag
print(soup.p)
# the class attribute of the first p tag
# print(soup.p['class'])
# the first a tag
# print(soup.a)
# all a tags
# print(soup.find_all('a'))
# the tag whose id is link3
print(soup.find(id="link3"))
# the href of every a tag
# for link in soup.find_all('a'):
#     print(link.get('href'))
# all the text in the document
# print(soup.get_text())
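BeautifulSoup also supports CSS selectors through select, which can stand in for chained find_all calls; a minimal sketch, reusing the soup object built above:

# every a.sister link inside a p.story paragraph
for link in soup.select('p.story a.sister'):
    print(link['href'], link.get_text())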
7. Crawling a news site: