03_Basic Library Usage

2019-04-02  Arale_zh

The urllib library

- parse: parses links; extracts, combines, and converts the individual components of a URL

```
from urllib import parse
res = parse.urlparse("http://www.baidu.com/index.html;user?id=5#comment")
# The result is a named tuple: access by index (res[0]) or by attribute (res.scheme)
print(res, res[0], res.scheme)  # ParseResult(scheme='http', netloc='www.baidu.com', path='/index.html', params='user', query='id=5', fragment='comment')

data = ["http", "www.baidu.com", "index.html", "user", "d=5", "comment"]
res = parse.urlunparse(data)
print(res)  # http://www.baidu.com/index.html;user?d=5#comment

res = parse.urlsplit("http://www.baidu.com/index.html;user?id=5#comment")
print(res)  # SplitResult(scheme='http', netloc='www.baidu.com', path='/index.html;user', query='id=5', fragment='comment')

data = ["http", "www.baidu.com", "index.html", "d=5", "comment"]
res = parse.urlunsplit(data)
print(res)  # http://www.baidu.com/index.html?d=5#comment

res = parse.urljoin("http://www.baidu.com", "index.html")
print(res)  # http://www.baidu.com/index.html (note urljoin's merging rules)

data= {
    "name":"bob",
    "age":18
}
res = parse.urlencode(data, encoding="utf8")
print(res)  # name=bob&age=18

# parse_qs / parse_qsl work on query strings, not full URLs
res = parse.parse_qs("id=5&name=3")
print(res)  # {'id': ['5'], 'name': ['3']}

res = parse.parse_qsl("id=5&name=3")
print(res)  # [('id', '5'), ('name', '3')]

q = "张三"
res = "http://www.baidu.com/index.html?q=%s" % parse.quote(q)  # percent-encode
print(res)  # http://www.baidu.com/index.html?q=%E5%BC%A0%E4%B8%89
res = parse.unquote(res)  # decode
print(res)  # http://www.baidu.com/index.html?q=张三
```
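These pieces compose naturally: urlparse can pull out just the query component, and parse_qs then turns that component into a dict. A minimal sketch (the URL is made up for illustration):

```
from urllib import parse

url = "http://www.baidu.com/index.html?id=5&name=bob"
query = parse.urlparse(url).query  # 'id=5&name=bob'
print(parse.parse_qs(query))       # {'id': ['5'], 'name': ['bob']}
```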

Using requests

  1. Basic usage
    • GET request: requests.get(url, params=params, headers=headers)
      • url: address of the requested resource
      • params: query parameters to send
      • headers: request headers, one way around anti-scraping measures
    • POST request: requests.post(url, data=data, headers=headers)
      • url: address of the requested resource
      • data: form data to send in the request body
      • headers: request headers, one way around anti-scraping measures
    • Response attributes
      • text: the response body as text
      • content: the response body as bytes, e.g. for audio, video, or images
      • status_code: the HTTP status code
      • headers: the response headers
      • cookies: the cookie information
      • url: the requested URL
      • history: the request history (redirects)
     import requests
    
     # GET request: requests.get(url, params=params, headers=headers)
     url = "http://httpbin.org/get"  # target URL
     # query parameters
     params = {
         "name": "Arale",
         "age": 25
     }
     # request headers
     headers = {
         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36"
     }
     # send the GET request and get the response
     resp = requests.get(url, params=params, headers=headers)
     print(resp.url)  # the params end up in the URL: http://httpbin.org/get?name=Arale&age=25

     # POST request: requests.post(url, data=data, headers=headers)
     url = "http://httpbin.org/post"  # target URL
     # send the POST request
     resp = requests.post(url, data=params, headers=headers)
     print(resp.url)  # the data goes in the request body: http://httpbin.org/post

     # response attributes
     print(resp.text)  # the body as str text
     print(resp.content)  # the body as bytes, e.g. for images
     print(resp.url)  # http://httpbin.org/post
     print(resp.headers)  # {'Access-Control-Allow-Credentials': 'true', 'Access-Control-Allow-Origin': '*', 'Content-Encoding': 'gzip', 'Content-Type': 'application/json', 'Date': 'Thu, 28 Mar 2019 01:53:32 GMT', 'Server': 'nginx', 'Content-Length': '343', 'Connection': 'keep-alive'}
     print(resp.cookies)  # <RequestsCookieJar[]>
     print(resp.status_code)  # 200
     print(resp.history)  # []
    
  2. Advanced usage
    • Uploading files
     # upload a file
     files = {
         "file": open("01_urllib的使用.py", "rb")
     }
     resp = requests.post(url, files=files)  # the dict must be passed via the files= keyword argument
     print(resp.text)  # the response body gains an extra "files" field
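If you also need to control the filename and content type that get sent, requests accepts a (filename, fileobj, content_type) tuple instead of a bare file object; a small sketch (the field name and file are placeholders):

```
import requests

files = {
    "file": ("demo.png", open("demo.png", "rb"), "image/png")  # filename, file object, content type
}
resp = requests.post("http://httpbin.org/post", files=files)
print(resp.json()["files"].keys())  # httpbin echoes the upload back under "files"
```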
    
    • Using cookies
      • Take Jianshu as an example: the page for writing an article is only visible after logging in. Grab the cookie from the site after logging in and send it with the request to keep the logged-in state.
     # using cookies
     headers = {
         # this cookie was captured after logging in to the site
         "Cookie": "Hm_lvt_0c0e9d9b1e7d617b3e6842e85b9fb068=1553742403; sajssdk_2015_cross_new_user=1; locale=zh-CN; read_mode=day; default_font=font2; remember_user_token=W1sxMjE1NTM2Ml0sIiQyYSQxMSRmZEdzaHlpLnFsYnZpMG9PbFRQLk91IiwiMTU1Mzc0MjQxMC44MTg2OTc3Il0%3D--48708ad37562cd9a12cfaac066b92cc24e4305d3; _m7e_session_core=167a540dc0e51fd3bb10e0e502e174de; __yadk_uid=8uaAcl2jljk5KfYwGemwVKFoMN89sBuC; Hm_lpvt_0c0e9d9b1e7d617b3e6842e85b9fb068=1553742450; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22169c243959d13-00d4259c8ce7dd-7a1b34-1296000-169c243959e606%22%2C%22%24device_id%22%3A%22169c243959d13-00d4259c8ce7dd-7a1b34-1296000-169c243959e606%22%2C%22props%22%3A%7B%7D%2C%22first_id%22%3A%22%22%7D",
         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36",
         "Referer": "https://www.jianshu.com/writer"
     }
     url = "https://www.jianshu.com/writer#/notebooks/35136025/notes/43035331"
     resp = requests.get(url, headers=headers)
     print(resp.status_code)  # with the cookie set, the login-only page returns 200
     # inspect cookies and iterate over their key/value pairs
     cookie = requests.get("https://www.baidu.com/").cookies
     print(cookie)
     for k, v in cookie.items():
         print(k + "=" + v)
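Besides pasting the whole Cookie header, requests can also take cookies through its cookies= parameter, for example as a plain dict; a minimal sketch (the cookie name and value are made up):

```
import requests

cookies = {"sessionid": "123456"}
resp = requests.get("http://httpbin.org/cookies", cookies=cookies)
print(resp.text)  # httpbin echoes back the cookies it received
```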
    
    • Session persistence
      • Session(): maintains a single session; make requests through the instantiated object
        # session: maintain the same session
        session = requests.Session()  # instantiate the Session class so requests go through the same instance, i.e. the same session
        session.get("https://www.httpbin.org/cookies/set/arale/123456")
        resp = session.get("https://www.httpbin.org/cookies")
        print(resp.text)
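To see why the Session matters, compare it with two independent requests.get calls, where the cookie set by the first request is not carried by the second; a small sketch against httpbin:

```
import requests

# two independent requests: the second one does not see the cookie set by the first
requests.get("http://httpbin.org/cookies/set/arale/123456")
print(requests.get("http://httpbin.org/cookies").text)   # {"cookies": {}}

# a Session keeps the cookie jar, so the second request carries the cookie
session = requests.Session()
session.get("http://httpbin.org/cookies/set/arale/123456")
print(session.get("http://httpbin.org/cookies").text)    # {"cookies": {"arale": "123456"}}
```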
      
    • SSL certificate verification
      • requests has a verify parameter that defaults to True, so the site's certificate is verified automatically
      # SSL certificate verification: the verify parameter defaults to True
        try:
            resp = requests.get("https://inv-veri.chinatax.gov.cn/")
            print(resp.status_code)  # raises requests.exceptions.SSLError because the CA certificate is verified by default
        except Exception as e:
            # from requests.packages import urllib3
            # urllib3.disable_warnings()  # suppress the warning
            import logging
            logging.captureWarnings(True)  # silence the warning by capturing it into the logging system
            resp = requests.get("https://inv-veri.chinatax.gov.cn/", verify=False)  # skip certificate verification
            print(resp.status_code)
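verify does not have to be a boolean; it can also point to a CA bundle file if you want to trust a specific certificate chain (the path below is just a placeholder):

```
import requests

# verify can be the path to a trusted CA bundle instead of True/False
resp = requests.get("https://inv-veri.chinatax.gov.cn/", verify="/path/to/ca_bundle.pem")
print(resp.status_code)
```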
      
    • Setting a proxy
    # configure a proxy IP
     proxies = {
         "http": "http://47.107.227.104:8888",
         # add an "https" entry as well if the target URL uses https
     }
     resp = requests.get("https://www.baidu.com/", proxies=proxies)
     print(resp.status_code)
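If the proxy requires credentials, they can be embedded in the proxy URL, and an https entry is needed for https:// targets; a sketch with placeholder values:

```
import requests

proxies = {
    "http": "http://user:password@10.10.1.10:3128",
    "https": "http://user:password@10.10.1.10:3128",
}
resp = requests.get("http://httpbin.org/ip", proxies=proxies)
print(resp.status_code)
```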
    
    • Setting a timeout
    # set a timeout
     try:
         resp = requests.get("https://www.baidu.com/", timeout=0.001)
         print(resp.status_code)  # requests.exceptions.ConnectTimeout
     except Exception as e:
         print(e)
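timeout can also be a (connect, read) tuple so the connect and read phases get separate limits; a small sketch:

```
import requests

try:
    # 3.05 s to establish the connection, 10 s to read the response
    resp = requests.get("https://www.baidu.com/", timeout=(3.05, 10))
    print(resp.status_code)
except requests.exceptions.Timeout as e:
    print(e)
```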
    
    • Authentication
    # pass auth=(user, password) as a tuple
    resp = requests.get("url", auth=("user", "pwd"))
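The tuple form is shorthand for HTTP Basic auth; the explicit version uses requests.auth.HTTPBasicAuth. A sketch against httpbin's basic-auth endpoint (the credentials are placeholders):

```
import requests
from requests.auth import HTTPBasicAuth

resp = requests.get("http://httpbin.org/basic-auth/user/pwd",
                    auth=HTTPBasicAuth("user", "pwd"))
print(resp.status_code)  # 200 when the credentials match the ones in the endpoint URL
```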
    
    • Prepared Request objects
      • introduces the Request object
        s = requests.Session()  # instantiate a Session object
        req = requests.Request("GET", "https://www.baidu.com/")  # build the Request object
        pre = s.prepare_request(req)  # let the session prepare the request
        resp = s.send(pre)  # send the prepared request and get the response
        print(resp.status_code)
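The point of preparing a request is that it can be inspected or tweaked before it is sent; a small sketch using httpbin:

```
import requests

s = requests.Session()
req = requests.Request("POST", "http://httpbin.org/post", data={"name": "Arale"})
pre = s.prepare_request(req)
print(pre.method, pre.url)  # the prepared request can be inspected before sending
print(pre.body)             # name=Arale
resp = s.send(pre)
print(resp.status_code)
```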
      

Regular expressions

```
import re

text = "Extra stings Hello 123456 world_This is a Regex Demo Exrea stings"

# + means one or more
# .*? is non-greedy
# \w{10} matches exactly 10 word characters (letters, digits, underscore); {4,10} would be greedy and take 10
# () creates capture groups for extraction
result = re.match(r"^extra.*?(\d+)\s(\w{10})", text, re.I)

print(result.group())  # the full matched text
print(result.span())  # the span of the match within the original string
print(result.group(1))  # the first capture group
print(result.group(2))  # the second capture group
```
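The greedy vs. non-greedy distinction mentioned in the comments is easiest to see side by side on the same test string:

```
import re

text = "Extra stings Hello 123456 world_This is a Regex Demo Exrea stings"

# .* is greedy: it swallows as much as possible before the final \d+,
# leaving only the last digit for the capture group
print(re.match(r"^Extra.*(\d+)", text).group(1))   # 6

# .*? is non-greedy: it gives up characters as early as possible,
# so the whole number is captured
print(re.match(r"^Extra.*?(\d+)", text).group(1))  # 123456
```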

Scraping Maoyan movies

```
import requests
import re
import time
import xlwt


# fetch the page
def get_page(url, headers):
    resp = requests.get(url, headers=headers)
    return resp.text


# parse the returned HTML
def parse_res(res):
    '''<dd>
        <i class="board-index board-index-1">1</i>
        <a href="/films/1203" title="霸王别姬" class="image-link" data-act="boarditem-click" data-val="{movieId:1203}">
          <img src="//s0.meituan.net/bs/?f=myfe/mywww:/image/loading_2.e3d934bf.png" alt="" class="poster-default">
          <img data-src="https://p0.meituan.net/movie/223c3e186db3ab4ea3bb14508c709400427933.jpg@160w_220h_1e_1c" alt="乱世佳人" class="board-img" />
        </a>
        <div class="board-item-main">
          <div class="board-item-content">
            <div class="movie-item-info">
                <p class="name"><a href="/films/1203" title="霸王别姬" data-act="boarditem-click" data-val="{movieId:1203}">霸王别姬</a></p>
                <p class="star">
                    主演:张国荣,张丰毅,巩俐
                </p>
                <p class="releasetime">上映时间:1993-01-01</p>
            </div>
            <div class="movie-item-number score-num">
                <p class="score">
                    <i class="integer">9.</i>
                    <i class="fraction">5</i>
                </p>
            </div>
          </div>
        </div>
       </dd>
    '''
    par = re.compile(r'<dd>.*?'
                     r'<i.*?>(\d+)</i>'
                     r'.*?data-src="(.*?)"'
                     r'.*?class="name"><a .*?>(.*?)</a>'
                     r'.*?class="star">(.*?)</p>'
                     r'.*?releasetime">(.*?)</p>'
                     r'.*?class="integer">(.*?)</i>'
                     r'.*?class="fraction">(.*?)</i>'
                     r'.*?</dd>', re.S)
    items = re.findall(par, res)  # returns a list of tuples
    for item in items:
        # yield each row from a generator; alternatively build tuples, collect them in a list and return it
        # TODO: this row format is what the Excel writer expects
        yield [
            item[0],
            item[1],
            item[2],
            item[3].strip()[3:],  # drop the "主演:" prefix
            item[4][5:],          # drop the "上映时间:" prefix
            item[5] + item[6]     # integer part + fractional part of the score
        ]


# write the results to an Excel file
def write_to_excel(items):
    # create a workbook
    excel = xlwt.Workbook()
    # add a worksheet
    sheet = excel.add_sheet("电影排名")
    # header row
    head = ["序号", "海报", "名称", "主演", "上映时间", "评分"]
    # write the header into the first row
    for index, value in enumerate(head):
        sheet.write(0, index, value)
    # write the data rows
    for row, item in enumerate(items, 1):  # rows, starting below the header
        for col in range(0, len(item)):  # columns
            sheet.write(row, col, item[col])

    excel.save("./猫眼电影排名.xls")  # xlwt writes the legacy .xls format


# program entry point
def main(offset):
    url = "https://maoyan.com/board/4?offset=" + str(offset)
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36"
    }
    res = get_page(url, headers)  # fetch the page
    items = parse_res(res)  # parse the result; returns a generator
    # note: don't consume the generator here (e.g. items.__next__()), or the first movie of each page is lost
    return items  # return the parsed results as a generator



if __name__ == "__main__":
    items = []
    for i in range(0, 10):
        item = main(i * 10)  # crawl page by page (offset increases by 10)
        items += list(item)  # concatenate each page's results
        time.sleep(1)  # sleep one second between pages to avoid anti-scraping blocks
    write_to_excel(items)  # write everything in one go; saving page by page would overwrite the file
```