Crawler Proxies

2022-01-06  无量儿

https://blog.csdn.net/LawLietW/article/details/122005185


import time
import random
import requests
import urllib3
from requests.adapters import HTTPAdapter

# verify=False is used below, so silence the InsecureRequestWarning it triggers.
urllib3.disable_warnings()


USER_AGENTS = [
    "Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
    "Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10"
]

headers = {
    "User-Agent": ""  # filled in per request with a random choice from USER_AGENTS
}
# Rotate the User-Agent strings above to get past UA-based anti-scraping checks.
s = requests.Session()
s.headers["Connection"] = "close"  # don't reuse connections across different proxies
# Retry each request up to 10 times at the transport level.
adapter = HTTPAdapter(max_retries=10)
s.mount("http://", adapter)
s.mount("https://", adapter)

url = "https://baike.baidu.com/item/人工智能/9180?fromtitle=AI&fromid=25417&fr=aladdin"
for i in range(10):
    # new_ips is the list of proxy IPs loaded earlier (that code is not shown here)
    proxies = {
        "https": "http://" + new_ips[i],
        "http": "http://" + new_ips[i]
    }
    headers["User-Agent"] = random.choice(USER_AGENTS)
    print(proxies)
    print(headers["User-Agent"])
    try:
        resp = s.get(url, headers=headers, verify=False, proxies=proxies, timeout=20)
        print(resp.content.decode("utf-8"))
    except requests.exceptions.RequestException as e:
        print("proxy", new_ips[i], "failed:", e)

    time.sleep(5)  # pause between requests to avoid hammering the server
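
The code that loads new_ips is omitted above. As a minimal sketch, assuming the proxy addresses sit one "ip:port" per line in a local text file (the filename proxies.txt and that format are assumptions, not from the original post), it could look like this:

# Assumed helper: read proxy addresses, one "ip:port" per line,
# from a local file named proxies.txt (filename and format are illustrative).
with open("proxies.txt", encoding="utf-8") as f:
    new_ips = [line.strip() for line in f if line.strip()]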
