Common Crawler Code Snippets

2018-09-29  Ziger丶

Remove spaces and non-breaking spaces (\xa0) from a string.

i = ''.join(i.split(' '))      # drop ordinary spaces
i = i.replace(u'\xa0', u'')    # drop non-breaking spaces
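
A quick before/after check of the same two steps (the sample string below is made up purely for illustration):

s = u'Price:\xa0 1,200 CNY'
s = ''.join(s.split(' '))
s = s.replace(u'\xa0', u'')
print(s)  # Price:1,200CNY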

Avoid IP access restrictions while crawling

import requests

requests.adapters.DEFAULT_RETRIES = 5  # raise the retry count
r = requests.session()
r.keep_alive = False  # close redundant connections
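
An alternative sketch that reaches the same goal through requests' documented Retry/HTTPAdapter API instead of the module-level constant; none of this appears in the original snippet:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
retry = Retry(total=5, backoff_factor=0.5,
              status_forcelist=[500, 502, 503, 504])
session.mount('http://', HTTPAdapter(max_retries=retry))
session.mount('https://', HTTPAdapter(max_retries=retry))
session.headers['Connection'] = 'close'  # do not keep connections alive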

Check whether a proxy IP works

proxies = {
    'http': 'http://183.129.207.77',
    'https': 'https://203.130.46.108'
}

url = 'http://www.jjwxc.net/'
try:
    page = requests.get(url, timeout=10, proxies=proxies)
except requests.exceptions.RequestException:
    print('Proxy failed')
else:
    print('Proxy OK')
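
A 200 from the target page alone does not prove the request really left through the proxy. A rough way to verify that, sketched here with httpbin.org as a throwaway echo endpoint (the proxy address is only an example):

import requests

def proxy_works(proxy_ip, timeout=10):
    # ask httpbin to echo the origin IP and compare it with the proxy's address
    proxies = {'http': 'http://' + proxy_ip, 'https': 'http://' + proxy_ip}
    try:
        resp = requests.get('http://httpbin.org/ip', proxies=proxies, timeout=timeout)
        return resp.json().get('origin', '').startswith(proxy_ip.split(':')[0])
    except requests.exceptions.RequestException:
        return False

print(proxy_works('183.129.207.77:80'))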

Get a random proxy IP from Xici (西刺代理)

import requests
from bs4 import BeautifulSoup

def get_ip_list():
    url = 'http://www.xicidaili.com/nn/'
    headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36"
    }
    web_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(web_data.text, 'lxml')
    ips = soup.find_all('tr')              # each table row holds one proxy record
    ip_list = []
    for i in range(1, len(ips)):           # skip the header row
        ip_info = ips[i]
        tds = ip_info.find_all('td')
        ip_list.append(tds[1].text + ':' + tds[2].text)  # ip:port
    return ip_list


import random

def get_random_ip(ip_list):
    proxy_list = []
    for ip in ip_list:
        proxy_list.append('http://' + ip)
    proxy_ip = random.choice(proxy_list)   # pick one proxy at random
    proxies = {'http': proxy_ip}
    return proxies
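
Wiring the two helpers together looks roughly like this (the target URL is just the example used earlier, and the Xici listing must still be reachable):

ip_list = get_ip_list()
proxies = get_random_ip(ip_list)
page = requests.get('http://www.jjwxc.net/', proxies=proxies, timeout=10)
print(proxies, page.status_code)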

Get proxy IPs from Kuaidaili (快代理)

import re
import time
import requests
from bs4 import BeautifulSoup

def IP_pool():
    IP = []
    IPS = []
    for i in range(1, 11):                     # crawl the first 10 listing pages
        url = 'https://www.kuaidaili.com/free/inha/' + str(i) + '/'
        try:
            r = requests.get(url, timeout=10)
            soup = BeautifulSoup(r.text, 'lxml')
            ips = soup.find_all('td', attrs={'data-title': 'IP'})
            IP.append(ips)
            print('Crawling page {}'.format(i))
            time.sleep(3)                      # be polite between pages
        except Exception:
            print('Failed to crawl page {}'.format(i))
            continue
    for j in IP:
        if j != []:
            for k in j:
                k = re.findall(r'"IP">(.*?)<', str(k))
                IPS.append(k[0])
    return IPS
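
Typical usage, assuming the Kuaidaili listing pages are still reachable:

IPS = IP_pool()
print('Collected {} proxy IPs'.format(len(IPS)))
print(IPS[:5])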

Test which IPs in the pool are usable

# Custom request headers
headers = {
    'Cookie':'area_region=2; goodsId=2464; area_region=2; goodsId=2464; ECS_ID=664f1cd1f37ba6bfa6bedf430b2c0d1096b2f969; ECS[visit_times]=1; session_id_ip=221.237.152.174_664f1cd1f37ba6bfa6bedf430b2c0d10; area_region=2; goodsId=1463; ECS[history]=2464%2C1463%2C1464%2C1648%2C1312%2C2335%2C1332%2C1235%2C1335%2C1333%2C1334; ECS[list_history]=2464%2C1463%2C1464%2C1648%2C1312%2C2335%2C1332%2C1235%2C1335%2C1333%2C1334; _ga=GA1.2.46935259.1537932797; _gid=GA1.2.16826347.1537932797; _gat_gtag_UA_125099464_1=1; Hm_lvt_0c5d16c4fdfede265f1fe61f241c5c3a=1537932797; Hm_lpvt_0c5d16c4fdfede265f1fe61f241c5c3a=1537947312; province=26; city=322; district=2722',
    'Host':'www.hejiaju.com',
    'Upgrade-Insecure-Requests':'1',
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36'
    }
url = 'http://www.hejiaju.com/goods/2167.html/'

def test_ip(url, headers):
    s_ip = []
    for ip in IPS:                      # IPS comes from IP_pool() above
        proxies = {"http": "http://" + ip, "https": "http://" + ip}  # proxy ip
        try:
            status = requests.get(url, proxies=proxies, headers=headers,
                                  timeout=5).status_code
            if status == 200:
                s_ip.append(ip)
                print(ip + ' True')
            else:
                print(ip + ' False')
        except requests.exceptions.RequestException:
            print(ip + ' False')
    return s_ip
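
Typical usage with the headers and url defined above (IPS comes from IP_pool()):

usable_ips = test_ip(url, headers)
print('{} of {} proxies returned 200'.format(len(usable_ips), len(IPS)))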

Pick a random User-Agent header

headerstr = '''Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)
Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)
Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)
Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1
Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1
Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11
Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11
Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11
Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)
Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'''

import random

def headerChange():
    headerList = headerstr.split('\n')
    return random.choice(headerList)   # pick one User-Agent at random
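
Dropping the random User-Agent into a request might look like this (the URL is just the example used earlier):

import requests

headers = {'User-Agent': headerChange()}
r = requests.get('http://www.jjwxc.net/', headers=headers, timeout=10)
print(r.status_code)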

Download images

import os
import requests

def JPG(X, columns):
    # download every image URL stored in column `columns` of DataFrame X
    number = 1
    for url in X[columns]:
        # local folder where images are stored
        root = "G://python//"
        path = root + url.split("/")[-1]
        try:
            if not os.path.exists(root):
                os.mkdir(root)
            if not os.path.exists(path):
                r = requests.get(url, timeout=10)
                r.raise_for_status()
                # the with statement closes the file automatically
                with open(path, "wb") as f:   # "wb": write binary
                    f.write(r.content)
                print("Downloaded " + '---' * 20 + str(number))
                number += 1
            else:
                print("File already exists")
        except Exception as e:
            print("Download failed: " + str(e))
    return 'Done'
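
A minimal usage sketch, assuming the image URLs live in a pandas DataFrame column; the column name 'img_url' and the URLs below are hypothetical:

import pandas as pd

df = pd.DataFrame({'img_url': [
    'http://example.com/a.jpg',
    'http://example.com/b.jpg',
]})
JPG(df, 'img_url')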
