Learning Python Together

Building an IP Proxy Pool to Crawl Girl Photos

2016-12-15  蜗牛仔

Create three files in the same directory: meizi.py (the crawler), download.py (the proxy pool used for image downloads), and ip_request.py (the proxy pool used for page requests).

The download pool pulls its IPs from the haoip site, at http://haoip.cc/tiqu.htm. The other pool is built from IPs provided by Xici (西刺).
Here is the code for the crawler file, meizi.py:
import os
import requests
import re
from ip_request import html_request
from download import download_request
from bs4 import BeautifulSoup

class meizi2():
    def get_links(self, url):
        content = self.request(url)
        all_data = content.find('div', {'class': 'all'}).findAll('a')  # find each album's link and title
        for data in all_data:
            title = data.get_text()  # extract the title
            a = r"[\/\\\:\*\?\"\<\>\|]"  # regex for characters that are illegal in file names
            title_clear1 = re.sub(a, '', title)
            self.mkdir(title_clear1)  # create a folder for each album
            os.chdir('C:\\Users\\admin\\Desktop\\mzitu\\{}'.format(title_clear1))  # switch into that folder (must use the cleaned title, or the path may not exist)
            url = data['href']
            self.img_urls(url)

    def img_urls(self, url):  # collect and download every image in one album
        n = 1
        html = self.request(url)
        max_page = html.find_all('span')[10].get_text()  # the album's max page number sits in the span tag at index 10
        for page in range(1, int(max_page) + 1):  # walk every page of the album (credit to 曹哥 for this idea)
            link = url + '/' + str(page)
            data = self.request(link)
            img_link = data.find('div', class_='main-image').find('img')['src']  # the image link on this page
            # alternative selector; select() returns a list, so take [0] first, then read the src attribute:
            # img_link = data.select('body > div.main > div > div > p > a > img')[0]['src']
            self.download(img_link)
            print('Downloaded {} images'.format(n))
            n += 1  # running count of downloaded images

    def download(self, url):  # download a single image from its URL
        with open(url[-9:-4] + '.jpg', 'ab') as f:  # must write in binary mode
            f.write(download_request.get(url, 3))

    def request(self, url):  # request helper; it is used in several places, so wrapping it keeps things tidy
        html = html_request.get(url, 3)
        data = BeautifulSoup(html, 'lxml')
        return data

    def mkdir(self, path):  # create the album folder if it does not exist yet
        isExist = os.path.exists(os.path.join('C:\\Users\\admin\\Desktop\\mzitu', path))
        if not isExist:
            print('Creating folder {}'.format(path))
            os.makedirs(os.path.join('C:\\Users\\admin\\Desktop\\mzitu', path))
            return True
        else:
            print('Folder {} already exists'.format(path))
            return False

meizi=meizi2()
meizi.get_links('http://www.mzitu.com/all')

Here is download.py, the IP proxy pool built from the haoip list:

import requests
import re
import random
import time

class download():
    def __init__(self):
        self.ip_list = []
        html = requests.get('http://haoip.cc/tiqu.htm')
        all_ip = re.findall(r'r/>(.*?)<b', html.text, re.S)  # pull the IPs out of the page source
        for i in all_ip:
            ip = re.sub('\n', '', i)  # drop embedded newlines
            self.ip_list.append(ip.strip())
        self.user_agent_list=[
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
        ]
    def get(self, url, timeout, proxy=None, num_retries=6):
        ran_agent = random.choice(self.user_agent_list)  # random User-Agent for every request
        headers = {'User-Agent': ran_agent}
        if proxy is None:
            try:
                return requests.get(url, headers=headers, timeout=timeout).content
            except requests.RequestException:
                if int(num_retries) > 0:
                    time.sleep(8)
                    print('Request failed; waited 8s, {} retries left'.format(num_retries))
                    # pass num_retries by keyword, otherwise it would land in the proxy slot
                    return self.get(url, timeout, num_retries=int(num_retries) - 1)
                else:
                    print('Switching to a proxy')
                    time.sleep(8)
                    ip = random.choice(self.ip_list).strip()
                    proxy = {'http': ip}
                    return self.get(url, timeout, proxy=proxy)
        else:
            try:
                return requests.get(url, headers=headers, proxies=proxy, timeout=timeout).content
            except requests.RequestException:
                if num_retries > 0:
                    time.sleep(8)
                    ip = random.choice(self.ip_list).strip()
                    proxy = {'http': ip}
                    print('Switching proxy; waited 8s, {} retries left'.format(num_retries))
                    print('Current proxy:', proxy)
                    return self.get(url, timeout, proxy, int(num_retries) - 1)
                else:
                    print('The proxies stopped working too; dropping the proxy')
                    return self.get(url, 3)


download_request=download()
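
Note that this get() returns the raw response bytes (.content), which is why meizi.py can write the return value straight into a .jpg file. A minimal smoke test, my own sketch (the URL is just an example):

if __name__ == '__main__':
    # Hypothetical check: fetch one page through the pool and report its size.
    data = download_request.get('http://www.mzitu.com', 3)
    print('fetched {} bytes'.format(len(data)))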

Finally, here is ip_request.py, the IP pool built from the Xici (西刺) proxy list:

import re
import requests
import random
import time
class download():
    def __init__(self):
        self.all_ip = []
        html = requests.get('http://api.xicidaili.com/free2016.txt')  # Xici's free proxy list, one IP per line
        for ip in html.text.split('\n'):
            ip = ip.replace('\r', '').strip()  # strip Windows line endings
            if ip:  # skip blank lines so random.choice never yields an empty proxy
                self.all_ip.append(ip)
        self.user_agent_list = [
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
        ]
    def get(self, url, timeout, proxy=None, num_retries=6):
        ran_user = random.choice(self.user_agent_list)  # random User-Agent for every request
        headers = {'User-Agent': ran_user}
        if proxy is None:
            try:
                return requests.get(url, headers=headers, timeout=timeout).text
            except requests.RequestException:
                if num_retries > 0:
                    time.sleep(10)
                    print('Request failed, {} retries left'.format(num_retries))
                    # pass num_retries by keyword, otherwise it would land in the proxy slot
                    return self.get(url, timeout, num_retries=int(num_retries) - 1)
                else:
                    print('Switching to a proxy IP')
                    ip = random.choice(self.all_ip).strip()
                    proxy = {'http': ip}
                    return self.get(url, timeout, proxy=proxy)

        else:
            try:
                # the requests keyword is proxies, not proxy
                return requests.get(url, headers=headers, timeout=timeout, proxies=proxy).text
            except requests.RequestException:
                if num_retries > 0:
                    time.sleep(10)
                    print('Switching IP, {} retries left'.format(num_retries))
                    ip = random.choice(self.all_ip).strip()
                    proxy = {'http': ip}
                    print('Current proxy:', proxy)
                    return self.get(url, timeout, proxy, int(num_retries) - 1)
                else:
                    print('The proxies stopped working too; dropping the proxy')
                    return self.get(url, 10)
html_request=download()
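
The two pool classes are nearly identical; the main difference is that this get() returns the decoded page source (.text) for BeautifulSoup to parse, while download.py returns bytes for writing image files. A matching smoke test, again my own sketch:

if __name__ == '__main__':
    # Hypothetical check: fetch the album index through the pool.
    page = html_request.get('http://www.mzitu.com/all', 3)
    print(page[:200])  # show the start of the page source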

One thing I still don't understand: when I build only a single IP proxy pool, the crawl keeps breaking partway through, and I have no idea why. I hope someone more experienced can explain. I'll add the remaining code comments when I find the time.
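
One guess (unverified): the free IPs on these lists go dead very quickly, so a random pick from the pool often lands on a proxy that no longer works, and every request then burns through all of its retries. Checking each IP once before putting it into the pool might make things more stable. A minimal sketch; the test URL and the 3-second timeout are arbitrary choices of mine:

import requests

def is_alive(ip, test_url='http://www.baidu.com', timeout=3):
    # Fire one request through the proxy; any exception counts as a dead IP.
    try:
        requests.get(test_url, proxies={'http': ip}, timeout=timeout)
        return True
    except requests.RequestException:
        return False

# Hypothetical usage inside __init__: keep only the IPs that pass the check.
# self.all_ip = [ip for ip in self.all_ip if is_alive(ip)]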
Here is a screenshot of the results. There are a lot of albums this time, and the crawl is still running.

[screenshot: crawl results]