Python_kad爬虫学习笔记(二)
本想着一步到位,把bug都解决后再发布,无奈碰到个问题困扰了好几天,就先发布下目前的版本吧,做下阶段备份,代码如下:
#-*- coding: utf-8 -*-
import urllib2
import re
import time
# --- Build the category (tag) lists from the site's "all classes" page ---
# Fetch the page and cut out the span containing the category links.
f = urllib2.urlopen('http://www.360kad.com/dymhh/allclass.shtml').read()
n1 = f.find('<dt><span><a href="http://www.360kad.com/Category_47/Index.aspx" target="_blank">')
n2 = f.find('4057/Index.aspx" target="_blank">')
n3 = f[n1:(n2 + 90)]
# Strip HTML comments, then keep only the <dd>...</dd> entries.
n4 = re.sub(r'<!--[\s\S]*?-->', '', n3)
n5 = re.findall(r'<dd>[\s\S]*?</dd>', n4)
joined = ''.join(n5)
# Category links.  re.findall already returns a list, so the original
# element-by-element copy loops are unnecessary.
w1 = re.findall(r'http://www.360kad.com/Category_\d{1,}/Index.aspx', joined)
# Tag (category) names: take each anchor's text, one name per line.
e2 = re.findall(r'blank">\S{1,}?</a>', joined)
e3 = ''.join(e2).replace('blank">', '').replace('</a>', '\n')
w2 = e3.split('\n')
# Interleaved link+name list.  len(w2)-1 deliberately skips the trailing
# empty string produced by split('\n') on a string ending in '\n'.
w = []
for i in range(len(w2) - 1):
    w.append(w1[i])
    w.append(w2[i])
# Numeric category id suffixes like '_47', used to build page URLs below.
kad_tag = re.findall(r'_\d{1,}', joined)
#每个
class Page_list:
    """Fetches the raw HTML of one listing page of one category."""
    def __init__(self):
        # Base URL shared by every category listing page.
        self.url1 = 'http://www.360kad.com/Category'

    def request_open(self, n1, n2):
        """Download page n2+1 of category kad_tag[n1] and return its HTML."""
        self.url2 = '/Index_%d.aspx' % (n2 + 1)
        full_url = self.url1 + kad_tag[n1] + self.url2
        self.page1 = urllib2.urlopen(full_url).read()
        return self.page1
#页码数
class Next_page:
    """Determines how many listing pages a category has."""
    def __init__(self):
        self.url1 = 'http://www.360kad.com/Category'

    def np(self, n1):
        """Return the page count of category kad_tag[n1] (1 when there is
        no pager, i.e. the '尾页' last-page link is absent)."""
        self.page1 = urllib2.urlopen(self.url1 + kad_tag[n1] + '/Index.aspx').read()
        # Capture the page number with a regex group instead of slicing a
        # fixed byte count off the match ([1:-17]) -- the old slice silently
        # relied on '尾页' being exactly 6 UTF-8 bytes.  Also escape the '.'
        # before 'aspx', which was previously matching any character.
        self.url2 = re.findall(r'_(\d{1,})\.aspx">尾页</a>', self.page1)
        if self.url2:
            self.num2 = self.url2[-1]
            return int(self.num2)
        else:
            return 1
# Shared fetcher instance used by Page_info.m() below.
page_list=Page_list()
# Accumulates one [name, spec, price, url] record per product across ALL
# tags.  NOTE: it is never cleared between tags, which is why any per-tag
# save of this list also contains the previous tags' records.
page_text=[]
#具体每个页面的产品名、规格、价格、链接
class Page_info:
    """Extracts product name, spec, price and URL from one listing page."""

    def __init__(self):
        # BUG FIX: the original defined __info__, a typo for __init__,
        # so it was never called as a constructor.  Nothing to set up.
        pass

    def m(self, n1, n2):
        """Scrape page n2+1 of category kad_tag[n1] and append one
        [name, spec, price, url] record per product to the module-level
        page_text list.  NOTE: page_text is never cleared here, so records
        accumulate across calls and across tags.
        """
        # BUG FIX: fetch the page ONCE.  The original called
        # page_list.request_open(n1, n2) four times, downloading the
        # same page four times per scrape.
        page = page_list.request_open(n1, n2)
        # Product names (slice strips 'title="' prefix and the
        # '" rel="nofollow" class' suffix of each match).
        self.page_name2 = []
        self.page_name1 = re.findall('title=".*?" rel="nofollow" class', page)
        for x in range(len(self.page_name1)):
            self.page_name2.append(self.page_name1[x][7:-22])
        # Specs.
        self.page_num2 = []
        self.page_num1 = re.findall('"num">.*?<', page)
        for i in range(len(self.page_num1)):
            self.page_num2.append(self.page_num1[i][6:-2])
        # Prices; a 'style' attribute in the match marks a product with
        # no visible price.
        self.page_price2 = []
        self.page_price1 = re.findall('"price">\S{1,}</span>|priceR">\S{1,}?<|<p class="vip_pric"[\s\S]{1,}</p>', page)
        for s in self.page_price1:
            self.p2 = re.findall('style', s)
            if self.p2:
                self.page_price2.append('没有价格')
            else:
                self.p3 = re.findall('\d{1,}\.\d{1,}', s)
                self.page_price2.append(self.p3[-1])
        # Product URLs.  Loop variable renamed from 'f', which shadowed
        # the module-level HTML string of the same name.
        self.page_url2 = []
        self.page_url1 = re.findall('<a class="name".*?title=', page)
        self.u2 = re.findall('/product/\d{1,}.shtml', ''.join(self.page_url1))
        for path in self.u2:
            self.page_url2.append('http://www.360kad.com' + path)
        # Assemble one record per product and push it onto the shared list.
        for i in range(len(self.page_name2)):
            self.dic = [self.page_name2[i], self.page_num2[i],
                        self.page_price2[i], self.page_url2[i]]
            page_text.append(self.dic)
page_info=Page_info()
next_page=Next_page()
for x in range(len(w2)-375):#为减少工作量,就简单提取3个标签验证程序
print'正在抓取tag%s中的内容'%(w2[x])
starttime2=time.time()
for i in range(next_page.np(x)):
print'开始抓取第%d页,抓取进度:'%(i+1)
starttim2=time.time()
page_info.m(x,i)#page_text.append(self.dic)会叠加
endtime2=time.time()
print'抓取第%d页完毕,用时%.2fs'%(i+1,endtime2-starttime2)
time.sleep(4)
'''w4=str(w2[x]).decode('utf-8')
t=file('%s.txt'%w4,'w')
#print type(page_text)
page_text0=''.join('%s' %id for id in page_text)
#page_text1=page_text0.decode('utf-8')
t.write(page_text0)
t.close()'''#tag保存的内容会叠加,没有搞清楚怎么解决
endtime2=time.time()
print '抓取%s标签完毕,用时%.2fs'%(w2[x],endtime2-starttime2)
f=file('KAD_ALL.html','w')
f.write('<!DOCTYPE html>\n<html>\n<head>\n')
f.write('<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n')
f.write('</head>\n\n<body>\n')
s=1
for i in page_text:
f.write('<p>'+str(s)+'. '+'<a href=\"'+i[3]+'\">'+i[0]+'</a>'+',规格:'+i[1]+'价格:'+i[2]+'\n')
s=s+1
f.write('</body>')
f.close()
print'抓取完成,请查看'
因为工程量略大,所以想分tag保存,避免后期出问题,之前的工作还得重新来。结果出现append内容叠加的情况,就是说第二个tag保存的内容内也包含了之前的第一个tag的内容。原因在于 page_text 是模块级列表,Page_info.m() 只往里 append,切换 tag 时既没有清空它,也没有记录本 tag 记录的起始下标,所以每次保存的都是从头累积的全部内容;在每个 tag 开始前记下 tag_start = len(page_text),保存时只写 page_text[tag_start:] 即可解决。