Check-in: 2-2 Scraping Second-hand Phone Numbers

2016-08-01  早禾

Goal

Scrape the detail-page URLs here, plus the information on each detail page.


http://bj.58.com/shoujihao/ (the detail pages and the fields to scrape)
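The listing is paginated, so the crawl runs in two passes: first walk the list pages and store every detail-page link in MongoDB, then visit each stored link and scrape the details. A minimal sketch of the list-page URLs the first pass iterates over (the same pn{}/ pattern used in the code below):

# List pages follow the pattern http://bj.58.com/shoujihao/pn<N>/
start_url = 'http://bj.58.com/shoujihao/'
list_page_urls = [start_url + 'pn{}/'.format(i) for i in range(1, 150)]
# e.g. ['http://bj.58.com/shoujihao/pn1/', 'http://bj.58.com/shoujihao/pn2/', ...]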

Difficulties

Source code

Code to collect the detail-page URLs
from bs4 import BeautifulSoup
import requests
import time
import pymongo
client = pymongo.MongoClient('localhost',27017)
phone = client['phone']
phone_urls = phone['phone_urls']

start_url = 'http://bj.58.com/shoujihao/'
end = False
def get_phone_urls(page, url):
    # Fetch one listing page and collect the link of every detail page on it
    wb_data = requests.get(url)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    urls = soup.select('#infolist > div > ul > div.boxlist > ul > li > a.t')
    print(len(urls))
    if len(urls) == 0:
        # An empty page means we have run past the last listing page
        global end
        end = True
    else:
        for url in urls:
            phone_urls.insert_one({'page': page, 'url': url.get('href')})

for i in range(1, 150):
    if not end:
        time.sleep(1)
        get_phone_urls(i, start_url + 'pn{}/'.format(i))
        print('Page = ', i, ' done')
        time.sleep(1)
    else:
        break
print('End')
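
With the first script finished, every record in the phone_urls collection is a small dict holding the list-page number and the detail-page link. A quick spot-check of what was stored (a minimal sketch, reusing the same phone database and phone_urls collection as above):

import pymongo

client = pymongo.MongoClient('localhost', 27017)
phone_urls = client['phone']['phone_urls']

# Each document looks like {'page': 1, 'url': 'http://bj.58.com/shoujihao/...'}
for record in phone_urls.find().limit(5):
    print(record['page'], record['url'])
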
Code to scrape the detail pages
from bs4 import BeautifulSoup
import requests
import time
import pymongo

client = pymongo.MongoClient('localhost',27017)
phone = client['phone']
phone_urls = phone['phone_urls']
phone_details = phone['phone_details']
count = 0

def get_url_from(page):
    # Pull the stored links for one listing page, keeping only links
    # whose host is bj.58.com
    url_list = []
    for i in phone_urls.find({'page': page}):
        url = i['url']
        if 'bj.58.com' in url.split('/'):
            url_list.append(url)
    return url_list

def get_phone_detail(url):
    # Fetch one detail page and extract the fields we want to store
    resp = requests.get(url)
    soup = BeautifulSoup(resp.text, 'lxml')
    titles = soup.select('#main > div.col.detailPrimary.mb15 > div.col_sub.mainTitle > h1')
    prices = soup.select('span.price')
    areas = soup.select('#main > div.col.detailPrimary.mb15 > div.col_sub.sumary > ul > li:nth-of-type(2) > div.su_con')
    names = soup.select('li > a.tx')
    phones = soup.select('span[id="t_phone"]')
    # The <h1> mixes the number, the number type and the page title on
    # separate lines, so split it and pick out the pieces we need
    title = titles[0].get_text().replace(' ', '').split('\n') if titles else None
    sell_phone = title[2] if titles else None
    sell_kind = title[5].strip('\t') if titles else None
    sell_title = title[6].strip('\t') if titles and title[6] != '' else None
    price = prices[0].get_text().replace('\n', '').replace(' ', '').strip('\t') if prices else None
    area = areas[0].get_text().strip('\n') if areas else None
    name = names[0].get_text().strip('\n') if names else None
    phone = phones[0].get_text().replace('\n', '').strip('\t') if phones else None
    zidian = {
        '在售号码':sell_phone,
        '号码类型':sell_kind,
        '网页标题':sell_title,
        '销售价格':price,
        '销售地区':area,
        '卖主姓名':name,
        '联系方式':phone
    }
    phone_details.insert_one(zidian)

for i in range(1, 117):
    # Read the detail-page URLs saved for page i
    url_list = get_url_from(i)
    print('Page ', i, ' start:')
    count = 0
    for url in url_list:
        time.sleep(2)
        count = count + 1
        print(count)
        get_phone_detail(url)
    print('Page ', i, ' done')
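
Once the detail scraper has run, each listing is one document in phone_details, keyed by the Chinese field names from the zidian dict above. Reading the results back out (a minimal sketch, reusing the same phone database and phone_details collection):

import pymongo

client = pymongo.MongoClient('localhost', 27017)
phone_details = client['phone']['phone_details']

# Print the number on sale, its price and the area for a few records
for detail in phone_details.find().limit(3):
    print(detail['在售号码'], detail['销售价格'], detail['销售地区'])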

Summary

Output

Detail-page URLs and detail-page information, stored in the phone_urls and phone_details collections.