Content URL Spider

2016-05-19  ooocoo
import time

import pymongo
import requests
from bs4 import BeautifulSoup

# Assumed MongoDB setup; the host, database, and collection names are
# placeholders for whatever the surrounding project uses.
client = pymongo.MongoClient('localhost', 27017)
url_list = client['spider']['url_list']

def get_links_from(channel, pages, who_sells=0):
    # Build the list-page URL, e.g. '<channel>0/pn2/'
    list_view = '{}{}/pn{}/'.format(channel, str(who_sells), str(pages))
    wb_data = requests.get(list_view)
    time.sleep(1)  # throttle requests to avoid hammering the site
    soup = BeautifulSoup(wb_data.text, 'lxml')
    if soup.find('td', 't'):  # a normal listing page contains <td class="t"> cells
        for link in soup.select('td.t a.t'):
            item_link = link.get('href').split('?')[0]  # strip the query string
            if 'zhuanzhuan' not in item_link:  # skip zhuanzhuan redirect links
                url_list.insert_one({'url': item_link})
                print(item_link)
    else:
        # No listing cells found: we have run past the last page.
        pass
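
For context, a minimal usage sketch is shown below: it walks one listing channel page by page until get_links_from stops finding items. The channel URL and page range are illustrative placeholders, not taken from the original post.

# Illustrative only: crawl pages 1-10 of one hypothetical channel.
if __name__ == '__main__':
    channel = 'http://example.com/shouji/'  # placeholder channel URL
    for page in range(1, 11):
        get_links_from(channel, page)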