
Python Scraper: Fetching Python Job Listings from Lagou

2020-04-20  复苏的兵马俑

A. What it does:
  1. Fetches Python job listings from Lagou and stores them in a CSV file;
  2. Is implemented with an object-oriented design;
  3. Uses Selenium with chromedriver to fetch page content.

B. Modules used: selenium, re, lxml, time, and csv.
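
The spider below leans on Selenium's explicit waits: rather than sleeping for a fixed interval, it blocks until a target element has rendered. A minimal, standalone sketch of that pattern, assuming chromedriver is discoverable on your PATH and using https://example.com as a stand-in URL:

from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()  # assumes chromedriver is on PATH
driver.get('https://example.com')
# Block for up to 10 seconds until an <h1> element exists in the DOM,
# then read the fully rendered page source.
WebDriverWait(driver, timeout=10).until(
    EC.presence_of_element_located((By.XPATH, '//h1')))
source = driver.page_source
driver.quit()

The same WebDriverWait + expected_conditions pair guards every page load in the class below.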

from selenium import webdriver
from lxml import etree
import re
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import csv

class LagouSpider(object):
    driver_path = r"D:\chromedriver\chromedriver.exe"
    def __init__(self):
        self.driver = webdriver.Chrome(executable_path=LagouSpider.driver_path)
        self.url = 'https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput='
        header = ['position_name', 'company_name', 'salary', 'city', 'work_years', 'education', 'desc']
        # Append mode preserves earlier results, but note the header row is
        # written again on every run.
        fp = open('lagou.csv', 'a', encoding='utf-8', newline='')
        self.writer = csv.DictWriter(fp, header)
        self.writer.writeheader()


    def run(self):
        self.driver.get(self.url)
        while True:
            # Wait for the pager to render before grabbing the page source,
            # otherwise a partially loaded list page may be parsed.
            WebDriverWait(driver=self.driver, timeout=10).until(EC.presence_of_element_located((By.XPATH, "//div[@class='pager_container']/span[last()]")))
            source = self.driver.page_source
            self.parse_list_page(source)
            # Dismiss the subscription pop-up and the "show data" overlay when
            # they appear; both are intermittent, so missing elements are skipped.
            try:
                confirm_btn = self.driver.find_element_by_xpath("//div[@class='modal-footer']/button")
                confirm_btn.click()
            except NoSuchElementException:
                pass
            try:
                body_btn = self.driver.find_element_by_class_name('body-btn')
                body_btn.click()
            except NoSuchElementException:
                pass
            next_btn = self.driver.find_element_by_xpath("//div[@class='pager_container']/span[last()]")
            # A disabled class on the pager control marks the last page.
            if "pager_next_disabled" in next_btn.get_attribute("class"):
                break
            else:
                next_btn.click()
            time.sleep(1)

    def parse_list_page(self, source):
        html = etree.HTML(source)
        links = html.xpath("//a[@class='position_link']/@href")
        for link in links:
            self.request_detail_page(link)
            time.sleep(1)

    def request_detail_page(self, url):
        self.driver.execute_script("window.open('{}')".format(url))
        self.driver.switch_to.window(self.driver.window_handles[1])
        # Wait for the job title to render before reading the page source.
        WebDriverWait(self.driver, timeout=10).until(EC.presence_of_element_located((By.XPATH, "//div[@class='job-name']")))
        source = self.driver.page_source
        self.parse_detail_page(source)
        # Close the current detail tab.
        self.driver.close()
        # Switch back to the job-list tab.
        self.driver.switch_to.window(self.driver.window_handles[0])

    def parse_detail_page(self, source):
        html = etree.HTML(source)
        position_name = html.xpath("//div[@class='job-name']/@title")[0]
        job_request_spans = html.xpath("//dd[@class='job_request']//span")
        salary = job_request_spans[0].xpath('.//text()')[0].strip()
        city = job_request_spans[1].xpath('.//text()')[0].strip()
        city = re.sub(r"[\s/]", "", city)
        work_years = job_request_spans[2].xpath('.//text()')[0].strip()
        work_years = re.sub(r"[\s/]", "", work_years)
        education = job_request_spans[3].xpath('.//text()')[0].strip()
        education = re.sub(r"[\s/]", "", education)
        desc = "".join(html.xpath("//dd[@class='job_bt']//text()")).strip()
        desc = re.sub(r"\s", "", desc)
        company_name = html.xpath("//h3[@class='fl']/text()")[0].strip()
        position = {
            'position_name': position_name,
            'company_name': company_name,
            'salary': salary,
            'city': city,
            'work_years': work_years,
            'education': education,
            'desc': desc
        }
        print(position)
        print("*" * 100)
        self.writer.writerow(position)

if __name__ == '__main__':
    spider = LagouSpider()
    spider.run()
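
A note on the API: the code above targets Selenium 3. Selenium 4 removed the executable_path argument and the find_element_by_* helpers, so the driver setup and element lookups would use the Service/By forms instead. A minimal sketch of the equivalent setup, reusing the chromedriver path from the class:

from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

# Selenium 4 passes the driver path through a Service object.
service = Service(r"D:\chromedriver\chromedriver.exe")
driver = webdriver.Chrome(service=service)
# find_element_by_xpath(...) becomes find_element(By.XPATH, ...).
next_btn = driver.find_element(By.XPATH, "//div[@class='pager_container']/span[last()]")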