爬取基于Ajax技术网页数据

2018-12-23  本文已影响0人  田小田txt

爬取动态网页数据:

1.需求:

爬取什么网站获得什么内容,存储在excel、txt与MySQL数据库中。

2.分析:

(此处原文为两张分析截图:浏览器开发者工具中 Ajax 请求的抓包界面与请求参数。)

3.页面案例(拉勾网获取职位信息案例):

  from urllib import request,parse
  import json
  import pymysql
  import time

  def lagouspider(url,formdata):
        response_data = loaddata(url,formdata)#发起请求返回响应结果
      data = json.loads(response_data)#json字符串转化为Python数据类型

      if data['success']:
            print('请求成功')
        #拿到职位信息
           postionjobs = data['content']['positionResult']['result']
           for job in postionjobs:
                jobdata = {}
                jobdata['positionName'] = job['positionName']
                jobdata['publishtime'] = job['formatCreateTime']
                jobdata['companyName'] = job['companyShortName']
                jobdata['city'] = job['city']
                jobdata['Zones'] = job['businessZones'][0]
                jobdata['salary'] = job['salary']
                jobdata['workYear'] = job['workYear']
                jobdata['xueli'] = job['education']
                jobdata['positionAdvantage'] = job['positionAdvantage']
                jobdata['fuli'] = ','.join(job['companyLabelList'])
                jobdata['size'] = job['companySize']
                jobdata['industry'] = job['industryField']
                jobdata['rongzi'] = job['financeStage']
                jobdata['Logo'] = job['companyLogo']
                save_data_to_db(jobdata)
        #判断是否需要发起下一次请求
        #取出当前页码
            cur_page = data['content']['pageNo']
            #每页数据
            page_size = data['content']['pageSize']
            #总共多少条数据
            total_count = data['content']['positionResult']['totalCount']
            if cur_page * page_size < total_count:
                   next_page = cur_page+1
                   print('继续发起请求'+str(next_page)+'页')
                   formdata['pn'] = next_page
                   time.sleep(3)
                   lagouspider(url,formdata)
            else:
                  print('请求不成功')
                  time.sleep(10)
                  print('重新发起第'+str(formdata['pn'])+'页请求')
                  lagouspider(url,formdata)


    def loaddata(url,formdata):
        formdata = parse.urlencode(formdata).encode('utf-8')
        req_header = {
            'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64;rv:63.0) Gecko/20100101',
            'Referer':'https://www.lagou.com/jobs/list_c%2B%2B?labelWords=&fromSearch=true&suginput='
        }
        req = request.Request(url,headers=req_header,data=formdata)
        response = request.urlopen(req)
        print(response.status)
        json_str = response.read().decode('utf-8')
        return json_str

    def save_data_to_db(jobdata):
        '''
        存储数据
        :param jobdata:
        :return:
        '''
        sql = '''
        insert into lagou(%s)
        values (%s)
        '''%(','.join(jobdata.keys()),','.join(['%s']*len(jobdata)))
        try:
        # 执行sql
            cursor.execute(sql,list(jobdata.values()))
            db.commit()
        except Exception as err:
            print(err)
    db.rollback()

    if __name__ == '__main__':
        #数据库连接
        db = pymysql.Connect(host='127.0.0.1', port=3306, user='****', \
         password='******', database='infotest', charset='utf8')
        # 获取游标对象
        cursor = db.cursor()

         url = 'https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false'
        formdata = {
            'first': 'true',
            'kd': 'c++',
            'pn': '1'
        }
        lagouspider(url,formdata)
上一篇下一篇

猜你喜欢

热点阅读