Scraping NSFC (National Natural Science Foundation of China) grant information from the search results

2020-04-07  一只烟酒僧
######################################################## 
#-------------------------------------------------------
# Topic: Search NSFC project information by keyword
# Author:
# Date:Tue Apr 07 17:13:14 2020
# Mail:
#-------------------------------------------------------
########################################################


#name="keyword(s) that the project title must contain"
#leader="applicant (PI) id"
#code="grant approval number"
#yearStart="start year of the search"
#yearEnd="end year of the search"
#subject="discipline category id of the project"
#category="project category"
#fundStart="minimum funding amount"
#fundEnd="maximum funding amount"
#If none of leader / grant number / host institution is given, name must not be empty.
#Leave a field as an empty string ("") to keep its default value.
# Example parameter values for testing:
# name="单细胞"
# yearStart=2015
# yearEnd=2020
# subject=""
# category=""
# leader=""
get_search_results_FUND<-function(name,yearStart=2015,yearEnd=2020,subject="",category="",leader=""){
  

if(!require(stringr))install.packages("stringr")
if(!require(rvest))install.packages("rvest")
if(!require(curl))install.packages("curl")


library(stringr)
library(rvest)
library(curl)
information<-data.frame()
index=0
#Submit the search terms
url<-'http://fund.sciencenet.cn/search?'
session<-html_session(url)
form<-html_form(read_html(session))[[2]]
print(form)
# key_word<-readline(prompt = 'Enter the form fields as comma-separated key=value pairs, e.g.: name="单细胞",leader=1234')
# key_word<-paste(key_word,",keyWord=0,comSummary=0",sep = "")
# #Turn the text into a command and evaluate it
# form_new<-paste("form=set_values(form,",key_word,")",sep = "")
# eval(parse(text = form_new))

for (m in yearStart:yearEnd) {
  index=index+1
  if(index==20){
    print("为防止被封IP,自动进入休眠十分钟")
    Sys.sleep(600)
    index=0
    
  }
  form<-set_values(form,name=name,leader=leader,yearStart=m,yearEnd=m,subject=subject,category=category,keyWord=0,comSummary=0)
  session<-submit_form(session,form)
  url<-session$url
  #Get the maximum number of result pages
  FUND<-html_session(url)
  FUND<-read_html(FUND)
  total_page<-'//p[@id="page_button2"]/span'
  total_page<-html_nodes(FUND,xpath = total_page)%>%html_text(trim = T)
  total_page<-as.numeric(total_page)
  total_page<-max(total_page[!is.na(total_page)])
  if(!is.finite(total_page)){total_page=1}#the results may not fill even one page
  #Get the total number of matching projects
  pro_num<-'//span[@class="l"]/b[1]'
  pro_num<-html_nodes(FUND,xpath = pro_num)%>%html_text(trim = T)
  pro_num<-as.numeric(pro_num)
  if(pro_num>200){print(paste(m,": more than 200 projects this year, only the first 200 are listed",sep=""))}
  if(pro_num==0){next}
  #Scrape the required fields from each result page
  for (i in 1:total_page) {
    index=index+1
    if(index==20){
      print("为防止被封IP,自动进入休眠十分钟")
      Sys.sleep(600)
      index=0
      
    }
    url1<-paste(url,"&page=",i,sep = "")
    print(paste("正在访问:",url1,sep=""))
    FUND<-html_session(url1)
    FUND<-read_html(FUND)
    title<-'//p[@class="t"]/a'
    author<-'//span[@class="author"]/i'
    danwei<-'//span[@class="author"]/following-sibling::span/i'
    type<-'//span[@class="author"]/following-sibling::i'
    ID<-'//span[@class="author"]/following-sibling::b'
    year<-'//span[@class="author"]/following-sibling::span/b'
    money<-'//p[@class="ico"]/following-sibling::p//b'
    abstract<-'//p[@class="t"]/a'  # same node as the title; its href points to the project detail page
    keywords<-'//div[@class="d"]/p[2]/span/i'
    information1<-data.frame(
                             Title=html_nodes(FUND,xpath =title )%>%html_text(trim = T),
                             Author=html_nodes(FUND,xpath =author )%>%html_text(trim = T),
                             Institution=html_nodes(FUND,xpath =danwei )%>%html_text(trim = T),
                             Type=html_nodes(FUND,xpath =type )%>%html_text(trim = T),
                             ID=html_nodes(FUND,xpath =ID )%>%html_text(trim = T),
                             Year=html_nodes(FUND,xpath =year )%>%html_text(trim = T),
                             Money=html_nodes(FUND,xpath =money )%>%html_text(trim = T),
                             KeyWords=html_nodes(FUND,xpath = keywords)%>%html_text(trim = T),
                             Abstract_url=html_nodes(FUND,xpath = abstract)%>%html_attr(name="href"))
    if(is.na(information1$Title[1])){print(paste("Page ",i," returned empty content, scrape failed",sep=""))}else{print(paste("Page ",i," scraped successfully",sep=""))}
    information<-rbind(information,information1) 
  }
  
}
return(information)
}
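
A minimal usage sketch, assuming the search form on fund.sciencenet.cn still exposes the same fields used above; the output file name is only an illustration. Note that html_session(), set_values() and submit_form() come from the pre-1.0 rvest API; in rvest >= 1.0 the equivalents are session(), html_form_set() and session_submit().

# Query projects whose titles contain "单细胞", one year at a time from 2015 to 2020
results <- get_search_results_FUND(name = "单细胞", yearStart = 2015, yearEnd = 2020)
# Write the scraped table to disk for later use (example file name)
write.csv(results, "fund_search_results.csv", row.names = FALSE)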