webmagic+Xpath实现简单爬取
2018-08-06 本文已影响0人
夏日橘子冰
webmagic 是一个简单易上手的 Java 爬虫框架,提供简单灵活的 API,只需少量代码即可实现一个爬虫。
一、引入 Maven 依赖
<dependency>
<groupId>us.codecraft</groupId>
<artifactId>webmagic-core</artifactId>
<version>0.5.2</version>
</dependency>
<dependency>
<groupId>us.codecraft</groupId>
<artifactId>webmagic-extension</artifactId>
<version>0.5.2</version>
</dependency>
二、创建爬取类实现PageProcessor
@Component
@Scope("prototype")
// Crawler for Sporttery football match-result pages; implements webmagic's PageProcessor.
// NOTE(review): article excerpt — the body is abridged ("......") and the class's closing
// brace is not shown, so this snippet does not compile as-is.
public class JcFootballResultProcessor implements PageProcessor{
private Logger logger = LoggerFactory.getLogger(this.getClass());
private Gson gson = new Gson();
@Autowired
private MatchResultMapper matchResultMapper;
private Site site = Site.me().setSleepTime(1).setRetryTimes(3); // crawl sleep interval and retry count
// Regex matching the paginated match-result list URLs (a String, used as a regex below).
public static final String FOOTBALL_URL = "http://info\\.sporttery\\.cn/football/match_result\\.php\\?page=\\d";
// NOTE(review): "REUSLT" looks like a typo for "RESULT"; constant is unused in the visible code — verify.
private static final String FOOTBALL_URL_REUSLT_INFO = "http://i.sporttery.cn/api/fb_match_info/get_pool_rs/?f_callback=pool_prcess&mid=";
@Override
public Site getSite() {
return site;
}
@Override
public void process(Page page) {
// Skip any page whose URL does not match the result-list pattern.
// NOTE(review): the Pattern is recompiled on every call — a static final field would avoid that.
Pattern pattern = Pattern.compile(FOOTBALL_URL);
Matcher matcher = pattern.matcher(page.getUrl().toString());
if(!matcher.matches()) {
return;
}
// Collect every tr element under the match_list div; webmagic returns them as a node list.
List<Selectable> trSelectableList = page.getHtml().xpath("//div[@class=\"match_list\"]").xpath("//tr").nodes();
for(Selectable trSelectable:trSelectableList) {// For each row, extract the html of its td cells as a list of strings.
List<String> contentList = trSelectable.xpath("//td/html()").all();
// NOTE(review): size()>10 already implies non-null/non-empty — the first two checks are redundant.
if(contentList!=null && !contentList.isEmpty() && contentList.size()>10) {
String date = contentList.get(0);
String matchId = contentList.get(1);
// Column index 9 holds the match status; only "已完成" (finished) and "取消" (cancelled) rows proceed.
if(!contentList.get(9).equals("已完成") && !contentList.get(9).equals("取消")){
// NOTE(review): this is an expected skip, not a failure — logger.info/debug would fit better than error.
logger.error("赛事"+date+":"+matchId+"非完成/取消状态,跳过抓取");
continue;
}
String tmpid = getMatchUniqueId(matchId);
// Build an id like "FB" + week + tmpid; getMatchUniqueId/DateUtil are defined elsewhere in the project.
String matchUniqueId = "FB" + DateUtil.calWeek(date,tmpid.substring(0, 1)) + tmpid;
}
......// middle of the method omitted by the article
// Queue every link on the page that matches the result-list regex for subsequent crawling.
page.addTargetRequests(page.getHtml().links().regex(FOOTBALL_URL).all());
}
三、启动爬取程序
// Inject the processor defined above, then start the crawl.
// NOTE(review): FOOTBALL_URL_RESULT is not declared anywhere in the article's snippets —
// the processor only defines FOOTBALL_URL and FOOTBALL_URL_REUSLT_INFO; verify which constant is meant.
Spider.create(jcFootballResultProcessor).addUrl(FOOTBALL_URL_RESULT).thread(1).run();// addUrl sets the page the crawl starts from