package com.crawler.crawler.tool.zyu;

import com.crawler.crawler.tool.zyu.pipeline.DataPipeline;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;
import us.codecraft.webmagic.scheduler.BloomFilterDuplicateRemover;
import us.codecraft.webmagic.scheduler.QueueScheduler;

/**
 * WebMagic {@link PageProcessor} that reads a listing page and extracts the
 * number of the last pagination page from the {@code a.extend} link
 * (expected URL form {@code .../page/N/}). The number is published to the
 * pipeline under the key {@code "result"}.
 */
public class ZyuPage implements PageProcessor {

    private final Site site = Site.me()
//            .setCharset("gbk")          // enable if the target site is GBK-encoded
            .setTimeOut(100 * 1000)       // request timeout in milliseconds
            .setRetrySleepTime(3000)      // pause between retries in milliseconds
            .setRetryTimes(10);           // max retry attempts per request

    // Page number parsed from the last-page pagination link.
    private int lastPage;

    /**
     * Process the page: locate the {@code a.extend} pagination link, parse the
     * trailing {@code /page/N/} segment and store {@code N} under the field
     * {@code "result"}. Pages without a parsable link are skipped instead of
     * crashing the worker thread.
     *
     * @param page the downloaded page
     */
    @Override
    public void process(Page page) {
        // Locate the pagination link pointing at the last page.
        String lastPageHtml = page.getHtml().css("a.extend").links().toString();
        String[] parts = (lastPageHtml == null) ? new String[0] : lastPageHtml.split("/page/");
        // Guard: without a "/page/N" segment the original code threw
        // ArrayIndexOutOfBoundsException; skip the page instead.
        if (parts.length < 2) {
            page.setSkip(true);
            return;
        }
        try {
            // replace() (literal), not replaceAll() (regex) — "/" needs no regex.
            lastPage = Integer.parseInt(parts[1].replace("/", "").trim());
        } catch (NumberFormatException e) {
            // Malformed link text — skip rather than kill the crawler thread.
            page.setSkip(true);
            return;
        }
        page.putField("result", lastPage);
    }

    /**
     * Get the site settings (timeouts and retry policy) for this processor.
     *
     * @return site
     * @see Site
     */
    @Override
    public Site getSite() {
        return site;
    }

    /**
     * Crawl the given URL and feed the extracted page count into the pipeline.
     * Blocks until the crawl finishes ({@link Spider#run()}).
     *
     * @param url          the listing page to crawl
     * @param dataPipeline pipeline receiving the "result" field
     */
    // initialDelay: wait after startup before the first run
    // fixedDelay: interval between runs
    // @Scheduled(initialDelay = 1000, fixedDelay = 100 * 1000)
    public void process(String url, DataPipeline dataPipeline) {
        Spider.create(new ZyuPage())
                .addUrl(url)
                // Bloom filter dedup sized for ~5000 URLs.
                .setScheduler(new QueueScheduler().setDuplicateRemover(new BloomFilterDuplicateRemover(5000)))
                .thread(3)
                .addPipeline(dataPipeline)
                .run();
    }
}
