package com.test.pipleline;

import com.test.util.StringUtil;
import lombok.extern.slf4j.Slf4j;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;
import us.codecraft.webmagic.scheduler.BloomFilterDuplicateRemover;
import us.codecraft.webmagic.scheduler.FileCacheQueueScheduler;

import java.util.List;

/**
 * @Author lizhilong
 * @create 2020/1/6 20:03
 * @desc  Image crawler demonstrating the Pipeline and Scheduler components.
 */
@Slf4j
public class ImageProcesserPipeline implements PageProcessor {

    /**
     * Regex matching list-page URLs (e.g. http://www.win4000.com/zt/gaoqing_1.html).
     * The dot before "html" is escaped so it matches a literal '.' rather than any character.
     */
    private static final String REGEX_PAGE_URL = "http://www\\.win4000\\.com/zt/gaoqing_\\w+\\.html";

    /** Crawler configuration: retries, politeness delay, timeout, browser-like headers. */
    private final Site site = Site.me().setCycleRetryTimes(5).setRetryTimes(5).setSleepTime(500).setTimeOut(3 * 60 * 1000)
            .setUserAgent("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0")
            .addHeader("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
            .addHeader("Accept-Language", "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3")
            .setCharset("UTF-8");

    /**
     * Dispatches a fetched page: list pages yield more links to crawl,
     * any other URL is treated as an image detail page.
     *
     * @param page the downloaded page supplied by the WebMagic engine
     */
    @Override
    public void process(Page page) {
        if (page.getUrl().regex(REGEX_PAGE_URL).match()) {
            processListPage(page);
        } else {
            processDetailPage(page);
        }
    }

    /** Extracts pagination links and detail-page links from a list page and queues them. */
    private void processListPage(Page page) {
        // Queue sibling list pages that match the list-page pattern.
        page.addTargetRequests(page.getHtml().links().regex(REGEX_PAGE_URL).all());
        // Queue every detail page linked from the thumbnail grid.
        List<String> detailUrls = page.getHtml().xpath("//div[@class='list_cont Left_list_cont  Left_list_cont1']/div[@class='tab_tj']/div[@class='tab_box']/div/ul[@class='clearfix']/li/a").links().all();
        page.addTargetRequests(detailUrls);
    }

    /**
     * Extracts the image URL, name and next-page link from a detail page,
     * then hands an {@code ImageEntity} to the pipeline for download.
     */
    private void processDetailPage(Page page) {
        String detailUrl = page.getUrl().toString();
        log.info("详情页地址：{}", detailUrl);

        String picUrl = page.getHtml().xpath("//div[@class='pic-meinv']/a").css("img", "src").toString();
        log.info("图片下载地址：{}", picUrl);
        if (picUrl == null) {
            // Selector missed (layout change or ad page) — skip instead of throwing NPE below.
            log.warn("No image URL found on page, skipping: {}", detailUrl);
            page.setSkip(true);
            return;
        }

        // Guard against a URL with no '.' — lastIndexOf would be -1 and substring would throw.
        int dotIndex = picUrl.lastIndexOf('.');
        String suffixName = dotIndex >= 0 ? picUrl.substring(dotIndex) : "";
        // Bug fix: previously logged picUrl here instead of the suffix.
        log.info("图片后缀名：{}", suffixName);

        String allPic = page.getHtml().xpath("//div[@class='ptitle']/em/text()").toString();
        String currentIndex = page.getHtml().xpath("//div[@class='ptitle']/span/text()").toString();
        log.info("当前下载{}/{} 张图片", currentIndex, allPic);

        String title = page.getHtml().xpath("//div[@class='ptitle']/h1/text()").toString();
        String picName = StringUtil.getImageName(title, currentIndex, suffixName);

        // Queue the next image of the same gallery; on the last image there is no link.
        String nextUrl = page.getHtml().xpath("//div[@class='pic-next-img']/a").links().get();
        if (nextUrl != null) {
            page.addTargetRequest(nextUrl);
        }

        page.putField("ImageEntity", ImageEntity.build(picName, picUrl));
    }

    @Override
    public Site getSite() {
        return site;
    }

    /**
     * Entry point. No page limit is configured, so in theory this crawls indefinitely —
     * stop the process manually when done.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        Spider picSpider = Spider.create(new ImageProcesserPipeline()).addUrl("http://www.win4000.com/zt/gaoqing_1.html")
                .thread(5)
                .addPipeline(new DownLoadPipeline())
                .setScheduler(new FileCacheQueueScheduler("C:\\Imageurl\\").setDuplicateRemover(new BloomFilterDuplicateRemover(100000)));
        picSpider.start();
    }
}
