package com.skytech.component.crawler.scratch.process;

import com.skytech.component.crawler.scratch.chrome.TestChromeService;
import com.skytech.component.crawler.scratch.base.CrawlerUrl;
import com.skytech.component.crawler.scratch.base.CrawlerUrlDAO;
import com.skytech.component.crawler.scratch.base.TargetRules;
import com.skytech.component.crawler.task.CrawlerTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.data.domain.Example;
import org.springframework.util.StringUtils;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.processor.PageProcessor;

import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.*;

import static java.util.stream.Collectors.toList;


/**
 * Generic WebMagic page processor driven by a {@link TargetRules} configuration:
 * filters a page's links through {@code contentRegex}, persists the matches as
 * {@link CrawlerUrl} rows, and (optionally) follows a next-page URL resolved by
 * {@link TestChromeService}.
 *
 * <p>In incremental mode ({@code initStatus == false}) it stops the shared spider
 * as soon as an already-persisted URL is encountered.
 */
public class SimpleCommonProcessor implements PageProcessor {

    private static final Logger log = LoggerFactory.getLogger(SimpleCommonProcessor.class);

    // DateTimeFormatter is immutable and thread-safe; build it once instead of per matched link.
    private static final DateTimeFormatter OPERATE_TIME_FORMAT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");

    // NOTE(review): `save` is written via setSave() but never read anywhere in this
    // class — confirm with callers whether it is still needed.
    private boolean save = true;//default

    private final String contentRegex;
    private final String nextPageRegex;

    // BUGFIX: `site` was a *static* field assigned from the constructor, so every newly
    // constructed processor silently overwrote the Site used by all existing instances.
    // It is per-instance state and is now an instance field.
    private final Site site;
    private final CrawlerUrlDAO crawlerUrlDAO;
    private final TargetRules targetRules;
    // May be null when the 3-arg constructor is used; guarded before use in followNextPage().
    private final TestChromeService testChromeService;
    // false = incremental crawl (stop on first duplicate); true = initial full crawl.
    private boolean initStatus = false;

    //todo find the most recent page first

    /**
     * Full constructor.
     *
     * @param targetRules       rule set supplying the content / next-page regexes and rule id
     * @param site              WebMagic site settings returned by {@link #getSite()}
     * @param crawlerUrlDAO     repository used to persist and de-duplicate crawled URLs
     * @param testChromeService resolves the next-page URL; required when
     *                          {@code targetRules.getNextPageRegex()} is non-empty
     */
    public SimpleCommonProcessor(TargetRules targetRules, Site site, CrawlerUrlDAO crawlerUrlDAO,
                                 TestChromeService testChromeService) {
        this.contentRegex = targetRules.getContentRegex();
        this.nextPageRegex = targetRules.getNextPageRegex();
        this.site = site;
        this.crawlerUrlDAO = crawlerUrlDAO;
        this.targetRules = targetRules;
        this.testChromeService = testChromeService;
    }

    /**
     * Convenience constructor without a chrome service. Next-page following will be
     * skipped (with an error log) if a next-page regex is configured.
     */
    public SimpleCommonProcessor(TargetRules targetRules, Site site, CrawlerUrlDAO crawlerUrlDAO) {
        // Delegate instead of duplicating the five assignment lines.
        this(targetRules, site, crawlerUrlDAO, null);
    }

    public void setSave(boolean save) {
        this.save = save;
    }

    /**
     * Processes one downloaded page: validates it, extracts content URLs matching
     * {@code contentRegex}, persists them (stopping the spider on a duplicate in
     * incremental mode), then schedules the next page if configured.
     */
    @Override
    public void process(Page page) {

        //todo skip pages that have already been crawled before
        if (StringUtils.isEmpty(contentRegex) && StringUtils.isEmpty(nextPageRegex)) {
            log.error("{}-跳过当前页面", page.getRequest().getUrl());
            return;
        }
        if (StringUtils.isEmpty(page.getRawText())) {
            log.error("{}-页面下载失败", page.getRequest().getUrl());
            return;
        }
        log.info("当前页面 :{},链接:{}", targetRules.getSeedDescribe(), page.getHtml().getDocument().location());
        List<String> links = page.getHtml().links().all();
        log.info("发现当前页面链接数量{}", links.size());
        log.info("当前页面链接分别是");
        links.forEach(log::info);
        if (StringUtils.isEmpty(contentRegex)) {
            log.error("{}-当前页面没有配置正文正则", page.getRequest().getUrl());
            return;
        }
        saveContentUrls(extractContentUrls(page));
        followNextPage(page);
    }

    /** Filters the page's links through {@code contentRegex} and maps each match to an unsaved {@link CrawlerUrl}. */
    private List<CrawlerUrl> extractContentUrls(Page page) {
        log.info("开始过滤当前页面");
        List<String> regexResult = page.getHtml().links().regex(contentRegex).all();
        log.info("过滤之后链接数量{}", regexResult.size());
        // NOTE(review): the next line announces the filtered links but they are never
        // actually logged — kept as-is to preserve behavior; consider logging each one.
        log.info("过滤之后的链接分别是");
        if (regexResult.isEmpty()) {
            log.error("规则:{},页面:{}没有匹配的正文", targetRules.toString(), page.getRequest().getUrl());
        }
        return regexResult.stream().map(l -> {
            CrawlerUrl c = new CrawlerUrl();
            c.setId(UUID.randomUUID().toString());
            c.setContentUrl(l);
            c.setDomainUrl(page.getHtml().getDocument().location());
            c.setRuleId(targetRules.getId());
            c.setParse("no"); // mark as not yet parsed
            c.setOperateTime(LocalDateTime.now().format(OPERATE_TIME_FORMAT));
            return c;
        }).collect(toList());
    }

    /**
     * Persists the extracted URLs. In init mode everything is saved; in incremental
     * mode saving stops at the first URL already stored for this rule, and the shared
     * spider is stopped (pages are assumed to list newest entries first).
     */
    private void saveContentUrls(List<CrawlerUrl> collect) {
        if (initStatus) {
            // Initial crawl: save everything unconditionally.
            crawlerUrlDAO.saveAll(collect);
            return;
        }
        List<CrawlerUrl> fresh = new ArrayList<>();
        boolean duplicateFound = false;
        for (CrawlerUrl crawlerUrl : collect) {
            // Query-by-example on (ruleId, contentUrl) to detect an already-saved URL.
            CrawlerUrl probe = new CrawlerUrl();
            probe.setRuleId(targetRules.getId());
            probe.setContentUrl(crawlerUrl.getContentUrl());
            if (!crawlerUrlDAO.findAll(Example.of(probe)).isEmpty()) {
                duplicateFound = true;
                break;
            }
            fresh.add(crawlerUrl);
        }
        crawlerUrlDAO.saveAll(fresh);
        if (duplicateFound) {
            log.info("遇到重复数据停止爬虫");
            CrawlerTask.spider.stop();
        }
    }

    /** Resolves and schedules the next page via {@link TestChromeService}, if a next-page regex is configured. */
    private void followNextPage(Page page) {
        if (StringUtils.isEmpty(nextPageRegex)) {
            log.info("不需要过滤下一页链接");
            return;
        }
        log.info("开始过滤下一页的链接");
        // BUGFIX: the 3-arg constructor leaves testChromeService null; without this
        // guard a configured next-page regex caused an NPE here.
        if (testChromeService == null) {
            log.error("规则:{},配置了下一页规则但未提供TestChromeService,跳过下一页", targetRules.toString());
            return;
        }
        String url = testChromeService.nextUrl(page.getRequest().getUrl());
        if (StringUtils.isEmpty(url)) {
            log.error("规则:{},页面:{}没有发现下一页", targetRules.toString(), page.getRequest().getUrl());
        } else {
            page.addTargetRequest(url);
        }
    }

    public boolean isInitStatus() {
        return initStatus;
    }

    public void setInitStatus(boolean initStatus) {
        this.initStatus = initStatus;
    }

    @Override
    public Site getSite() {
        return site;
    }
}
