package com.skytech.component.crawler.scratch.process;

import com.skytech.component.crawler.scratch.base.CrawlerUrl;
import com.skytech.component.crawler.scratch.base.CrawlerUrlDAO;
import com.skytech.component.crawler.scratch.base.TargetRules;
import com.skytech.component.crawler.task.CrawlerTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.data.domain.Example;
import org.springframework.util.StringUtils;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.processor.PageProcessor;

import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.*;

import static java.util.stream.Collectors.toList;

/**
 * WebMagic page processor that drives a "list page" crawl: it pulls content
 * (article) links out of each fetched page, persists them through
 * {@link CrawlerUrlDAO}, and queues the next pagination page when one exists.
 *
 * <p>Two modes, controlled by {@link #setInitStatus(boolean)}:
 * <ul>
 *   <li>{@code initStatus == true} (initial crawl): every matched URL is saved
 *       unconditionally.</li>
 *   <li>{@code initStatus == false} (incremental crawl): URLs are saved in page
 *       order until one already stored for this rule is found, at which point
 *       the spider is stopped.</li>
 * </ul>
 */
public class CreateHtmlPageProcessor implements PageProcessor {

    private static final Logger log = LoggerFactory.getLogger(CreateHtmlPageProcessor.class);

    // DateTimeFormatter is immutable and thread-safe; build it once instead of
    // once per matched URL inside the mapping lambda.
    private static final DateTimeFormatter OPERATE_TIME_FORMATTER =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");

    private final TargetRules targetRules;
    private final CrawlerUrlDAO crawlerUrlDAO;

    // Regex selecting links to content pages; may be empty (rule has no content pages).
    private final String contentRegex;
    // Regex selecting the pagination ("next page") link; may be empty (no pagination).
    private final String nextPageRegex;

    // true = initial full crawl; false = incremental crawl that halts on the
    // first already-persisted URL. Defaults to a full crawl.
    private boolean initStatus = true;

    // NOTE(review): never written inside this class — only exposed via
    // getContentUrlSet(). Confirm a collaborator populates it; otherwise dead state.
    private final Set<String> contentUrlSet = new HashSet<>();

    /**
     * @param targetRules   crawl rule: seed URL plus the content / next-page regexes
     * @param crawlerUrlDAO persistence gateway for discovered content URLs
     */
    public CreateHtmlPageProcessor(TargetRules targetRules, CrawlerUrlDAO crawlerUrlDAO) {
        this.targetRules = targetRules;
        this.contentRegex = targetRules.getContentRegex();
        this.nextPageRegex = targetRules.getNextPageRegex();
        this.crawlerUrlDAO = crawlerUrlDAO;
    }

    public boolean isInitStatus() {
        return initStatus;
    }

    public void setInitStatus(boolean initStatus) {
        this.initStatus = initStatus;
    }

    /**
     * Processes one fetched page: validates it, extracts and persists content
     * links (when a content regex is configured), then queues the next
     * pagination page (when a next-page regex is configured).
     */
    @Override
    public void process(Page page) {
        if (StringUtils.isEmpty(page.getRawText())) {
            log.error("请求{}页面未抓取到数据", page.getRequest());
            return;
        }
        if (StringUtils.isEmpty(contentRegex) && StringUtils.isEmpty(nextPageRegex)) {
            log.error("种子id{}-{}无翻页，无正文页面/匹配规则都为空", targetRules.getId(), page.getRequest());
            return;
        }
        if (!StringUtils.isEmpty(contentRegex)) {
            extractAndSaveContentUrls(page);
        }
        if (!StringUtils.isEmpty(nextPageRegex)) {
            queueNextPage(page);
        }
    }

    /** Matches content links on the page and saves them (full or incremental mode). */
    private void extractAndSaveContentUrls(Page page) {
        List<String> contentUrls = page.getHtml().links().regex(contentRegex).all();
        if (contentUrls.isEmpty()) {
            log.error("规则:{},页面:{}没有匹配的正文", targetRules.toString(), page.getRequest().getUrl());
            return;
        }
        log.info("开始存储正文内容,数量：{}", contentUrls.size());
        List<CrawlerUrl> records = contentUrls.stream().map(this::toCrawlerUrl).collect(toList());
        if (initStatus) {
            crawlerUrlDAO.saveAll(records);
        } else {
            saveUntilDuplicate(records);
        }
    }

    /** Builds one persistable {@link CrawlerUrl} record for a matched link. */
    private CrawlerUrl toCrawlerUrl(String link) {
        CrawlerUrl crawlerUrl = new CrawlerUrl();
        crawlerUrl.setId(UUID.randomUUID().toString());
        crawlerUrl.setDomainUrl(targetRules.getSeedUrl());
        crawlerUrl.setRuleId(targetRules.getId());
        crawlerUrl.setContentUrl(link);
        crawlerUrl.setOperateTime(LocalDateTime.now().format(OPERATE_TIME_FORMATTER));
        crawlerUrl.setParse("no");
        return crawlerUrl;
    }

    /**
     * Incremental mode: saves candidates in page order but stops at the first
     * URL already stored for this rule, then halts the spider.
     *
     * <p>NOTE(review): this assumes listings are newest-first, so the first
     * duplicate implies everything after it was crawled previously — confirm
     * that holds for every target site.
     */
    private void saveUntilDuplicate(List<CrawlerUrl> candidates) {
        List<CrawlerUrl> fresh = new ArrayList<>();
        boolean duplicateFound = false;
        for (CrawlerUrl candidate : candidates) {
            // Query-by-example probe on (ruleId, contentUrl); other fields stay
            // null so they are ignored by the Example matcher.
            CrawlerUrl probe = new CrawlerUrl();
            probe.setRuleId(targetRules.getId());
            probe.setContentUrl(candidate.getContentUrl());
            if (!crawlerUrlDAO.findAll(Example.of(probe)).isEmpty()) {
                duplicateFound = true;
                break;
            }
            fresh.add(candidate);
        }
        crawlerUrlDAO.saveAll(fresh);
        // Stop the running spider once a duplicate is hit.
        if (duplicateFound) {
            log.info("遇到重复数据停止爬虫");
            CrawlerTask.spider.stop();
        }
    }

    /** Queues the next pagination page(s) so the spider keeps following the listing. */
    private void queueNextPage(Page page) {
        List<String> nextPageUrls = page.getHtml().links().regex(nextPageRegex).all();
        if (nextPageUrls.isEmpty()) {
            log.error("规则:{},页面:{}没有发现下一页", targetRules.toString(), page.getRequest().getUrl());
        } else {
            log.info("当前页面发现下一页");
            page.addTargetRequests(nextPageUrls);
            nextPageUrls.forEach(l -> log.info(l));
        }
    }

    public Set<String> getContentUrlSet() {
        return contentUrlSet;
    }

    @Override
    public Site getSite() {
        return Site.me().setRetryTimes(3).setSleepTime(1000);
    }
}
