package com.skytech.component.crawler.parse;

import com.skytech.component.crawler.scratch.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.data.domain.Example;
import org.springframework.stereotype.Service;


import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;

import static java.util.stream.Collectors.toMap;

@Service
public class ParserLoader {
    @Autowired
    private CrawlerUrlDAO crawlerUrlDAO;
    @Autowired
    private TargetRulesDAO targetRulesDAO;
    @Autowired
    private CustomLocationDAO customLocationDAO;
    @Autowired
    private PartTask partTask;
    @Value("${chrome.driver.path}")
    public String driverPath;

    /** Number of batches the backlog is split into when it is large. */
    private static final int PARTITION_COUNT = 5;
    /** Backlog size above which the work is split into partitions. */
    private static final int PARTITION_THRESHOLD = 1000;

    // In-memory caches, rebuilt from the database on every load() call; keyed by rule id.
    // NOTE(review): these fields are mutated per call on a singleton bean — concurrent
    // load() invocations would see each other's caches; confirm load() is never run concurrently.
    private Map<String, TargetRules> rulesMap = new HashMap<>();
    private Map<String, List<CustomLocation>> customLocationMap = new HashMap<>();

    private static final Logger log = LoggerFactory.getLogger(ParserLoader.class);

    /** Parses all pending URLs for every rule, using the default Selenium downloader and data sink. */
    public void load() {
        load(null, new SeleniumDownloader(driverPath), new SaveDataImpl());
    }

    /**
     * Loads the rule and custom-location caches into memory, then hands every
     * not-yet-parsed {@code CrawlerUrl} to {@code partTask} for parsing.
     *
     * @param ruleId         restrict processing to one rule id, or {@code null} for all rules
     *                       (a {@code null} field is ignored by the {@code Example} probe)
     * @param htmlDownloader downloader used to fetch page content
     * @param simpl          sink that persists the parsed results
     */
    public void load(String ruleId, HtmlDownloader htmlDownloader, ISaveData simpl) {
        log.info("{},解析开始", ruleId);
        // Cache all rules by id. First-wins merge guards against the (unexpected)
        // case of duplicate ids, which would otherwise throw IllegalStateException.
        rulesMap = targetRulesDAO.findAll().stream()
                .collect(toMap(TargetRules::getId, Function.identity(), (first, dup) -> first));

        // Group custom locations by rule id in a single pass. This replaces the
        // previous distinct-ids loop that issued one findByRuleId query per rule
        // (an N+1 query pattern); assumes findByRuleId returned exactly the rows
        // whose ruleId matches — TODO confirm against the DAO.
        customLocationMap = customLocationDAO.findAll().stream()
                .collect(Collectors.groupingBy(CustomLocation::getRuleId));

        // Query-by-example probe: fetch URLs whose "parse" flag is still "no",
        // optionally narrowed to a single rule.
        CrawlerUrl probe = new CrawlerUrl();
        probe.setParse("no");
        probe.setRuleId(ruleId);
        List<CrawlerUrl> all = crawlerUrlDAO.findAll(Example.of(probe));
        all.sort(Comparator.comparing(CrawlerUrl::getRuleId));

        if (all.size() > PARTITION_THRESHOLD) {
            // Large backlog: split into partitions so partTask works on smaller batches.
            List<List<CrawlerUrl>> parts = divide(all, PARTITION_COUNT);
            for (int i = 0; i < parts.size(); i++) {
                partTask.partTask(parts.get(i), rulesMap, customLocationMap, i, htmlDownloader, simpl);
            }
        } else {
            partTask.partTask(all, rulesMap, customLocationMap, 1, htmlDownloader, simpl);
        }
    }

    /**
     * Splits {@code all} into {@code size} contiguous sub-lists; the last
     * sub-list absorbs the remainder when the list does not divide evenly.
     * The returned lists are {@code subList} views backed by {@code all}.
     *
     * @throws IllegalArgumentException if {@code size} is not positive
     */
    private List<List<CrawlerUrl>> divide(List<CrawlerUrl> all, int size) {
        if (size <= 0) {
            throw new IllegalArgumentException("size must be positive: " + size);
        }
        List<List<CrawlerUrl>> result = new ArrayList<>(size);
        int count = all.size() / size;
        for (int j = 0; j < size; j++) {
            int from = j * count;
            int to = (j == size - 1) ? all.size() : from + count;
            result.add(all.subList(from, to));
        }
        return result;
    }

}
