package org.jeecg.crawler.other;

import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.plugin.rocks.BreadthCrawler;
import lombok.SneakyThrows;
import org.jsoup.nodes.Element;
import org.jsoup.nodes.Node;
import org.jsoup.select.Elements;
import org.springframework.util.CollectionUtils;

import java.util.*;

/**
 * Crawler for Xinhua News (新华网) search results.
 *
 * <p>Seeds one search-result URL per keyword/page combination and, on each
 * visited page, prints the href and headline text of every ".htm" news link
 * found under {@code div.news h2}.</p>
 */
public class XHNewsCrawler extends BreadthCrawler {

    /** Date path segment (e.g. "/202107/07/"); intended for composing article URLs. */
    private String date;
    /** Base host URL; settable via {@link #setBaseUrl(String)} — not used by the visible logic. */
    private String baseUrl;
    /** Page identifier; settable via {@link #setPage(String)} — not used by the visible logic. */
    private String page;
    /** Site section path; settable via {@link #setMainUrl(String)} — not used by the visible logic. */
    private String mainUrl;

    /** Collected article URL -> headline pairs, exposed to callers via {@link #getUrlMap()}. */
    private Map<String, String> urlMap = new HashMap<>();

    /**
     * Creates the crawler and seeds one search URL per keyword/page combination.
     *
     * @param crawlPath path of the directory which maintains information of this crawler
     * @param autoParse if true, BreadthCrawler will auto-extract links which match
     *                  regex rules from each page
     * @param date      date path segment, e.g. "/202107/07/"
     * @param url       search base URL; seeds are built as {@code url/keyword/pageIndex/}
     * @param pageNum   number of result pages to seed per keyword; a null or
     *                  non-positive value seeds nothing
     * @param keywords  search keywords; may be null or empty
     */
    public XHNewsCrawler(String crawlPath, boolean autoParse, String date, String url,
                         Integer pageNum, List<String> keywords) {
        super(crawlPath, autoParse);

        // Guard against a null pageNum: the original loop bound unboxed it directly,
        // which would throw a NullPointerException for a null argument.
        int pages = pageNum == null ? 0 : pageNum;
        if (!CollectionUtils.isEmpty(keywords)) {
            for (String keyword : keywords) {
                for (int i = 1; i <= pages; i++) {
                    this.addSeed(url + "/" + keyword + "/" + i + "/");
                }
            }
        }

        this.date = date;

        setThreads(40);
        getConf().setTopN(100);

        //enable resumable mode
        //setResumable(true);
    }

    /**
     * Extracts news links from a visited search-result page and prints each
     * article href together with its headline text.
     *
     * @param page the fetched page
     * @param next collector for follow-up crawl targets (unused here)
     */
    @SneakyThrows
    @Override
    public void visit(Page page, CrawlDatums next) {
        Elements elements = page.select("div[class=news] h2");
        for (Element e : elements) {
            for (Node node : e.childNodes()) {
                // jsoup's Attributes.get returns "" (never null) for a missing
                // attribute, so test for emptiness rather than null.
                String href = node.attributes().get("href");
                if (href != null && !href.isEmpty() && href.contains(".htm")) {
                    System.out.println(href);
                    System.out.println(e.text());
                }
            }
        }
    }

    public static void main(String[] args) throws Exception {
        List<String> keywordsList = new ArrayList<>();
        keywordsList.add("南京");
        XHNewsCrawler crawler = new XHNewsCrawler(
                "crawl", true, "/202107/07/", "https://so.news.cn/#search/0", 2, keywordsList);
        /*start crawl with depth of 1*/
        crawler.start(1);
    }

    public String getDate() {
        return date;
    }

    public void setDate(String date) {
        this.date = date;
    }

    public String getBaseUrl() {
        return baseUrl;
    }

    public void setBaseUrl(String baseUrl) {
        this.baseUrl = baseUrl;
    }

    public String getPage() {
        return page;
    }

    public void setPage(String page) {
        this.page = page;
    }

    public String getMainUrl() {
        return mainUrl;
    }

    public void setMainUrl(String mainUrl) {
        this.mainUrl = mainUrl;
    }

    public Map<String, String> getUrlMap() {
        return urlMap;
    }

    public void setUrlMap(Map<String, String> urlMap) {
        this.urlMap = urlMap;
    }
}
