package org.jeecg.crawler.node;

import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.plugin.rocks.BreadthCrawler;
import lombok.SneakyThrows;
import org.jsoup.nodes.Element;
import org.jsoup.nodes.Node;
import org.jsoup.select.Elements;

import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

/**
 * Crawling news from github news
 *
 * @author hu
 */

public class BaseNewsCrawler extends BreadthCrawler {

    /** Date path segment of the target edition, e.g. {@code "/202107/07/"}. */
    private String date;
    /** Scheme + host prefix of the target site, e.g. {@code "http://njcb.xhby.net/pc"}. */
    private String baseUrl;
    /** Seed page file name appended after the date segment, e.g. {@code ",node_A01.html"}. */
    private String page;
    /** Path segment between the base URL and the date, e.g. {@code "/layout"}. */
    private String mainUrl;

    /** Collected article URLs mapped to the text of the paragraph they were found in. */
    private Map<String, String> urlMap = new HashMap<>();

    /**
     * Creates the crawler and seeds it with {@code baseUrl + mainUrl + date + page}.
     *
     * @param crawlPath path of the directory which maintains information of this crawler
     * @param autoParse if true, BreadthCrawler will auto-extract links which match
     *                  regex rules from the page
     * @param date      date path segment, e.g. {@code "/202107/07/"}
     * @param baseUrl   site base URL, e.g. {@code "http://njcb.xhby.net/pc"}
     * @param mainUrl   path segment between base URL and date, e.g. {@code "/layout"}
     * @param page      seed page file name, e.g. {@code ",node_A01.html"}
     */
    public BaseNewsCrawler(String crawlPath, boolean autoParse, String date, String baseUrl, String mainUrl, String page) {
        super(crawlPath, autoParse);
        /* start page */
        this.addSeed(baseUrl + mainUrl + date + page);
        this.baseUrl = baseUrl;
        this.page = page;
        this.date = date;
        this.mainUrl = mainUrl;
        setThreads(40);
        getConf().setTopN(100);

        // enable resumable mode
        //setResumable(true);
    }

    /**
     * Visits one fetched page: scans every paragraph inside the
     * {@code div.Chunkiconlist} container and records each {@code .html}
     * link found there into {@link #urlMap}, keyed by the reconstructed
     * absolute URL ({@code baseUrl + mainUrl + date + "/" + href}) with the
     * paragraph text as the value.
     *
     * @param page the fetched page (shadows the {@link #page} field by design)
     * @param next collector for follow-up crawl targets (unused here)
     */
    @SneakyThrows
    @Override
    public void visit(Page page, CrawlDatums next) {
        Elements elements = page.select("div[class=Chunkiconlist] p");
        for (Element e : elements) {
            for (Node node : e.childNodes()) {
                // Jsoup's Attributes.get returns "" (never null) for a missing
                // attribute, so an emptiness check is required in addition to
                // the defensive null check.
                String href = node.attributes().get("href");
                if (href != null && !href.isEmpty() && href.contains(".html")) {
                    urlMap.put(baseUrl + mainUrl + date + "/" + href, e.text());
                }
            }
        }
    }

    public static void main(String[] args) throws Exception {
        BaseNewsCrawler crawler = new BaseNewsCrawler("crawl", true, "/202107/07/", "http://njcb.xhby.net/pc", "/layout", ",node_A01.html");
        /* start crawl with depth of 1 */
        crawler.start(1);
    }

    public String getDate() {
        return date;
    }

    public void setDate(String date) {
        this.date = date;
    }

    public String getBaseUrl() {
        return baseUrl;
    }

    public void setBaseUrl(String baseUrl) {
        this.baseUrl = baseUrl;
    }

    public String getPage() {
        return page;
    }

    public void setPage(String page) {
        this.page = page;
    }

    public String getMainUrl() {
        return mainUrl;
    }

    public void setMainUrl(String mainUrl) {
        this.mainUrl = mainUrl;
    }

    public Map<String, String> getUrlMap() {
        return urlMap;
    }

    public void setUrlMap(Map<String, String> urlMap) {
        this.urlMap = urlMap;
    }
}
