package org.jeecg.crawler.content;

import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.plugin.rocks.BreadthCrawler;
import lombok.SneakyThrows;
import org.jsoup.nodes.Element;
import org.jsoup.nodes.Node;
import org.jsoup.select.Elements;

import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;

/**
 * Crawler that collects news article links and their titles from the list
 * pages of a news site, using a configurable CSS selector.
 *
 * @author hu
 */

public class ContentNewsCrawler extends BreadthCrawler {

    /** Site root used to build absolute article URLs, e.g. {@code http://njrb.njdaily.cn/html/2020-11/27}. */
    private String baseUrl;

    /** Seed page path appended to {@link #baseUrl}, e.g. {@code /node_1.htm}. */
    private String page;

    /** CSS selector locating the anchor elements to harvest, e.g. {@code li[class=BMedlist] a}. */
    private String cssElement;

    /** Shared lock; unused inside this class but kept public for external callers. */
    public static ReentrantLock lock = new ReentrantLock();

    /**
     * Collected results: absolute article URL -> anchor text (article title).
     * Must be a concurrent map: {@link #visit(Page, CrawlDatums)} is invoked
     * from up to 40 worker threads (see {@code setThreads(40)} in the
     * constructor), so a plain HashMap would be a data race.
     */
    private Map<String, String> urlMap = new ConcurrentHashMap<>();

    /**
     * @param crawlPath  path of the directory which maintains information of this crawler
     * @param autoParse  if true, BreadthCrawler will auto-extract links which match
     *                   regex rules from each page
     * @param baseUrl    site root used to build absolute article URLs
     * @param page       seed page path appended to {@code baseUrl}
     * @param cssElement CSS selector that matches the anchor elements to harvest
     */
    public ContentNewsCrawler(String crawlPath, boolean autoParse, String baseUrl, String page, String cssElement) {
        super(crawlPath, autoParse);
        /* start page */
        this.addSeed(baseUrl + page);
        this.baseUrl = baseUrl;
        this.page = page;
        this.cssElement = cssElement;
        setThreads(40);
        getConf().setTopN(100);

        //enable resumable mode
        //setResumable(true);
    }

    /**
     * Called by the crawler for every fetched page. Selects all elements
     * matching {@link #cssElement} and records each {@code .htm} link as an
     * absolute URL mapped to its anchor text.
     */
    @Override
    public void visit(Page page, CrawlDatums next) {
        Elements elements = page.select(this.cssElement);
        for (Element e : elements) {
            // jsoup's attribute lookup never returns null; a missing
            // attribute yields "" — so test emptiness, not null.
            String href = e.attr("href");
            if (!href.isEmpty() && href.contains(".htm")) {
                // normalise relative links such as "./foo.htm"
                if (href.contains("./")) {
                    href = href.replace("./", "");
                }
                urlMap.put(baseUrl + "/" + href, e.text());
            }
        }
    }

    public static void main(String[] args) throws Exception {
        // Example usage (depth 1):
        // ContentNewsCrawler crawler = new ContentNewsCrawler("crawl", true,
        //         "http://njrb.njdaily.cn/html/2020-11/27", "/node_1.htm", "li[class=BMedlist] a");
        // crawler.start(1);
    }

    public String getBaseUrl() {
        return baseUrl;
    }

    public void setBaseUrl(String baseUrl) {
        this.baseUrl = baseUrl;
    }

    public String getPage() {
        return page;
    }

    public void setPage(String page) {
        this.page = page;
    }

    public String getCssElement() {
        return cssElement;
    }

    public void setCssElement(String cssElement) {
        this.cssElement = cssElement;
    }

    /** @return the (thread-safe) URL -&gt; title map collected so far. */
    public Map<String, String> getUrlMap() {
        return urlMap;
    }

    public void setUrlMap(Map<String, String> urlMap) {
        this.urlMap = urlMap;
    }

}
