package org.jeecg.crawler.special;
import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.plugin.rocks.BreadthCrawler;
import lombok.SneakyThrows;
import org.jeecg.common.util.DateTimeUtil;
import org.jeecg.modules.crawlerpaper.entity.CrawlerInfo;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.util.*;

/**
 * Crawls article details from the Farmer's Daily (农民日报) e-paper at szb.farmer.com.cn.
 *
 * @author hu
 */
public class NongMinDetailNewsCrawler extends BreadthCrawler {

    /** Maps each seed (front-page) URL to its layout/section name; looked up per page in {@link #visit}. */
    private final Map<String, String> urlMap;

    /**
     * Articles collected during the crawl. Kept public for backward compatibility;
     * prefer {@link #getCrawlerInfoList()}.
     */
    public List<CrawlerInfo> crawlerInfoList = new ArrayList<>();

    /**
     * @param crawlPath crawlPath is the path of the directory which maintains
     *                  information of this crawler
     * @param autoParse if autoParse is true, BreadthCrawler will auto extract
     *                  links which match regex rules from a page
     * @param urlMap    seed URL -> layout name; every key is added as a crawl seed
     */
    public NongMinDetailNewsCrawler(String crawlPath, boolean autoParse, Map<String, String> urlMap) {
        super(crawlPath, autoParse);
        for (String seed : urlMap.keySet()) {
            System.out.println(seed);
            this.addSeed(seed);
        }
        this.urlMap = urlMap;
        setThreads(49);
        getConf().setTopN(100);
    }

    /**
     * Extracts headline cells from a crawled front page and collects a
     * {@link CrawlerInfo} (including the downloaded article HTML) for each one.
     */
    @SneakyThrows
    @Override
    public void visit(Page page, CrawlDatums next) {
        // Pages of szb.farmer.com.cn are GBK-encoded.
        page.charset("gbk");

        Elements headlines = page.select("td span[class=STYLE9]");
        for (Element e : headlines) {
            // The article link lives in an onclick handler two ancestors up, as a
            // relative path of the form "javascript:window.location.href='../<...>.html'";
            // strip the JS wrapper and prepend today's date-based base URL.
            String href = "http://szb.farmer.com.cn/" + DateTimeUtil.getTodayYear() + "/" + DateTimeUtil.getTodayChar8();
            href = href + e.parent().parent().attr("onclick")
                    .replace("javascript:window.location.href='..", "")
                    .replace("'", "");
            System.out.println(href);

            CrawlerInfo crawlerInfo = new CrawlerInfo();
            crawlerInfo.setSourceName("农民日报");
            crawlerInfo.setScoureUrl(page.url());
            crawlerInfo.setLayout(urlMap.get(page.url()));
            crawlerInfo.setArticleTime(DateTimeUtil.getTodayChar8En());
            crawlerInfo.setArticleUrl(href);
            crawlerInfo.setArticleName(e.text());
            crawlerInfo.setArticleContent(getHtmlByUrl(href));
            crawlerInfoList.add(crawlerInfo);
        }
    }

    public static void main(String[] args) throws Exception {
        Map<String, String> urlMap = new HashMap<>();
        urlMap.put("http://szb.farmer.com.cn/2021/20210818/20210818_001/20210818_001.html", "nih");
        NongMinDetailNewsCrawler crawler = new NongMinDetailNewsCrawler("crawl", true, urlMap);
        /* start crawl with depth of 1 */
        crawler.start(1);
    }

    public List<CrawlerInfo> getCrawlerInfoList() {
        return crawlerInfoList;
    }

    public void setCrawlerInfoList(List<CrawlerInfo> crawlerInfoList) {
        this.crawlerInfoList = crawlerInfoList;
    }

    /**
     * Downloads the content at {@code contentUrl}, decoded as GBK, and returns it
     * as a single string with line separators removed.
     *
     * @param contentUrl the URL to fetch
     * @return the page body with line separators stripped
     * @throws Exception on a malformed URL or any I/O failure
     */
    public static String getHtmlByUrl(String contentUrl) throws Exception {
        URL url = new URL(contentUrl);
        // try-with-resources: the original never closed the reader (leaked the
        // connection on every call, and on any read error).
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(url.openStream(), "gbk"))) {
            // StringBuilder avoids the O(n^2) cost of repeated String concatenation.
            StringBuilder content = new StringBuilder();
            String line;
            while ((line = reader.readLine()) != null) {
                content.append(line);
            }
            return content.toString();
        }
    }
}
