package org.jeecg.crawler.other;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.gargoylesoftware.htmlunit.BrowserVersion;
import com.gargoylesoftware.htmlunit.NicelyResynchronizingAjaxController;
import com.gargoylesoftware.htmlunit.WebClient;
import com.gargoylesoftware.htmlunit.html.HtmlPage;
import com.sun.jmx.snmp.Timestamp;
import org.jeecg.modules.crawlerpaper.entity.CrawlerInfo;
import org.jeecgframework.p3.core.util.HttpUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.util.CollectionUtils;
import org.springframework.util.StringUtils;

import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Crawlers that fetch news search results from several Chinese news sites
 * (学习强国, 中国日报, CCTV, 人民网, 新华网, and regional Jiangsu/Nanjing outlets).
 *
 * @author hu
 */

public class SearchNewsCrawler  {

    // 学习强国 (Xuexi Qiangguo) search page
    public  final static    String xxqgUrl =   "https://static.xuexi.cn/search/online/index.html";

    // 中国日报 (China Daily) search API
    public  final  static  String  zgrbUrl = "https://newssearch.chinadaily.com.cn/rest/cn/search?sort=dp&curType=story";

    // CCTV news search
    public  final  static  String  cctvUrl = "https://search.cctv.com/search.php?sort=relevance&type=web&vtime=&datepid=1&channel=";


    /**
     * Ad-hoc manual smoke test: crawls two pages of people.cn search results
     * for the keyword below and prints each record.
     */
    public static void main(String[] args) throws Exception {
        List<String> searchTerms = new ArrayList<>();
        searchTerms.add("南京");
        getPeopleResponse(searchTerms, 2, 0L, 0L);
    }


    /**
     * 获取学习强国新闻
     * @param keywords
     * @param page
     * @return
     * @throws Exception
     */
    /**
     * Crawls 学习强国 search results for every keyword across pages 1..{@code page}.
     *
     * @param keywords search terms; a null/empty list yields an empty result
     * @param page     number of result pages to fetch per keyword (inclusive)
     * @return aggregated crawl records, never null
     * @throws Exception propagated from the underlying page fetch
     */
    public static List<CrawlerInfo> getXXQGNews(List<String> keywords, Integer page) throws Exception {
        List<CrawlerInfo> results = new ArrayList<>();
        if (CollectionUtils.isEmpty(keywords)) {
            return results;
        }
        for (int pageNo = 1; pageNo <= page; pageNo++) {
            for (String keyword : keywords) {
                List<CrawlerInfo> pageItems = getXXQGPageResponse(xxqgUrl + "?page=" + pageNo + "&query=" + keyword);
                if (!CollectionUtils.isEmpty(pageItems)) {
                    results.addAll(pageItems);
                }
            }
        }
        return results;
    }


    /**
     * 南京广播tv
     * @param keywords
     * @param page
     * @return
     * @throws Exception
     */
    /**
     * Crawls Nanjing Broadcasting (nbs.cn) search results for every keyword
     * across pages 1..{@code page}.
     *
     * @param keywords search terms; a null/empty list yields an empty result
     * @param page     number of result pages to fetch per keyword (inclusive)
     * @return aggregated crawl records, never null
     * @throws Exception propagated from the underlying page fetch
     */
    public static List<CrawlerInfo> getNanJingGuangBoNews(List<String> keywords, Integer page) throws Exception {
        List<CrawlerInfo> results = new ArrayList<>();
        if (CollectionUtils.isEmpty(keywords)) {
            return results;
        }
        for (int pageNo = 1; pageNo <= page; pageNo++) {
            for (String keyword : keywords) {
                String searchUrl = "http://search.nbs.cn/search?page=" + pageNo + "&channelid=272722&searchword=" + keyword + "&keyword=" + keyword + "&perpage=10&outlinepage=10&orderby=-DOCID2";
                List<CrawlerInfo> pageItems = getNanJingGuangBoPageResponse(searchUrl);
                if (!CollectionUtils.isEmpty(pageItems)) {
                    results.addAll(pageItems);
                }
            }
        }
        return results;
    }

    /**
     * 新华业报网
     * @param keywords
     * @param page
     * @return
     * @throws Exception
     */
    /**
     * Crawls 新华业报网 (so.jschina.com.cn) search results for every keyword
     * across pages 1..{@code page}.
     *
     * @param keywords search terms; a null/empty list yields an empty result
     * @param page     number of result pages to fetch per keyword (inclusive)
     * @return aggregated crawl records, never null
     * @throws Exception propagated from the underlying page fetch
     */
    public static List<CrawlerInfo> getXinHuaYeBaoNews(List<String> keywords, Integer page) throws Exception {
        List<CrawlerInfo> results = new ArrayList<>();
        if (CollectionUtils.isEmpty(keywords)) {
            return results;
        }
        for (int pageNo = 1; pageNo <= page; pageNo++) {
            for (String keyword : keywords) {
                // Example: http://so.jschina.com.cn/was5/web/search?page=3&channelid=220670&searchword=...&orderby=-DocPubTime
                String searchUrl = "http://so.jschina.com.cn/was5/web/search?page=" + pageNo + "&channelid=220670&searchword=" + keyword + "&keyword=" + keyword + "&perpage=10&orderby=-DocPubTime&token=6.1422004478651.24";
                System.out.println(searchUrl);
                List<CrawlerInfo> pageItems = getXinHuaYeBaoPageResponse(searchUrl);
                if (!CollectionUtils.isEmpty(pageItems)) {
                    results.addAll(pageItems);
                }
            }
        }
        return results;
    }


    /**
     *
     * http://so.jschina.com.cn/was5/web/search?page=3&channelid=220670&searchword=%E5%8D%97%E4%BA%AC&keyword=%E5%8D%97%E4%BA%AC&orderby=-DocPubTime&token=16.1422004478651.24
     * @param
     * @return
     * @throws Exception
     */
    /**
     * Parses one page of 新华业报网 (so.jschina.com.cn) search results into crawler records.
     *
     * Each hit's title is an element with class "searchresulttitle"; its grandparent
     * contains a "pubtime" node with the publish date (observed format "yyyy.MM.dd...").
     *
     * @param url fully-built search URL
     * @return one record per result on the page, never null
     * @throws Exception propagated from the HTTP fetch
     */
    public static List<CrawlerInfo> getXinHuaYeBaoPageResponse(String url) throws Exception {
        String result = HttpUtils.get(url);

        List<CrawlerInfo> crawlerInfoList = new ArrayList<>();

        Document doc = Jsoup.parse(result);

        Elements elementsByClass = doc.getElementsByClass("searchresulttitle");

        for (Element byClass : elementsByClass) {
            String href = byClass.attr("href");
            String title = byClass.text();
            String pubtime = byClass.parent().parent().getElementsByClass("pubtime").text();
            System.out.println(href + "-" + title + "-" + pubtime);

            CrawlerInfo crawlerInfo = new CrawlerInfo();
            crawlerInfo.setId(getHrefId(href, "shtml"));
            // Robustness fix: the original unconditionally called substring(0, 10) and threw
            // StringIndexOutOfBoundsException whenever the pubtime text was missing or short.
            if (pubtime.length() >= 10) {
                crawlerInfo.setArticleTime(pubtime.substring(0, 10).replace(".", "-"));
            }
            crawlerInfo.setArticleName(title);
            crawlerInfo.setArticleUrl(href);
            crawlerInfo.setSourceName("新华业报网");
            crawlerInfo.setScoureUrl("http://so.jschina.com.cn/");
            crawlerInfoList.add(crawlerInfo);
        }
        return crawlerInfoList;
    }

    /**
     * 南京广播电视台
     * http://search.nbs.cn/search?page=3&channelid=272722&searchword=%E5%8D%97%E4%BA%AC&keyword=%E5%8D%97%E4%BA%AC&perpage=10&outlinepage=10&orderby=-DOCID2
     * @param
     * @return
     * @throws Exception
     */
    public static List<CrawlerInfo> getNanJingGuangBoPageResponse(String url) throws Exception {
        //   WebClient  webClient = getClient();

        String result =  HttpUtils.dogetJson( url);

        List<CrawlerInfo> crawlerInfoList = new ArrayList<>();

        Document doc = Jsoup.parse(result);

        // Each search hit sits inside an element with class "fix"; its children hold
        // the anchor (title + link) and a <span> with the publish date.
        Elements elementsByClass = doc.getElementsByClass("fix");

        for (Element byClass : elementsByClass) {

            Elements children = byClass.children();
            CrawlerInfo crawlerInfo = new CrawlerInfo();

            for (Element child : children) {
                Elements hrefEle = child.getElementsByTag("a");
                if(hrefEle.size()>0){
                    String href = hrefEle.attr("href");
                    // Accept only single-anchor children whose link is a real article URL:
                    // skip "#" placeholders and internal pagination links ("search?&channelid=").
                    if(hrefEle.size()<=1 && !StringUtils.isEmpty(href) && !href.equals("#")&&!href.contains("search?&channelid=")){
                        crawlerInfo.setArticleUrl(href);
                        String title = hrefEle.text();

                        // Record id = last URL path segment with its ".html" suffix stripped.
                        crawlerInfo.setId(getHrefId(  href,"html"));
                        crawlerInfo.setArticleName(title);
                        Elements timeEle = byClass.getElementsByTag("span");

                        if(timeEle.size()>0){
                            String time = timeEle.text();
                            // NOTE(review): assumes the span text starts with a 10-char
                            // "yyyy.MM.dd" date; shorter text would throw — confirm against live markup.
                            crawlerInfo.setArticleTime(time.substring(0,10).replace(".","-"));
                        }
                        crawlerInfo.setScoureUrl("http://www.nbs.cn/");
                        crawlerInfo.setSourceName("南京广播电视台");
                        System.out.println(crawlerInfo);
                        // NOTE(review): the same CrawlerInfo instance may be added more than once
                        // if several children of one "fix" element pass the filter above.
                        crawlerInfoList.add(crawlerInfo);
                    }

                }

            }

        }

        return crawlerInfoList;
    }
    /**
     * 获取cctv新闻
     * @param keywords
     * @param page
     * @return
     * @throws Exception
     */
    /**
     * Crawls CCTV news search results for every keyword across pages 1..{@code page}.
     *
     * @param keywords search terms; a null/empty list yields an empty result
     * @param page     number of result pages to fetch per keyword (inclusive)
     * @return aggregated crawl records, never null
     * @throws Exception propagated from the underlying page fetch
     */
    public static List<CrawlerInfo> getCCTVNews(List<String> keywords, Integer page) throws Exception {
        List<CrawlerInfo> results = new ArrayList<>();
        if (CollectionUtils.isEmpty(keywords)) {
            return results;
        }
        for (int pageNo = 1; pageNo <= page; pageNo++) {
            for (String keyword : keywords) {
                List<CrawlerInfo> pageItems = getCCTVNewsPageResponse(cctvUrl + "&page=" + pageNo + "&qtext=" + keyword);
                if (!CollectionUtils.isEmpty(pageItems)) {
                    results.addAll(pageItems);
                }
            }
        }
        return results;
    }





    /**
     * https://so.toutiao.com/search?keyword=%E5%8D%97%E4%BA%AC&dvpf=pc&page_num=1&min_time=1628006400&max_time=1628092799
     * 中国日报
     * @param
     * @return
     * @throws Exception
     */
    public static List<CrawlerInfo> getZGRBPageResponse(List<String > keywords,Integer page) throws Exception {

        List<CrawlerInfo>  crawlerInfoList = new ArrayList<>();

        if(!CollectionUtils.isEmpty(keywords)){
            // NOTE(review): the page counter runs 0..page INCLUSIVE, so page+1 pages are
            // fetched per keyword — unlike the sibling crawlers that run 1..page. The
            // chinadaily API appears to be 0-based; confirm the intended page count.
            for(int i =0;i<=page;i++){
                for (String keyword : keywords) {
                    String  url = zgrbUrl+"&keywords="+keyword+"&page="+i;
                    String s = HttpUtils.get(url);
                    JSONObject jsonObject = JSONObject.parseObject(s);
                    if(jsonObject !=null){
                        // Results live in the "content" array of the JSON response.
                        JSONArray jsonArray = jsonObject.getJSONArray("content");
                        if(jsonArray !=null){
                            for (Object o : jsonArray) {
                                CrawlerInfo crawlerInfo = new CrawlerInfo();
                                JSONObject content = JSONObject.parseObject(o.toString());
                                // NOTE(review): each .get(...).toString() below NPEs if the
                                // field is absent from a result item — confirm the API always
                                // returns id/url/source/pubDateStr/title.
                                String id=  content.get("id").toString();
                                String articleUrl=  content.get("url").toString();
                                String source=  content.get("source").toString();
                                String pubDateStr=  content.get("pubDateStr").toString();
                                String title =  content.get("title").toString();

                                crawlerInfo.setId(id);
                                crawlerInfo.setSourceName("中国日报");
                                crawlerInfo.setArticleUrl(articleUrl);
                                crawlerInfo.setLayout(source);
                                crawlerInfo.setScoureUrl("http://newssearch.chinadaily.com.cn/search");
                                // Drops the last 5 chars of pubDateStr (presumably a time
                                // suffix) — TODO confirm the exact server-side date format.
                                crawlerInfo.setArticleTime(pubDateStr.substring(0,pubDateStr.length()-5));
                                crawlerInfo.setArticleName(title);
                                crawlerInfoList.add(crawlerInfo);


                            }
                        }

                    }
                }
            }


        }
        return   crawlerInfoList ;




    }



    /**
     * http://s.cnr.cn/index_sub.html?key=%E7%96%AB%E6%83%85
     * 央广网
     * @param
     * @return
     * @throws Exception
     * starttime":"2021/08/01'
     */
    /**
     * Crawls 央广网 (cnr.cn) search results for every keyword across pages 1..{@code page}.
     *
     * Posts a JSON body {page, doctitle, starttime, endtime, size} to the cnr.cn search
     * endpoint and maps each entry of the "datas" array to a CrawlerInfo.
     *
     * @param keywords  search terms; a null/empty list yields an empty result
     * @param page      number of result pages to fetch per keyword (inclusive)
     * @param startTime filter lower bound, format "yyyy/MM/dd" (see class usage notes)
     * @param endTime   filter upper bound, same format
     * @return aggregated crawl records, never null
     * @throws Exception propagated from the HTTP call
     */
    public static List<CrawlerInfo> getYGWPageResponse(List<String> keywords, Integer page, String startTime, String endTime) throws Exception {

        List<CrawlerInfo> crawlerInfoList = new ArrayList<>();

        if (!CollectionUtils.isEmpty(keywords)) {
            for (int i = 1; i <= page; i++) {
                for (String keyword : keywords) {
                    HashMap<String, String> paramMap = new HashMap<>();
                    paramMap.put("page", String.valueOf(i));
                    paramMap.put("doctitle", keyword);
                    paramMap.put("starttime", startTime);
                    paramMap.put("endtime", endTime);
                    paramMap.put("size", "10");

                    String s = HttpUtils.doPostJson("http://s.cnr.cn/search", JSONObject.toJSONString(paramMap));
                    JSONObject jsonObject = JSONObject.parseObject(s);
                    if (jsonObject != null) {
                        JSONArray jsonArray = JSONObject.parseArray(jsonObject.getString("datas"));
                        if (jsonArray != null && jsonArray.size() > 0) {
                            for (Object o : jsonArray) {
                                CrawlerInfo crawlerInfo = new CrawlerInfo();
                                JSONObject content = JSONObject.parseObject(o.toString());
                                String articleUrl = content.get("docpuburl").toString();
                                String source = content.get("docsourcename").toString();
                                String pubDateStr = content.get("crtime").toString();
                                String title = content.get("doctitle").toString();
                                String[] split = articleUrl.split("/");
                                crawlerInfo.setId(split[split.length - 1].replace(".shtml", ""));
                                crawlerInfo.setSourceName("央广网");
                                crawlerInfo.setArticleUrl(articleUrl);
                                crawlerInfo.setLayout(source);
                                crawlerInfo.setScoureUrl("http://s.cnr.cn/search");
                                // Bug fix: substring(0, 9) dropped the final day digit of the
                                // "yyyy/MM/dd..." crtime value ("2021/08/01" -> "2021/08/0");
                                // a full date is 10 characters. Guarded for short values.
                                if (pubDateStr.length() >= 10) {
                                    crawlerInfo.setArticleTime(pubDateStr.substring(0, 10).replace("/", "-"));
                                } else {
                                    crawlerInfo.setArticleTime(pubDateStr.replace("/", "-"));
                                }
                                crawlerInfo.setArticleName(filenameFilter(title));
                                crawlerInfoList.add(crawlerInfo);
                            }
                        }
                    }
                }
            }
        }
        return crawlerInfoList;
    }


    /**
     * http://search.people.cn/s?keyword=%E7%96%AB%E6%83%85&st=0&_=1628154718194
     * 人民网
     * @param
     * @return
     * @throws Exception
     * starttime":"2021/08/01'
     * {"key":"南京","page":1,"limit":10,"hasTitle":true,"hasContent":true,"isFuzzy":true,"type":0,"sortType":2,"startTime":1628092800000,"endTime":1628175600000}
     */
    /**
     * Crawls 人民网 (people.cn) search results for every keyword across pages 1..{@code page}.
     *
     * The documented request payload (see the endpoint sample above) is
     * {"key":..., "page":1, "limit":10, "hasTitle":true, "hasContent":..., "isFuzzy":true,
     *  "type":0, "sortType":2, "startTime":..., "endTime":...}.
     *
     * @param keywords  search terms; a null/empty list yields an empty result
     * @param page      number of result pages to fetch per keyword (inclusive)
     * @param startTime filter lower bound, epoch milliseconds
     * @param endTime   filter upper bound, epoch milliseconds
     * @return aggregated crawl records, never null
     * @throws Exception propagated from the HTTP call
     */
    public static List<CrawlerInfo> getPeopleResponse(List<String> keywords, Integer page, Long startTime, Long endTime) throws Exception {

        List<CrawlerInfo> crawlerInfoList = new ArrayList<>();

        if (!CollectionUtils.isEmpty(keywords)) {
            for (int i = 1; i <= page; i++) {
                for (String keyword : keywords) {
                    HashMap<String, Object> paramMap = new HashMap<>();
                    // Bug fix: the payload previously sent the keyword under "doctitle"
                    // (copy-paste from the cnr.cn crawler); the people.cn API expects "key".
                    // "page"/"limit" are sent as numbers, matching the documented payload.
                    paramMap.put("page", i);
                    paramMap.put("key", keyword);
                    paramMap.put("startTime", startTime);
                    paramMap.put("endTime", endTime);
                    paramMap.put("hasTitle", true);
                    paramMap.put("hasContent", false); // title-only match; sample payload uses true — confirm intent
                    paramMap.put("isFuzzy", true);
                    paramMap.put("type", 0);
                    paramMap.put("sortType", 2);
                    paramMap.put("limit", 10);

                    String s = HttpUtils.doPostJson("http://search.people.cn/api-search/front/search", JSONObject.toJSONString(paramMap));
                    JSONObject jsonObject = JSONObject.parseObject(s);
                    if (jsonObject != null) {
                        // Robustness fix: "data" may be absent; the original NPE'd on getJSONObject.
                        JSONObject data = jsonObject.getJSONObject("data");
                        JSONArray jsonArray = data == null ? null : JSONObject.parseArray(data.getString("records"));
                        if (jsonArray != null && jsonArray.size() > 0) {
                            for (Object o : jsonArray) {
                                CrawlerInfo crawlerInfo = new CrawlerInfo();
                                JSONObject content = JSONObject.parseObject(o.toString());
                                String articleUrl = content.get("url").toString();
                                String source = content.get("originalName").toString();
                                String pubDateStr = content.get("inputTime").toString();
                                String title = content.get("title").toString();
                                String[] split = articleUrl.split("/");
                                crawlerInfo.setId(split[split.length - 1].replace(".html", ""));
                                crawlerInfo.setSourceName("人民网");
                                crawlerInfo.setArticleUrl(articleUrl);
                                crawlerInfo.setLayout(source);
                                // Bug fix: source URL was a copy-paste of the cnr.cn endpoint.
                                crawlerInfo.setScoureUrl("http://search.people.cn/api-search/front/search");
                                crawlerInfo.setArticleTime(stampToDate(Long.valueOf(pubDateStr)));
                                crawlerInfo.setArticleName(filenameFilter(title));
                                System.out.println(crawlerInfo.toString());
                                crawlerInfoList.add(crawlerInfo);
                            }
                        }
                    }
                }
            }
        }
        return crawlerInfoList;
    }


    /**
     * Formats an epoch-millisecond timestamp as "yyyy-MM-dd" in the default time zone.
     *
     * Bug fix: the previous implementation ignored the {@code timestamp} argument and
     * always formatted the current time (using the JDK-internal com.sun.jmx.snmp.Timestamp
     * class); it now formats the supplied value via java.util.Date.
     *
     * @param timestamp epoch milliseconds; may be null
     * @return the formatted date, or "" when timestamp is null or formatting fails
     */
    public static String stampToDate(Long timestamp) {
        String tsStr = "";
        if (timestamp == null) {
            return tsStr;
        }
        DateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
        try {
            tsStr = sdf.format(new Date(timestamp));
        } catch (Exception e) {
            e.printStackTrace();
        }
        return tsStr;
    }




    /**
     *中国江苏网
     * @param keywords
     * @param page
     * @return
     * @throws Exception
     */
    /**
     * Crawls 中国江苏网 (jschina.com.cn) search results for every keyword across
     * pages 1..{@code page}. The remote endpoint's "p" parameter is 0-based, hence (i - 1).
     *
     * @param keywords search terms; a null/empty list yields an empty result
     * @param page     number of result pages to fetch per keyword (inclusive)
     * @return aggregated crawl records, never null
     * @throws Exception propagated from the underlying page fetch
     */
    public static List<CrawlerInfo> getChinaJiangsuNews(List<String> keywords, Integer page) throws Exception {
        List<CrawlerInfo> results = new ArrayList<>();
        if (CollectionUtils.isEmpty(keywords)) {
            return results;
        }
        for (int pageNo = 1; pageNo <= page; pageNo++) {
            for (String keyword : keywords) {
                String searchUrl = "http://search.jschina.com.cn/cse/search?q=" + keyword + "&p=" + (pageNo - 1) + "&s=8349451817408476651&nsid=0";
                List<CrawlerInfo> pageItems = getChinaJiangSuPageResponse(searchUrl);
                if (!CollectionUtils.isEmpty(pageItems)) {
                    results.addAll(pageItems);
                }
            }
        }
        return results;
    }




    /**
     * 中国江苏网
     * http://search.jschina.com.cn/cse/search?q=%E5%8D%97%E4%BA%AC&p=0&s=8349451817408476651&nsid=0
     * @param
     * @return
     * @throws Exception
     */
    public static List<CrawlerInfo> getChinaJiangSuPageResponse(String url) throws Exception {
        //   WebClient  webClient = getClient();

        String result =  HttpUtils.dogetJson( url);

        List<CrawlerInfo> crawlerInfoList = new ArrayList<>();

        Document doc = Jsoup.parse(result);

        // Each hit title sits in an element with class "c-title"; a sibling "c-showurl"
        // node (under the same parent) carries the display URL / date text.
        Elements elementsByClass = doc.getElementsByClass("c-title");

        for (Element byClass : elementsByClass) {

            Elements children = byClass.children();
            CrawlerInfo crawlerInfo = new CrawlerInfo();

            for (Element child : children) {
                Elements hrefEle = child.getElementsByTag("a");
                if(hrefEle.size()>0){
                    String href = hrefEle.attr("href");
                    // Accept only single-anchor children that link to a real article (".shtml").
                    if(hrefEle.size()<=1 && !StringUtils.isEmpty(href) && !href.equals("#")&&href.contains(".shtml")){
                        crawlerInfo.setArticleUrl(href);
                        String title = hrefEle.text();

                        // Record id = last URL path segment with its ".shtml" suffix stripped.
                        crawlerInfo.setId(getHrefId(  href,"shtml"));
                        crawlerInfo.setArticleName(title);
                        Elements timeEle = byClass.parent().getElementsByClass("c-showurl");

                        if(timeEle.size()>0){
                            String time = timeEle.text();
                            // NOTE(review): String.split takes a REGEX, so "... " matches any
                            // three characters followed by a space — not the literal ellipsis
                            // it appears to target. Verify against the actual "c-showurl" text
                            // before changing (a literal split would be split("\\.\\.\\. ")).
                            String[] split = time.split("... ");
                            crawlerInfo.setArticleTime(split[split.length-1]);
                        }
                        crawlerInfo.setScoureUrl("http://www.jschina.com.cn/");
                        crawlerInfo.setSourceName("中国江苏网");
                        System.out.println(crawlerInfo);
                        crawlerInfoList.add(crawlerInfo);
                    }

                }

            }

        }

        return crawlerInfoList;
    }

    /**
     * http://news.cri.cn/search/q?page=3&pageSize=25&type=0&qtext=%E5%8D%97%E4%BA%AC&lang=cn
     * 国际在线
     * @param
     * @return
     * @throws Exception
     */
    public static List<CrawlerInfo> getGJZXageResponse(List<String > keywords,Integer page) throws Exception {

        List<CrawlerInfo>  crawlerInfoList = new ArrayList<>();

        if(!CollectionUtils.isEmpty(keywords)){
            for(int i =1;i<=page;i++){
                for (String keyword : keywords) {
                    String  url = "http://news.cri.cn/search/q?pageSize=25&type=0&lang=cn&qtext="+keyword+"&page="+i;

                    String s = HttpUtils.get(url);
                    JSONObject jsonObject = JSONObject.parseObject(s);
                    if(jsonObject !=null){
                        // Results live in the "data" array of the JSON response.
                        JSONArray jsonArray = JSONObject.parseArray(jsonObject.getString("data"));
                        if(jsonArray !=null  && jsonArray.size()>0){
                            for (Object o : jsonArray) {
                                CrawlerInfo crawlerInfo = new CrawlerInfo();
                                JSONObject content = JSONObject.parseObject(o.toString());
                                // NOTE(review): each .get(...).toString() below NPEs if the
                                // field is absent — confirm the API always returns
                                // source/publishTimeStr/title and a "urls.pc" entry.
                                String source=  content.get("source").toString();
                                String pubDateStr=  content.get("publishTimeStr").toString();
                                String title =  content.get("title").toString();
                                String articleUrl =  content.getJSONObject("urls").get("pc").toString();


                                // Record id = last URL path segment, minus ".html" and all dashes.
                                String[] split = articleUrl.split("/");
                                crawlerInfo.setId(split[split.length-1].replace(".html","").replace("-",""));
                                crawlerInfo.setSourceName("央广网国际在线");
                                crawlerInfo.setArticleUrl(articleUrl);
                                crawlerInfo.setLayout(source);
                                crawlerInfo.setScoureUrl("http://news.cri.cn/search");
                                crawlerInfo.setArticleTime(pubDateStr);
                                crawlerInfo.setArticleName(filenameFilter( title));
                                System.out.println(crawlerInfo.toString());
                                crawlerInfoList.add(crawlerInfo);


                            }
                        }

                    }
                }
            }


        }
        return   crawlerInfoList ;




    }




    /**
     * http://so.news.cn/getNews?keyword=%E5%8D%97%E4%BA%AC&curPage=1&sortField=0&searchFields=1&lang=cn
     * 新新华网
     * @param
     * @return
     * @throws Exception
     */
    /**
     * Crawls 新华网 (so.news.cn) search results for every keyword across pages 1..{@code page}.
     *
     * Maps each entry of content.results in the JSON response to a CrawlerInfo.
     *
     * @param keywords search terms; a null/empty list yields an empty result
     * @param page     number of result pages to fetch per keyword (inclusive)
     * @return aggregated crawl records, never null
     * @throws Exception propagated from the HTTP call
     */
    public static List<CrawlerInfo> getXinhuaResponse(List<String> keywords, Integer page) throws Exception {

        List<CrawlerInfo> crawlerInfoList = new ArrayList<>();

        if (!CollectionUtils.isEmpty(keywords)) {
            for (int i = 1; i <= page; i++) {
                for (String keyword : keywords) {
                    String url = "http://so.news.cn/getNews?sortField=0&searchFields=1&lang=cn&keyword=" + keyword + "&curPage=" + i;

                    String s = HttpUtils.get(url);
                    // Robustness fix: parseObject(s) returns null for a null/empty body, so the
                    // original chained .getJSONObject("content") threw an NPE before the
                    // existing null check could run.
                    JSONObject root = JSONObject.parseObject(s);
                    JSONObject jsonObject = root == null ? null : root.getJSONObject("content");
                    if (jsonObject != null) {
                        JSONArray jsonArray = JSONObject.parseArray(jsonObject.getString("results"));
                        if (jsonArray != null && jsonArray.size() > 0) {
                            for (Object o : jsonArray) {
                                CrawlerInfo crawlerInfo = new CrawlerInfo();
                                JSONObject content = JSONObject.parseObject(o.toString());
                                String source = content.get("sitename").toString();
                                String pubDateStr = content.get("pubtime").toString().substring(0, 10);
                                String title = content.get("title").toString();
                                String articleUrl = content.get("url").toString();

                                // NOTE(review): a random suffix is appended to the id, presumably
                                // to avoid key collisions across pages — ids are therefore
                                // non-deterministic; confirm downstream de-duplication relies on this.
                                crawlerInfo.setId(getHrefId(articleUrl, "htm") + new Random().nextInt(99999));
                                crawlerInfo.setSourceName("新华网");
                                crawlerInfo.setArticleUrl(articleUrl);
                                crawlerInfo.setLayout(source);
                                crawlerInfo.setScoureUrl("http://so.news.cn/");
                                crawlerInfo.setArticleTime(pubDateStr);
                                crawlerInfo.setArticleName(filenameFilter(title));
                                System.out.println(crawlerInfo.toString());
                                crawlerInfoList.add(crawlerInfo);
                            }
                        }
                    }
                }
            }
        }
        return crawlerInfoList;
    }



    /**
     * Strips HTML markup and file-name-illegal characters from {@code htmlStr}.
     *
     * Processing order: remove &lt;script&gt;...&lt;/script&gt; and &lt;style&gt;...&lt;/style&gt;
     * blocks (including their contents), remove all remaining tags, trim, drop the
     * characters \ / : * ? " &lt; &gt; |, then remove all whitespace.
     *
     * @param htmlStr raw HTML/text; must not be null
     * @return the cleaned text, never null
     */
    public static String filenameFilter(String htmlStr) {
        String regExScript = "<script[^>]*?>[\\s\\S]*?<\\/script>"; // <script> blocks, incl. contents
        String regExStyle = "<style[^>]*?>[\\s\\S]*?<\\/style>";    // <style> blocks, incl. contents
        String regExHtml = "<[^>]+>";                               // any remaining tag

        htmlStr = Pattern.compile(regExScript, Pattern.CASE_INSENSITIVE).matcher(htmlStr).replaceAll("");
        htmlStr = Pattern.compile(regExStyle, Pattern.CASE_INSENSITIVE).matcher(htmlStr).replaceAll("");
        htmlStr = Pattern.compile(regExHtml, Pattern.CASE_INSENSITIVE).matcher(htmlStr).replaceAll("");

        String str = htmlStr.trim();
        // Characters that are invalid in Windows file names.
        str = Pattern.compile("[\\\\/:*?\"<>|]").matcher(str).replaceAll("");
        // Remove all whitespace. (Cleanup: the original also chained replaceAll("", "") —
        // a no-op — and an unreachable null check after trim(), both removed.)
        str = str.replaceAll("\\s*", "");
        return str;
    }



    /**
     * 学习强国
     * @param
     * @return
     * @throws Exception
     */
    /**
     * Renders one 学习强国 search page with HtmlUnit (the page is JS-driven) and parses
     * the "c-card" result cards into crawler records.
     *
     * @param url fully-built search URL
     * @return one record per "rt-kpicture" card child, never null
     * @throws Exception propagated from the page fetch
     */
    public static List<CrawlerInfo> getXXQGPageResponse(String url) throws Exception {
        WebClient webClient = getClient();
        List<CrawlerInfo> crawlerInfoList = new ArrayList<>();
        // Resource fix: close the client even when fetching/parsing throws
        // (the original leaked it on any exception before webClient.close()).
        try {
            String result = getHtml(url, webClient);

            Document doc = Jsoup.parse(result);

            Elements elementsByClass = doc.getElementsByClass("c-card");

            for (Element byClass : elementsByClass) {
                Elements children = byClass.children();
                for (Element child : children) {

                    if (child.className().equals("rt-kpicture")) {

                        Document dataDoc = Jsoup.parse(child.html());
                        CrawlerInfo crawlerInfo = new CrawlerInfo();
                        crawlerInfo.setScoureUrl(xxqgUrl);
                        String href = child.attr("href");
                        crawlerInfo.setSourceName("学习强国");
                        // Id = everything after "item_id=" in the link.
                        // NOTE(review): if "item_id" is absent this becomes substring(7) —
                        // confirm the link always carries the parameter.
                        crawlerInfo.setId(href.substring(href.indexOf("item_id") + 8, href.length()));

                        crawlerInfo.setArticleUrl(child.attr("href"));
                        crawlerInfo.setArticleName(dataDoc.getElementsByClass("title").text());
                        crawlerInfo.setArticleTime(dataDoc.getElementsByClass("time").text().replace("发布时间: ", ""));
                        crawlerInfoList.add(crawlerInfo);
                    }
                }
            }
        } finally {
            webClient.close();
        }

        return crawlerInfoList;
    }



    /**
     * 央视新闻网
     * https://search.cctv.com/search.php?qtext=%E5%8D%97%E4%BA%AC&sort=relevance&type=web&vtime=&datepid=1&channel=&page=2
     * @param
     * @return
     * @throws Exception
     */
    public static List<CrawlerInfo> getCCTVNewsPageResponse(String url) throws Exception {
        WebClient  webClient = getClient();

        String result =  getHtml( url,  webClient);

        List<CrawlerInfo> crawlerInfoList = new ArrayList<>();

        Document doc = Jsoup.parse(result);

        // Each search hit is an element with class "tright"; one CrawlerInfo is built per
        // hit by scanning its children for the link (<span lanmu1=...>), title (<a>) and
        // timestamp (class "tim"). Later matching children overwrite earlier values.
        Elements elementsByClass = doc.getElementsByClass("tright");

        for (Element byClass : elementsByClass) {
            Elements children = byClass.children();
            CrawlerInfo crawlerInfo = new CrawlerInfo();

            for (Element child : children) {
                Elements hrefEle = child.getElementsByTag("span");
                if(hrefEle.size()>0){
                    // The article URL is carried in the custom "lanmu1" attribute of a span.
                    String href = hrefEle.attr("lanmu1");
                    if(!StringUtils.isEmpty(href)){
                        crawlerInfo.setArticleUrl(href);

                        // Record id = last URL path segment with its ".shtml" suffix stripped.
                        crawlerInfo.setId(getHrefId(  href,"shtml"));

                    }

                }
                Elements titleEle = child.getElementsByTag("a");

                if(titleEle.size()>0){
                    String title = titleEle.text();
                    crawlerInfo.setArticleName(title);

                }
                Elements timeEle = child.getElementsByClass("tim");
                if(timeEle.size()>0){
                    // NOTE(review): substring(5, 15) assumes a fixed-width prefix before a
                    // 10-char date in the "tim" text — confirm against live markup; shorter
                    // text throws StringIndexOutOfBoundsException.
                    String time = timeEle.text().substring(5,15);
                    crawlerInfo.setArticleTime(time);

                }






                }
            crawlerInfo.setScoureUrl("https://search.cctv.com/search.php");
            crawlerInfo.setSourceName("央视新闻网");
            System.out.println(crawlerInfo);
            // Added unconditionally: a "tright" element with no recognized children still
            // yields a (mostly empty) record.
            crawlerInfoList.add(crawlerInfo);


        }



        // NOTE(review): not reached if parsing throws above — the client would leak.
        webClient.close();

        return crawlerInfoList;
    }

    /**
     * Derives a record id from a URL: takes the last "/"-separated segment and removes
     * every occurrence of "." + endString from it (matching String.replace semantics).
     *
     * @param href      article URL or path
     * @param endString file extension to strip, without the leading dot (e.g. "shtml")
     * @return the cleaned last path segment
     */
    public static String getHrefId(String href, String endString) {
        String tail = href;
        for (String segment : href.split("/")) {
            tail = segment;
        }
        return tail.replace("." + endString, "");
    }

    /**
     * Loads {@code url} with the given HtmlUnit client, waits for background JavaScript
     * to finish, and returns the rendered page as XML.
     *
     * Bug fix: when the fetch failed, the original closed the client but still
     * dereferenced the null {@code page}, guaranteeing a NullPointerException. It now
     * returns "" so callers parse an empty document and produce empty result lists.
     *
     * @param url       page to load
     * @param webClient client to use; closed here only on fetch failure
     * @return the page XML, or "" when the page could not be loaded
     * @throws InterruptedException declared for interface compatibility
     */
    public static String getHtml(String url, WebClient webClient) throws InterruptedException {
        HtmlPage page = null;
        try {
            webClient.getOptions().setUseInsecureSSL(true);
            page = webClient.getPage(url);
        } catch (Exception e) {
            e.printStackTrace();
            webClient.close();
        }
        if (page == null) {
            return "";
        }
        // Blocks the current thread until background JS completes (up to 30s).
        webClient.waitForBackgroundJavaScript(30000);
        return page.asXml();
    }




    /**
     * Builds a Chrome-emulating HtmlUnit client tuned for scraping: lenient about script
     * and HTTP errors, CSS and ActiveX disabled, JavaScript enabled with a resynchronizing
     * AJAX controller, 30-second timeouts, redirects followed.
     */
    public static WebClient getClient() {
        WebClient client = new WebClient(BrowserVersion.CHROME);
        client.getOptions().setThrowExceptionOnScriptError(false);
        client.getOptions().setThrowExceptionOnFailingStatusCode(false);
        client.getOptions().setActiveXNative(false);
        client.getOptions().setCssEnabled(false);
        client.getOptions().setJavaScriptEnabled(true);
        client.getOptions().setTimeout(30000);
        client.getOptions().setRedirectEnabled(true);
        client.setAjaxController(new NicelyResynchronizingAjaxController());
        client.setJavaScriptTimeout(30000);
        return client;
    }
    /**
     * 江苏工人报
     * http://epaper.jsgrb.com/Media/jsgrb/2016-07-15
     * @param
     * @return
     * @throws Exception
     */
    /**
     * Renders a 江苏工人报 e-paper edition page (e.g. http://epaper.jsgrb.com/Media/jsgrb/2016-07-15)
     * and collects its section links.
     *
     * @param url edition page URL
     * @return map of absolute section URL -> section title, never null
     * @throws Exception propagated from the page fetch
     */
    public static Map<String, String> getJiangsuGongrenBao(String url) throws Exception {
        WebClient webClient = getClient();
        Map<String, String> resultMap = new HashMap<>();
        // Resource fix: close the client even when fetching/parsing throws
        // (the original leaked it on any exception before webClient.close()).
        try {
            String result = getHtml(url, webClient);

            Document doc = Jsoup.parse(result);

            // Each section link is an element with class "bma" carrying a relative href.
            Elements elementsByClass = doc.getElementsByClass("bma");
            for (Element byClass : elementsByClass) {
                resultMap.put("http://epaper.jsgrb.com" + byClass.attr("href"), byClass.text());
            }
        } finally {
            webClient.close();
        }

        return resultMap;
    }


}
