package org.jeecg.crawler.other;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import org.apache.commons.lang.time.DateFormatUtils;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.jeecg.common.util.DateTimeUtil;
import org.jeecg.modules.crawlerpaper.entity.CrawlerInfo;
import org.jeecgframework.p3.core.util.HttpUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.util.CollectionUtils;

import java.io.IOException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;

/**
 * Ad-hoc crawlers for several Chinese news sites (Workers' Daily, Zijinshan
 * News, Jiangnan Times, Sina News search). Each method fetches a listing or
 * search page and flattens the entries into {@link CrawlerInfo} records.
 */
public class GettCrawler {

    /** Browser-like User-Agent so target sites do not reject the requests. */
    private static final String USER_AGENT =
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36";

    /**
     * Crawls today's article index of 工人日报 (Workers' Daily).
     * <p>
     * Fetches the paper's JSON index for the current date and flattens every
     * news entry of every page (layout) into a {@link CrawlerInfo}.
     *
     * @return one {@link CrawlerInfo} per article; empty list when the index has no pages
     * @throws Exception on HTTP or JSON parsing failures
     */
    public static List<CrawlerInfo> getGongRenRiBao() throws Exception {
        String date = DateFormatUtils.format(new Date(), "yyyy/MM/dd");
        String url = "http://i.workercn.cn/paper/grrb/" + date + ".json";
        String result = HttpUtils.get(url);

        List<CrawlerInfo> resultList = new ArrayList<>();
        JSONArray pages = JSONObject.parseObject(result).getJSONArray("pages");
        if (pages == null) {
            return resultList;
        }
        String sourceUrl = "http://media.workercn.cn/sites/paper/paper_index.html";
        String sourceName = "工人日报";
        for (Object pageObj : pages) {
            JSONObject page = JSONObject.parseObject(pageObj.toString());
            // Page id segments are '-'-separated; the last segment is the layout number.
            String[] ids = page.getString("id").split("-");
            String layout = ids[ids.length - 1];
            JSONArray newsArray = page.getJSONArray("news");
            if (newsArray == null) {
                continue;
            }
            for (Object newsObj : newsArray) {
                JSONObject news = JSONObject.parseObject(newsObj.toString());
                String newsId = news.getString("id");
                // assumes news id has at least 4 '-'-separated segments — TODO confirm against the feed
                String[] splits = newsId.split("-");
                // Fixed: the original literal started with a stray space (" http://..."),
                // which produced an invalid article URL.
                String articleUrl = "http://media.workercn.cn/sites/paper/paper_content.html?type=grrb&date="
                        + DateTimeUtil.getTodayChar8En() + "&page=" + splits[2] + "&index=" + splits[3];
                CrawlerInfo crawlerInfo = new CrawlerInfo();
                crawlerInfo.setArticleTime(DateTimeUtil.getTodayChar8En());
                crawlerInfo.setArticleName(news.getString("title"));
                crawlerInfo.setLayout(layout);
                crawlerInfo.setScoureUrl(sourceUrl);
                crawlerInfo.setArticleUrl(articleUrl);
                crawlerInfo.setId(newsId);
                crawlerInfo.setArticleContent(news.getString("content"));
                crawlerInfo.setSourceName(sourceName);
                resultList.add(crawlerInfo);
            }
        }
        return resultList;
    }

    /**
     * Searches 紫金山新闻 (Zijinshan News) for each keyword across the first
     * {@code page} result pages (20 results per page).
     *
     * @param keywords search terms (typically Chinese); null/empty yields an empty list
     * @param page     number of result pages to fetch per keyword, starting at 1
     * @return collected search hits as {@link CrawlerInfo} records
     * @throws Exception on HTTP or JSON parsing failures
     */
    public static List<CrawlerInfo> getZiJingShanNews(List<String> keywords, Integer page) throws Exception {
        List<CrawlerInfo> crawlerInfoList = new ArrayList<>();
        if (CollectionUtils.isEmpty(keywords)) {
            return crawlerInfoList;
        }
        String sourceUrl = "http://www.zijinshan.org/search";
        String sourceName = "紫金山新闻";
        for (int i = 1; i <= page; i++) {
            for (String keyword : keywords) {
                // Fixed: keywords must be URL-encoded — raw Chinese characters in the
                // query string produce a malformed request.
                String url = "http://www.zijinshan.org/news/getSearchNews?pageSize=20&pageNumber=" + i
                        + "&keyword=" + URLEncoder.encode(keyword, StandardCharsets.UTF_8.name());
                String result = doGet(url);
                JSONArray newsArray = JSON.parseObject(result).getJSONObject("data").getJSONArray("pageData");
                if (newsArray == null) {
                    continue;
                }
                for (Object o : newsArray) {
                    JSONObject jsonObject = JSON.parseObject(o.toString());
                    String time = jsonObject.getString("published_at");
                    // Relative timestamps like "x小时前" mean "today"; absolute ones omit the year.
                    if (time.contains("前")) {
                        time = DateTimeUtil.getTodayChar8En();
                    } else {
                        time = DateTimeUtil.getTodayYear() + "-" + time;
                    }
                    CrawlerInfo crawlerInfo = new CrawlerInfo();
                    crawlerInfo.setId(jsonObject.getString("news_id"));
                    crawlerInfo.setArticleTime(time);
                    crawlerInfo.setArticleName(jsonObject.getString("title"));
                    crawlerInfo.setArticleUrl(jsonObject.getString("share_url"));
                    crawlerInfo.setSourceName(sourceName);
                    crawlerInfo.setScoureUrl(sourceUrl);
                    crawlerInfoList.add(crawlerInfo);
                }
            }
        }
        return crawlerInfoList;
    }

    /**
     * Searches 江南时报 (Jiangnan Times) for each keyword across the first
     * {@code page} result pages, scraping the HTML search-result list.
     *
     * @param keywords search terms (typically Chinese); null/empty yields an empty list
     * @param page     number of result pages to fetch per keyword, starting at 1
     * @return collected search hits as {@link CrawlerInfo} records
     * @throws Exception on HTTP or parsing failures
     */
    public static List<CrawlerInfo> getJiangNanShiBao(List<String> keywords, Integer page) throws Exception {
        List<CrawlerInfo> crawlerInfoList = new ArrayList<>();
        if (CollectionUtils.isEmpty(keywords)) {
            return crawlerInfoList;
        }
        for (int i = 1; i <= page; i++) {
            for (String keyword : keywords) {
                // Fixed: URL-encode the keyword (it appears twice in the query string).
                String encoded = URLEncoder.encode(keyword, StandardCharsets.UTF_8.name());
                String url = "http://so.jschina.com.cn/was5/web/search?page=" + i
                        + "&channelid=200348&searchword=" + encoded + "&keyword=" + encoded
                        + "&perpage=10&outlinepage=10&orderby=-DocPubTime";
                String result = HttpUtils.get(url);
                Document doc = Jsoup.parse(result);
                for (Element resultBlock : doc.getElementsByClass("searchresult")) {
                    for (Element item : resultBlock.getElementsByTag("li")) {
                        String href = item.getElementsByTag("a").get(0).attr("href");
                        String title = item.getElementsByTag("a").get(0).text();
                        String pubtime = item.getElementsByClass("pubtime").text();
                        CrawlerInfo crawlerInfo = new CrawlerInfo();
                        crawlerInfo.setId(SearchNewsCrawler.getHrefId(href, "shtml"));
                        // pubtime begins with a dotted date, e.g. "2021.12.01 ..." -> "2021-12-01".
                        crawlerInfo.setArticleTime(pubtime.substring(0, 10).replace(".", "-"));
                        crawlerInfo.setArticleName(title);
                        crawlerInfo.setArticleUrl(href);
                        crawlerInfo.setSourceName("江南时报网搜索");
                        crawlerInfo.setScoureUrl("http://www.jntimes.cn/");
                        crawlerInfoList.add(crawlerInfo);
                    }
                }
            }
        }
        return crawlerInfoList;
    }

    /**
     * Searches Sina News (新浪新闻) for each keyword across the first
     * {@code page} result pages, scraping the HTML search results.
     *
     * @param keywords search terms; null/empty yields an empty list
     * @param page     number of result pages to fetch per keyword, starting at 1
     * @return collected search hits as {@link CrawlerInfo} records
     * @throws Exception on HTTP or parsing failures
     */
    public static List<CrawlerInfo> getSinaNews(List<String> keywords, Integer page) throws Exception {
        List<CrawlerInfo> crawlerInfoList = new ArrayList<>();
        if (CollectionUtils.isEmpty(keywords)) {
            return crawlerInfoList;
        }
        for (int i = 1; i <= page; i++) {
            for (String keyword : keywords) {
                // Fixed: URL-encode the keyword before interpolating it into the query.
                String url = "https://search.sina.com.cn/news?q="
                        + URLEncoder.encode(keyword, StandardCharsets.UTF_8.name())
                        + "&from=home&range=all&size=10&dpc=0&ps=0&pf=0&page=" + i;
                String result = HttpUtils.get(url);
                Document doc = Jsoup.parse(result);
                for (Element box : doc.getElementsByClass("box-result clearfix")) {
                    String href = box.getElementsByTag("a").get(0).attr("href");
                    String title = box.getElementsByTag("a").get(0).text();
                    String pubtime = box.getElementsByClass("fgray_time").text();
                    // Expected shape: "<source> <date> ..."; first token is the outlet name.
                    String[] time = pubtime.split(" ");
                    // Fixed: guard the index — the original threw ArrayIndexOutOfBoundsException
                    // whenever pubtime contained no space.
                    if (time.length < 2 || time[1].contains("前")) {
                        pubtime = DateTimeUtil.getTodayChar8En();
                    } else {
                        pubtime = time[1];
                    }
                    CrawlerInfo crawlerInfo = new CrawlerInfo();
                    crawlerInfo.setId(SearchNewsCrawler.getHrefId(href, "").replace("article_", ""));
                    crawlerInfo.setArticleTime(pubtime);
                    crawlerInfo.setArticleName(title);
                    crawlerInfo.setLayout(time[0]);
                    crawlerInfo.setArticleUrl(href);
                    crawlerInfo.setSourceName("新浪新闻");
                    crawlerInfo.setScoureUrl("https://search.sina.com.cn/");
                    crawlerInfoList.add(crawlerInfo);
                }
            }
        }
        return crawlerInfoList;
    }

    /**
     * Manual smoke-test entry point. (The original fetched an unrelated
     * hardcoded media URL — leftover debug code, removed.)
     */
    public static void main(String[] args) throws Exception {
        List<String> keywords = new ArrayList<>();
        keywords.add("玄武");
        List<CrawlerInfo> news = getSinaNews(keywords, 2);
        System.out.println("fetched " + news.size() + " articles");
    }

    /**
     * Performs an HTTP GET with a browser User-Agent and returns the response
     * body decoded as UTF-8.
     * <p>
     * Fixed: the original leaked both the {@code CloseableHttpClient} and the
     * {@code CloseableHttpResponse}; try-with-resources now closes them.
     *
     * @param url absolute URL to fetch
     * @return response body as a UTF-8 string
     * @throws IOException on connection or read failure
     */
    public static String doGet(String url) throws IOException {
        try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
            HttpGet httpGet = new HttpGet(url);
            httpGet.addHeader("User-Agent", USER_AGENT);
            try (CloseableHttpResponse httpResponse = httpClient.execute(httpGet)) {
                HttpEntity httpEntity = httpResponse.getEntity();
                return EntityUtils.toString(httpEntity, "utf-8");
            }
        }
    }

}
