package common.main.news36Ke;

import com.google.gson.Gson;
import common.http.HtmlInfo;
import common.http.SimpleHttp;
import common.main.google.NotFoundException;
import common.utils.DomTree;
import common.utils.MD5Util;
import org.apache.log4j.Logger;
import org.w3c.dom.DocumentFragment;
import org.w3c.dom.NodeList;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

/**
 * Crawler for 36kr.com: pages through the site's search-column API, fetches each
 * article's detail page, extracts content/labels/author from the embedded
 * {@code var props=...} JSON, and persists articles tagged "人工智能" via
 * {@link News36KeSql}. Runs forever, sleeping between requests and between passes.
 */
public class News36KeMain {
    static Logger logger = Logger.getLogger(News36KeMain.class);
    static String listModelUrl = "https://36kr.com/api/search-column/mainsite?per_page=20&page=<page>";
    static String detailInfoModelUrl = "https://36kr.com/p/<newsId>.html";
    static String ua = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36";
    static String encode = "utf-8";
    static SimpleHttp http = new SimpleHttp();

    /**
     * Entry point. Loops forever: crawls up to 100 list pages per pass, skipping
     * articles whose URL-MD5 is already known, then sleeps one hour.
     *
     * @param args unused
     * @throws InterruptedException if the thread is interrupted during one of the
     *         politeness sleeps (15 s between requests, 1 h between passes)
     */
    public static void main(String[] args) throws InterruptedException {
        // All MD5s already in the database, used to skip duplicates. Kept in a
        // StringBuilder so per-article appends are O(1) amortized instead of
        // re-copying the whole string each time.
        StringBuilder allMd5 = new StringBuilder(News36KeSql.getAllMd5());

        while (true) {

            // Number of CONSECUTIVE pages that produced zero new articles; once it
            // reaches 10 we assume everything beyond has already been collected.
            int sizeIs0Num = 0;

            for (int i = 1; i < 101; i++) {
                String url = listModelUrl.replace("<page>", String.valueOf(i));
                HtmlInfo html = new HtmlInfo();
                html.setEncode(encode);
                html.setUa(ua);
                html.setOrignUrl(url);
                logger.info("等待15s后获取第" + i + "页列表信息");
                Thread.sleep(15 * 1000);
                http.simpleGet(html);
                Gson gson = new Gson();
                Map listMap = gson.fromJson(html.getContent(), Map.class);
                List<Map> list = (List<Map>) ((Map) listMap.get("data")).get("items");
                List<News36KeEntity> newsList = new ArrayList<>();
                // Walk the list page and fetch each article's detail page.
                for (Map news : list) {

                    News36KeEntity entity = new News36KeEntity();

                    // Summary / brief.
                    entity.setBrief((String) news.get("summary"));

                    // Gson parses JSON numbers into Double; cast through double to a
                    // long so large ids never render in scientific notation
                    // (e.g. 1.0E7) and ".0" never needs to be stripped from a string.
                    long newsId = (long) (double) news.get("id");
                    String detailUrl = detailInfoModelUrl.replace("<newsId>", String.valueOf(newsId));
                    // Article URL.
                    entity.setUrl(detailUrl);

                    entity.setMd5(MD5Util.MD5(entity.getUrl()));
                    // Skip articles whose MD5 is already known, to avoid duplicates.
                    if (allMd5.indexOf(entity.getMd5()) >= 0) {
                        logger.info("该文章已存在数据库中，跳过采集");
                        continue;
                    }
                    HtmlInfo detailHtml = new HtmlInfo();
                    detailHtml.setUa(ua);
                    detailHtml.setEncode(encode);
                    detailHtml.setOrignUrl(detailUrl);
                    // Title.
                    entity.setTitle((String) news.get("title"));

                    logger.info("等待15s后获取《" + entity.getTitle() + "》的详细信息");
                    Thread.sleep(15 * 1000);
                    try {
                        http.simpleGet(detailHtml);
                        // Paid articles redirect to a /goods/ URL; skip them.
                        if (detailHtml.getRealUrl() != null && detailHtml.getRealUrl().contains("/goods/")) {
                            logger.info("付费文章，跳过采集");
                            // FIX: previously fell through and scraped the paid page anyway.
                            continue;
                        }
                    } catch (NotFoundException e) {
                        logger.info("获取网页信息失败，跳过采集");
                        continue;
                    }
                    DocumentFragment jsonContentNode = DomTree.getNode(detailHtml.getContent(), detailHtml.getEncode());
                    if (jsonContentNode == null) {
                        logger.info("获取新闻信息失败，跳过采集");
                        continue;
                    }
                    // The article body lives in an inline <script> as "var props={...}";
                    // cut out the JSON object between "var props=" and ",locationnal=".
                    NodeList jsonContentNodeList = DomTree.commonList(News36KeXpath.json_content_Xpath, jsonContentNode);
                    String detailJsonContent = null;
                    if (jsonContentNodeList != null) {
                        for (int j = 0; j < jsonContentNodeList.getLength(); j++) {
                            String jsonContent = jsonContentNodeList.item(j).getTextContent();
                            if (jsonContent.contains("var props={")) {
                                detailJsonContent = jsonContent.split("var props=")[1].split(",locationnal=")[0];
                                break;
                            }
                        }
                    }
                    if (detailJsonContent != null) {
                        Map map = gson.fromJson(detailJsonContent, Map.class);
                        Map detailArticle = (Map) map.get("detailArticle|post");
                        // Article content with HTML markup preserved.
                        String htmlContent = (String) detailArticle.get("content");
                        entity.setHtmlContent(htmlContent);
                        // Plain-text content, one extracted node per line.
                        DocumentFragment contentNode = DomTree.getNode(htmlContent, "utf-8");
                        NodeList contentNodeList = DomTree.commonList(News36KeXpath.contentXpath, contentNode);
                        if (contentNodeList != null) {
                            // FIX: was "String content = null; content = content + ..."
                            // which stored a literal "null" prefix in the database.
                            StringBuilder content = new StringBuilder();
                            for (int a = 0; a < contentNodeList.getLength(); a++) {
                                content.append(contentNodeList.item(a).getTextContent()).append("\n");
                            }
                            entity.setContent(content.toString());
                        }

                        // Image URLs inside the content, comma-separated.
                        NodeList imgUrlNodeList = DomTree.commonList(News36KeXpath.imgUrlXpath, contentNode);
                        if (imgUrlNodeList != null) {
                            // FIX: same "null" prefix defect as content above.
                            StringBuilder imgUrl = new StringBuilder();
                            for (int a = 0; a < imgUrlNodeList.getLength(); a++) {
                                imgUrl.append(imgUrlNodeList.item(a).getTextContent()).append(",");
                            }
                            entity.setImgUrl(imgUrl.toString());
                        }

                        // Tags: "extraction_tags" is a JSON-ish array string; every third
                        // token is a tag name (the other two appear to be metadata —
                        // NOTE(review): stride-3 layout inferred from original code, confirm
                        // against a live payload).
                        String labelListStr = ((String) detailArticle.get("extraction_tags")).replace("[", "")
                                .replace("]", "").replace("\\", "");
                        String[] labels = labelListStr.split(",");
                        // FIX: same "null" prefix defect as content above.
                        StringBuilder labelBuilder = new StringBuilder();
                        for (int a = 0; a < labels.length / 3; a++) {
                            labelBuilder.append(labels[a * 3].replace("\"", "")).append(",");
                        }
                        String label = labelBuilder.toString();
                        // Only articles tagged "人工智能" (artificial intelligence) are kept.
                        if (label.contains("人工智能")) {
                            entity.setLabel(label);
                        } else {
                            logger.info("该文章不包含《人工智能》标签，跳过采集");
                            continue;
                        }

                        // Author name.
                        entity.setAuthor((String) ((Map) detailArticle.get("user")).get("name"));
                        // Publication time.
                        entity.setPubTime((String) detailArticle.get("published_at"));
                    } else {
                        logger.info("获取文章信息失败，跳过采集");
                        continue;
                    }

                    // Remember this MD5 so the same article is skipped within this run.
                    allMd5.append(entity.getMd5());

                    newsList.add(entity);
                    logger.info("获取文章信息成功。");
                }
                // Count consecutive pages with nothing new; stop paging after 10.
                if (newsList.isEmpty()) {
                    if (sizeIs0Num == 10) {
                        logger.info("连续10页没有新数据，停止翻页。");
                        break;
                    }
                    sizeIs0Num++;
                    continue;
                }
                // FIX: reset the counter when a page yields data, so it truly counts
                // CONSECUTIVE empty pages as the comment promises.
                sizeIs0Num = 0;
                // Persist this page's new articles.
                logger.info("该页数据采集完成，保存到数据库");
                News36KeSql.saveData(newsList);
                logger.info("保存成功，size: " + newsList.size());

            }

            logger.info("一小时后继续采集");
            Thread.sleep(60 * 60 * 1000);
        }

    }

}
