package com.asiainfo.souhu;

import cn.edu.hfut.dmic.contentextractor.ContentExtractor;
import cn.edu.hfut.dmic.contentextractor.News;
import cn.edu.hfut.dmic.webcollector.model.CrawlDatum;
import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Links;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.plugin.berkeley.BreadthCrawler;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

/**
 * ZhouSouhu
 * 周蓬安的搜狐号:https://m.sohu.com/media/115438
 *
 * @Author jhr
 * @Date 2022/3/28
 */
public class ZhouSouhu extends BreadthCrawler {

    private static final Logger logger = LoggerFactory.getLogger(ZhouSouhu.class);

    // Berkeley-DB folder: keeps crawl history (visited urls etc.).
    // Must be unique per task — two crawlers sharing one crawlPath corrupt each other.
    private static String crawlPath = "./souhu/zhou";

    // Author home page, e.g. "https://mp.sohu.com/profile?xpt=d3VodWRzekB5YWhvby5jb20uY24="
    // (115438 is the Sohu author id).
    static String RootUrl = "";
    // List-API url yielding per-article links; bump pNo to page through the list,
    // e.g. "https://v2.sohu.com/author-page-api/author-articles/pc/115438?pNo=1".
    static String IUrl = "";
    // Example single-article url, e.g. "https://www.sohu.com/a/408149269_115438".
    static String seed = "";
    // Regex that article urls must match, e.g. "https://www.sohu.com/a/.*_115438".
    static String RegularUrl = "";

    /**
     * Builds a Berkeley-DB backed crawler seeded with the list-API url.
     *
     * @param Seed       list-API url used as the crawl seed (stored into {@code IUrl})
     * @param regularUrl regex that candidate article urls must match
     */
    public ZhouSouhu(String Seed, String regularUrl) {
        // autoParse=false: we queue follow-up urls ourselves in visit().
        super(crawlPath, false);
        IUrl = Seed;
        RegularUrl = regularUrl;
        CrawlDatum crawlDatum = new CrawlDatum(IUrl).meta("depth", "2");
        addSeed(crawlDatum);
        this.addRegex(RegularUrl);
        setThreads(1);
    }

    /**
     * Handles one fetched page: for urls matching {@code RegularUrl} it extracts the
     * article (url/time/title/content plus inline images); when the article body has
     * no images it falls back to the list API and queues every matching article link.
     */
    @Override
    public void visit(Page page, CrawlDatums next) {
        if (!page.matchUrl(RegularUrl)) {
            return;
        }
        News news;
        try {
            news = ContentExtractor.getNewsByUrl(page.url());
        } catch (Exception e) {
            // Fix: the original only printed the stack trace and then dereferenced
            // a null News object below, guaranteeing an NPE on extraction failure.
            logger.error("content extraction failed for {}", page.url(), e);
            return;
        }
        System.out.println("爬取网址：" + news.getUrl());
        System.out.println("发布时间：" + news.getTime());
        System.out.println("文章标题：" + news.getTitle());
        System.out.println("文章内容：" + news.getContent());

        // Element wrapping the extracted article body.
        Element contentElement = news.getContentElement();
        System.out.println("正文内容标签：" + contentElement.tagName());
        System.out.println("正文内容标签样式：" + contentElement.className());

        // Recursively collect <img> tags under the body; the same pattern works for
        // any other tag. getElementsByTag never returns null, so no null check needed.
        Elements elements = contentElement.getElementsByTag("img");
        if (!elements.isEmpty()) {
            for (Element img : elements) {
                System.out.println(img);
                System.out.println("图片地址：" + img.attr("src"));
            }
        } else {
            // No inline images: pull the author's article list from the JSON API
            // (IUrl) and queue every link that matches RegularUrl for crawling.
            JsonElement jsonElement = getLink(IUrl);
            System.out.println(jsonElement);
            JsonArray jsonArray = jsonElement.getAsJsonArray();
            System.out.println("asJsonObject" + jsonArray);
            System.out.println("获取个数：" + jsonArray.size());
            for (int i = 0; i < jsonArray.size(); i++) {
                String link = String.valueOf(jsonArray.get(i).getAsJsonObject().get("link"));
                // Strip the surrounding quotes gson's toString keeps, add the scheme.
                link = "https://" + link.replaceAll("\"", "");
                System.out.println("获取到link数组：" + link.replace("\"", ""));
                if (link.matches(RegularUrl)) {
                    next.add(new CrawlDatum(link).meta("depth", "1").meta("refer", link));
                } else {
                    System.out.println("正则URL不匹配！！！");
                }
            }
        }
    }

    /**
     * Fetches the list-API response and returns its {@code data.pcArticleVOS} array
     * (20 entries per page, each carrying a "link" to one article).
     *
     * @param s list-API url to fetch
     * @throws IllegalStateException when the API body could not be downloaded
     */
    private JsonElement getLink(String s) {
        String jsonStr = getHtmlByUrl(s);
        if (jsonStr == null) {
            // Fix: the original fed a possibly-null body straight into the parser.
            throw new IllegalStateException("failed to fetch " + s);
        }
        JsonObject jsonObject = (JsonObject) new JsonParser().parse(jsonStr);
        return jsonObject.get("data").getAsJsonObject().get("pcArticleVOS");
    }

    /**
     * Downloads the body of {@code url} as UTF-8 text.
     *
     * <p>Fixes over the original: bounded retries instead of a potentially infinite
     * {@code while (code != 200)} loop, streams closed via try-with-resources (the
     * original leaked them on the exception path), explicit UTF-8 instead of the
     * platform default charset, and the connection is always disconnected.
     *
     * @return the response body, or {@code null} when every attempt failed
     */
    public String getHtmlByUrl(String url) {
        final int maxAttempts = 3;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            HttpURLConnection conn = null;
            try {
                conn = (HttpURLConnection) new URL(url).openConnection();
                // Without a User-Agent the server answers 403.
                conn.setRequestProperty("User-Agent",
                        "Mozilla/4.0 (compatible; MSIE 5.0; Windows NT; DigExt)");
                if (conn.getResponseCode() != 200) {
                    continue;
                }
                StringBuilder html = new StringBuilder();
                try (BufferedReader br = new BufferedReader(
                        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
                    String line;
                    while ((line = br.readLine()) != null) {
                        html.append(line).append("\n");
                    }
                }
                return html.toString();
            } catch (Exception e) {
                logger.error("request failed (attempt {}/{}) for {}", attempt, maxAttempts, url, e);
            } finally {
                if (conn != null) {
                    conn.disconnect();
                }
            }
        }
        return null;
    }

    public static void main(String[] args) throws Exception {
        // pNo selects the page of the author's article list; currently only page 1.
        for (int i = 1; i <= 1; i++) {
            System.out.println("pNo=" + i);
            String formatInter = String.format(
                    "https://v2.sohu.com/author-page-api/author-articles/pc/115438?pNo=%d", i);
            System.out.println("fromat:" + formatInter);
            ZhouSouhu crawler = new ZhouSouhu(formatInter, "https://www.sohu.com/a/.*_115438");
            crawler.start(3);
        }
    }
}
