package com.asiainfo.souhu;

import cn.edu.hfut.dmic.contentextractor.ContentExtractor;
import cn.edu.hfut.dmic.contentextractor.News;
import cn.edu.hfut.dmic.webcollector.model.CrawlDatum;
import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.plugin.berkeley.BreadthCrawler;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

/**
 * Crawler for the Sohu Business index page (https://business.sohu.com/).
 *
 * <p>Built on WebCollector's Berkeley-DB-backed {@link BreadthCrawler}: the DB folder
 * ({@code crawlPath}) keeps the history of visited URLs, so two crawlers must never
 * share the same {@code crawlPath} in parallel. For each page matching the article
 * regex, the article fields (title, content, publish time, author, read count) are
 * extracted and printed; all other pages are scanned for further article links.
 *
 * @Author jhr
 * @Date 2022/4/11
 */
public class BusinessIndex extends BreadthCrawler {

    private static final Logger logger = LoggerFactory.getLogger(BusinessIndex.class);

    /** Berkeley DB folder holding crawl history; must be unique per crawl task. */
    private static final String crawlPath = "/Users/jhr/data/db/BusinessIndex";

    /** Seed page, e.g. https://business.sohu.com/ (set by the constructor). */
    public static String seed = "";

    /** Regex matching article pages, e.g. https://www.sohu.com/a/.* (set by the constructor). */
    public static String RegularUrl = "";

    /** Sohu answers 403 without a browser-like User-Agent. */
    private static final String USER_AGENT =
            "Mozilla/4.0 (compatible; MSIE 5.0; Windows NT; DigExt)";

    /** Upper bound on fetch retries; the original retried non-200 responses forever. */
    private static final int MAX_FETCH_ATTEMPTS = 3;

    /**
     * Builds a Berkeley-DB-backed crawler seeded with the index page.
     *
     * @param Seed       entry URL to start crawling from
     * @param regularUrl regex that article URLs must match
     */
    public BusinessIndex(String Seed, String regularUrl) {
        // autoParse=false: links are discovered manually in visit(), not via regex auto-detection.
        super(crawlPath, false);
        seed = Seed;
        RegularUrl = regularUrl;
        CrawlDatum crawlDatum = new CrawlDatum(seed).meta("depth", "2");
        addSeed(crawlDatum);
        this.addRegex(RegularUrl);
        setThreads(1);
    }

    /**
     * Dispatches each fetched page: article pages are extracted and printed,
     * index pages are scanned for further article links.
     */
    @Override
    public void visit(Page page, CrawlDatums next) {
        if (page.matchUrl(RegularUrl)) {
            visitArticle(page);
        } else {
            enqueueArticleLinks(page, next);
        }
    }

    /** Extracts and prints one article's URL, title, content, time, author and read count. */
    private void visitArticle(Page page) {
        System.out.println(page.url());
        News news;
        try {
            news = ContentExtractor.getNewsByUrl(page.url());
        } catch (Exception e) {
            logger.error("content extraction failed for " + page.url(), e);
            return;
        }
        if (news == null) {
            // Was `assert news != null` — a no-op without -ea, followed by an NPE.
            logger.warn("no news extracted from {}", page.url());
            return;
        }
        System.out.println("爬取网址：" + news.getUrl());
        System.out.println("文章标题：" + news.getTitle());
        System.out.println("文章内容：" + news.getContent());

        // Publish time lives in different selectors depending on the page template.
        String getTime = page.select("span#news-time").text();
        if (getTime.isEmpty()) {
            getTime = page.select("p.article-info>span").text();
        }
        System.out.println("发布时间：" + getTime);

        System.out.println("作者:" + extractAuthor(page));
        System.out.println("阅读量:" + extractReadNum(page));

        String source = "搜狐财经";
        System.out.println("来源：" + source);
    }

    /** Author markup varies per template; fall through the known variants in order. */
    private String extractAuthor(Page page) {
        String author = page.select("div.article-info>span>a").text();
        if (author.isEmpty()) {
            author = page.select("a.name").text();
            // jsoup joins multiple matched elements with a space; keep the last one.
            if (author.contains(" ")) {
                String[] parts = author.split(" ");
                author = parts[parts.length - 1];
            }
        }
        if (author.isEmpty()) {
            author = page.select("div.article-info").text();
            int idx = author.indexOf("来源: ");
            // Guard: the original did indexOf(...) + 3 unconditionally, which silently
            // produced substring(2) when the marker was missing (indexOf == -1).
            if (idx >= 0) {
                author = author.substring(idx + 3).trim();
            }
        }
        return author;
    }

    /**
     * Reads the view count. Some templates embed it directly; others render it via JS,
     * so it is fetched from the public pv JSONP endpoint keyed by the article id taken
     * from the URL (…/a/&lt;id&gt;_…).
     */
    private String extractReadNum(Page page) {
        String readNum = page.select("div.read-num").text();
        if (readNum.isEmpty()) {
            String pageUrl = page.url();
            int start = pageUrl.indexOf("a/");
            int end = pageUrl.indexOf("_");
            if (start >= 0 && end > start + 2) {
                String articleId = pageUrl.substring(start + 2, end);
                String pvUrl = "https://v2.sohu.com/public-api/articles/" + articleId
                        + "/pv?callback=jQuery112409835205263387437_1649658914462&_=1649658914463";
                String js = getHtmlByUrl(pvUrl);
                // Response looks like jQuery…(<count>); js is null when the fetch failed
                // (the original dereferenced it unconditionally and could NPE).
                if (js != null) {
                    int open = js.indexOf("(");
                    int close = js.indexOf(")");
                    if (open >= 0 && close > open) {
                        readNum = js.substring(open + 1, close);
                    }
                }
            }
        }
        // Normalize "N万" (N ten-thousands) to a plain number.
        if (readNum.contains("万")) {
            readNum = readNum.replaceAll("万", "0000");
        }
        return readNum;
    }

    /** Queues every on-page link matching {@code RegularUrl} for a depth-1 fetch. */
    private void enqueueArticleLinks(Page page, CrawlDatums next) {
        Elements elements = page.select("a");
        System.out.println("获取到链接个数：" + elements.size());
        for (Element element : elements) {
            String href = element.attr("abs:href");
            logger.debug("获取到匹配的url=" + href);
            if (href.matches(RegularUrl)) {
                next.add(new CrawlDatum(href).meta("depth", "1"));
            } else {
                System.out.println("正则URL不匹配！！！");
            }
        }
    }

    /**
     * Fetches the raw body of {@code url}, retrying until an HTTP 200 is returned
     * or {@link #MAX_FETCH_ATTEMPTS} attempts are exhausted (the original looped
     * forever on persistent non-200 responses).
     *
     * @param url absolute URL to fetch
     * @return response body decoded as UTF-8, or {@code null} if every attempt failed
     */
    public String getHtmlByUrl(String url) {
        for (int attempt = 0; attempt < MAX_FETCH_ATTEMPTS; attempt++) {
            try {
                HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
                // Mandatory — without it sohu.com responds with 403.
                conn.setRequestProperty("User-Agent", USER_AGENT);
                if (conn.getResponseCode() != 200) {
                    continue;
                }
                StringBuilder html = new StringBuilder();
                // try-with-resources: the original leaked the stream on any exception.
                try (BufferedReader br = new BufferedReader(
                        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
                    String line;
                    while ((line = br.readLine()) != null) {
                        html.append(line).append("\n");
                    }
                }
                return html.toString();
            } catch (Exception e) {
                logger.error("fetch attempt " + (attempt + 1) + " failed for " + url, e);
            }
        }
        return null;
    }

    public static void main(String[] args) throws Exception {
        String seed = "https://business.sohu.com/";
        String regular = "https://www.sohu.com/a/.*";
        BusinessIndex businessIndex = new BusinessIndex(seed, regular);
        businessIndex.start(2);
    }
}
