package com.asiainfo.jianshu;

import cn.edu.hfut.dmic.contentextractor.ContentExtractor;
import cn.edu.hfut.dmic.contentextractor.News;
import cn.edu.hfut.dmic.webcollector.fetcher.Executor;
import cn.edu.hfut.dmic.webcollector.model.CrawlDatum;
import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.plugin.berkeley.BreadthCrawler;
import com.asiainfo.utils.JdbcHelper;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.htmlunit.HtmlUnitDriver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.jdbc.core.JdbcTemplate;

/**
 * Breadth-first crawler for one Jianshu user ("Luo"): walks the user's
 * article-list pages and extracts title/body from every article page whose
 * URL matches {@link #RegularUrl}.
 *
 * @author jhr
 * @since 2022/3/28
 */
public class LuoJianshuCrawler extends BreadthCrawler {

    private static final Logger logger = LoggerFactory.getLogger(LuoJianshuCrawler.class);

    /**
     * Berkeley DB folder used by WebCollector to persist crawl history
     * (visited URLs etc.). Two crawlers running in parallel must NOT share
     * the same crawlPath — they would corrupt each other's state.
     */
    private static String crawlPath = "/Users/jhr/data/db/jianshuLuo";

    /** Entry URL, e.g. "https://www.jianshu.com/u/...?order_by=shared_at&page=1". */
    public static String seed = "";

    /** Total article count, parsed from the user's profile page by visit(). */
    public static int sum;

    /** Regex that article-detail URLs must match, e.g. "https://www.jianshu.com/p/.*". */
    public static String RegularUrl = "";

    /** Optional MySQL access; stays null when the database is unreachable. */
    static JdbcTemplate jdbcTemplate = null;

    /**
     * Builds a Berkeley-DB-backed crawler rooted at the given seed URL.
     *
     * @param Seed       entry URL, added as the crawl seed with depth meta "2"
     * @param regularUrl regex used to recognise article-detail URLs
     */
    public LuoJianshuCrawler(String Seed, String regularUrl) {
        // autoParse=false: follow-up URLs are queued manually in visit().
        super(crawlPath, false);
        seed = Seed;
        RegularUrl = regularUrl;
        CrawlDatum crawlDatum = new CrawlDatum(seed).meta("depth", "2");
        addSeed(crawlDatum);
        this.addRegex(RegularUrl);
        setThreads(20);
    }

    /**
     * Handles one fetched page. Article pages (URL matches {@link #RegularUrl})
     * get their title/content extracted and printed; list pages are scanned for
     * article links, which are queued into {@code next} for the next round.
     */
    @Override
    public void visit(Page page, CrawlDatums next) {
        if (page.matchUrl(RegularUrl)) {
            // Article page. Title: <h1 class="_1RuRku">; body: <p> tags under
            // <article class="_2rhmJa"> (class names are Jianshu-specific and
            // may change with a site redesign).
            String title = page.select("h1._1RuRku").text();
            String context = page.select("article._2rhmJa>p").text();
            System.out.println("URL:" + page.url());
            System.out.println("标题:" + title);
            System.out.println("内容:" + context);

            News news = null;
            try {
                news = ContentExtractor.getNewsByUrl(page.url());
            } catch (Exception e) {
                // Extraction failure is non-fatal for the crawl; log with cause.
                logger.error("ContentExtractor failed for url={}", page.url(), e);
            }
            // BUGFIX: the original dereferenced news unconditionally and threw
            // a NullPointerException whenever getNewsByUrl() failed above.
            if (news != null) {
                System.out.println("爬取网址：" + news.getUrl());
                System.out.println("发布时间：" + news.getTime());
                System.out.println("文章标题：" + news.getTitle());
                System.out.println("文章内容：" + news.getContent());

                // getNewsByUrl() may succeed without locating a body element.
                Element contentElement = news.getContentElement();
                if (contentElement != null) {
                    System.out.println("正文内容标签：" + contentElement.tagName());
                    System.out.println("正文内容标签样式：" + contentElement.className());

                    // Recursively collect every <img> under the body element;
                    // the same idiom works for any other tag of interest.
                    for (Element img : contentElement.getElementsByTag("img")) {
                        System.out.println("图片地址：" + img.attr("src"));
                    }
                }
            }
            // DB persistence (dedup on title+url, then insert) can be done here
            // via jdbcTemplate when it is non-null.
        } else {
            // Profile/list page. The third div.meta-block>a>p holds the article
            // count; guard against layout drift instead of letting
            // IndexOutOfBounds/NumberFormatException kill the worker thread.
            Elements meta = page.select("div.meta-block>a>p");
            if (meta.size() > 2) {
                try {
                    sum = Integer.parseInt(meta.get(2).text());
                } catch (NumberFormatException e) {
                    logger.warn("Could not parse article count from '{}'", meta.get(2).text(), e);
                }
            } else {
                logger.warn("Article-count element not found on {}", page.url());
            }
            // Article links live under <div id="list-container">.
            Elements aBody = page.select("div#list-container>ul>li>div>a");
            for (Element element : aBody) {
                String href = element.attr("abs:href");
                logger.debug("获取到匹配的url={}", href);
                if (href.matches(RegularUrl)) {
                    next.add(new CrawlDatum(href).meta("depth", "1").meta("refer", seed));
                } else {
                    System.out.println("正则URL不匹配！！！");
                }
            }
        }
    }

    public static void main(String[] args) throws Exception {
        // Initialise JDBC; the crawl still runs (without persistence) if MySQL is down.
        try {
            jdbcTemplate = JdbcHelper.createMysqlTemplate("mysql",
                    "jdbc:mysql://localhost:3306/webCollectorTest?characterEncoding=utf8&useSSL=false&serverTimezone=UTC&rewriteBatchedStatements=true",
                    "root", "password", 5, 30);
            System.out.println("初始化成功！！！");
        } catch (Exception e) {
            jdbcTemplate = null;
            // BUGFIX: original swallowed the exception entirely; keep the cause.
            logger.warn("mysql未开启或JDBCHelper.createMysqlTemplate中参数配置不正确!", e);
        }
        // Jianshu user: 罗湿兄
        String pageUrl = "https://www.jianshu.com/u/384aed1f5469?order_by=shared_at&page=";
        LuoJianshuCrawler crawler = new LuoJianshuCrawler(pageUrl + 1, "https://www.jianshu.com/p/.*");
        crawler.start(2);
        System.out.println("发表了" + sum + "篇文章");
        // Jianshu lists 9 articles per page; crawl the remaining pages.
        if (sum > 9) {
            System.out.println("==========文章数大于9，继续按页数爬取========");
            // BUGFIX: the original bound (i < sum / 9 + 1) used floor division
            // and skipped the final partially-filled page — e.g. sum == 10
            // crawled no extra pages at all despite the sum > 9 guard.
            int totalPages = (sum + 8) / 9; // == ceil(sum / 9.0)
            for (int i = 2; i <= totalPages; i++) {
                LuoJianshuCrawler crawler2 = new LuoJianshuCrawler(pageUrl + i, "https://www.jianshu.com/p/.*");
                crawler2.start(2);
            }
        }
    }

}
