package com.example.spiders;

import com.example.spiders.constant.ConfigConstant;
import com.example.spiders.constant.UrlConstant;
import com.example.utils.FileUtils;
import com.example.utils.HttpClientUtils;
import com.example.utils.SSLUtils;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import lombok.extern.log4j.Log4j2;
import org.apache.commons.lang3.StringUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Node;
import org.jsoup.select.Elements;

/**
 * @Description: 小说爬虫(笔阁趣)
 * @Author: Swain
 * @Date: 2021/1/18 15:43
 * @Version 1.0.0
 */
@Log4j2
public class NovelSpider {


    /**
     * @param novelId 小说对应的id 如：https://www.biquge5200.com/130_130510/  超品命师 -- 130_130510
     * @return void
     * @Description 根据小说id爬取对应的小说，并将其保存在配置的文件夹内
     * @Author swain
     * @Date 2021/1/18
     */
    public static void crawling(String novelId) {
        try {
            // http请求忽略证书
            SSLUtils.init();
            // 小说目录页
            String novelHtml = HttpClientUtils.getHtml(UrlConstant.BGQ_NOVEL_URL + novelId);
            Document document = Jsoup.parse(novelHtml);
            // 小说名称
            String novelName = document.getElementById("info").select("h1").text();
            // 章节list
            Elements chapterList = document.getElementsByTag("dd");

            long startTimeMillis = System.currentTimeMillis();
            log.info("====== 开始爬取:《" + novelName + "》，一共[" + chapterList.size() + "】章节 ======");

            // 创建小说文件
            String novelFilePath = ConfigConstant.NOVEL_ROOT_FILE_PATH + novelName + ".txt";
            File file = FileUtils.createFile(novelFilePath);

            // 创建字符容器
            ArrayList<String> novelTextArrays = new ArrayList<>();
            Map<String, List> cachedMap = new HashMap<>();

            // 多线程分段爬取
            int piecewiseCount = getPiecewiseCount(chapterList.size());
            ExecutorService executor = Executors.newFixedThreadPool(piecewiseCount);
            for (int i = 0; i < piecewiseCount; i++) {
                final int index = i;
                final boolean isLastPiecewise = (i == piecewiseCount - 1);
                executor.execute(new Runnable() {
                    @Override
                    public void run() {
                        log.info("线程" + index + "执行中..");
                        ArrayList<String> list = crawlingPiecewise(chapterList, index,
                            isLastPiecewise);
                        log.info("线程" + index + "执行结束..");
                        cachedMap.put(String.valueOf(index), list);
                    }
                });
            }
            executor.shutdown();

            try {
                // awaitTermination返回false即超时会继续循环，返回true即线程池中的线程执行完成主线程跳出循环往下执行，每隔10秒循环一次
                while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                    ;
                }
            } catch (InterruptedException e) {
                e.printStackTrace();
            }

            for (int i = 0; i < piecewiseCount; i++) {
                novelTextArrays.addAll(cachedMap.get(String.valueOf(i)));
            }

            // 建缓冲流，追加写入
            try (FileOutputStream fileOutputStream = new FileOutputStream(file, true);
                OutputStreamWriter outputStreamWriter = new OutputStreamWriter(fileOutputStream);
                BufferedWriter bufferedWriter = new BufferedWriter(outputStreamWriter)) {
                for (String str : novelTextArrays) {
                    bufferedWriter.write(str);
                    bufferedWriter.newLine();
                }
                bufferedWriter.flush();
            } catch (IOException ex) {
                log.error("写入文件失败", ex);
            }

            long endTimeMillis = System.currentTimeMillis();
            log.info("====== 爬取完成:《" + novelName + "》,总耗时" + (endTimeMillis - startTimeMillis)
                + "毫秒 ======");
        } catch (Exception e) {
            log.error("爬取异常", e);
        }
    }

    /**
     * 分段获取小说文本字符串集合
     */
    public static ArrayList<String> crawlingPiecewise(Elements chapterList, int piecewiseIndex,
        boolean isLastPiecewise) {
        // 计算爬取的具体章节段
        int index = piecewiseIndex * ConfigConstant.COUNT_NOVEL_PIECEWISE;
        int count = 0;
        if (isLastPiecewise) {
            count = chapterList.size();
        } else {
            count = (piecewiseIndex + 1) * ConfigConstant.COUNT_NOVEL_PIECEWISE;
        }

        ArrayList<String> textArrays = new ArrayList<>();
        for (int i = index; i < count; i++) {
            // 详情node元素
            Node node = chapterList.get(i).childNode(0);
            for (Node e : chapterList.get(i).childNodes()) {
                if (StringUtils.isNotBlank(e.toString())) {
                    node = e;
                }
            }
            String href = node.toString();
            // 效验node元素超链接是否符合规则
            Matcher matcher = ConfigConstant.A_HREF_URL_PATTERN.matcher(href);
            if (!matcher.find()) {
                continue;
            }
            String nodeUrl = matcher.group(1);

            //开始爬取章节详情
            String chapterHtml = HttpClientUtils.getHtml(nodeUrl);
            if (null == chapterHtml) {
                continue;
            }
            Document documentDetails = Jsoup.parse(chapterHtml);
            Elements chapterName = documentDetails.select("h1");

            log.info("==================== [线程" + piecewiseIndex + "]正在爬取章节:" + chapterName
                + " ================");
            // 爬取之后暂存字符串容器
            textArrays.add(chapterName.text());
            Elements elements = documentDetails.select("#content");
            String[] splitText = elements.get(0).html()
                .replace("<div id='content'>", "")
                .replace("</div>", "")
                .replace("<script>readx();</script>", "")
                .replace("<script>chaptererror();</script>", "")
                .replace("<p>", "")
                .split("</p>");
            for (String str : splitText) {
                textArrays.add(str);
            }
            log.info("==================== [线程" + piecewiseIndex + "]爬取章节成功:" + chapterName
                + " ================");
        }
        return textArrays;
    }

    /**
     * @param sum 总章节数
     * @return int 分段数
     * @Description 获取分段下载数
     * @Author swain
     * @Date 2021/1/18
     */
    public static int getPiecewiseCount(int sum) {
        int count = 0;
        if (sum % ConfigConstant.COUNT_NOVEL_PIECEWISE != 0) {
            count = sum / ConfigConstant.COUNT_NOVEL_PIECEWISE + 1;
        } else {
            count = sum / ConfigConstant.COUNT_NOVEL_PIECEWISE;
        }
        return count;
    }


}
