package org.example;

import org.jsoup.Connection;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * @author Myli
 */
public class Scrap2 {

    // Base URL of the target site; chapter hrefs are relative to this.
    static String url = "http://www.shukuge.com";
    // Index (table-of-contents) page of the book to download.
    static String book = "http://www.shukuge.com/book/145569/index.html";
    // Matches the chapter number between "第" and "章" in a chapter title.
    static Pattern pattern = Pattern.compile("第(\\d+)章");

    private static final int MAX_RETRIES = 5;           // max connection attempts per URL
    private static final int RETRY_DELAY_MILLIS = 1000; // delay between retries (ms)

    /**
     * Entry point: fetches the book's table of contents, then downloads every
     * chapter concurrently on a thread pool, handing each chapter to an
     * {@link OrderedFileWriter} that serializes output in chapter order.
     *
     * @throws IOException if the output file cannot be created
     */
    public static void main(String[] args) throws IOException {
        String fileName = "C:\\Users\\Xyulu\\Desktop\\武技特效.txt";

        ThreadPoolExecutor poolExecutor = new ThreadPoolExecutor(
                16,
                32,
                10,
                TimeUnit.SECONDS, new LinkedBlockingQueue<>());

        // Serializes chapter output so the file stays in chapter order even
        // though chapters are fetched concurrently.
        OrderedFileWriter writer = new OrderedFileWriter(fileName);

        // Make sure the output file exists before any worker touches it.
        // BUG FIX: the createNewFile() result was previously ignored.
        File file = new File(fileName);
        if (!file.exists() && !file.createNewFile()) {
            throw new IOException("无法创建文件: " + fileName);
        }

        System.out.println("开始连接网站……");
        // connectWithRetry spoofs a browser User-Agent; some sites block plain clients.
        Document doc = connectWithRetry(book);
        if (doc == null) {
            // BUG FIX: the original dereferenced doc without a null check.
            System.out.println("======目录页加载失败=======");
            poolExecutor.shutdown();
            return;
        }
        // The chapter list lives in <div id="list"> as a sequence of <dd> elements.
        // BUG FIX: selectFirst + null check instead of select(...).get(0), which
        // threw IndexOutOfBoundsException when the selector matched nothing.
        Element list = doc.selectFirst("div[id=list]");
        Elements as = (list == null) ? new Elements() : list.select("dd");
        if (as.isEmpty()) {
            System.out.println("未查到任何内容，请检查标签是否正确！");
        } else {
            int index = 1;
            for (Element a : as) {
                // Each <dd> wraps an <a href="..."> pointing at one chapter page.
                String href = a.getElementsByAttribute("href").attr("href");
                String title = extractTitle(a);
                final int threadIndex = index++;
                // BUG FIX: the original condition used "||" (and tested
                // file.exists(), which is always true here), so entries with an
                // empty href or title were still submitted to the pool.
                if (!href.isEmpty() && !title.isEmpty()) {
                    poolExecutor.execute(() -> save(file, href, threadIndex, title, writer));
                } else {
                    System.out.println("！！！！！！！！！！！！！！");
                }
            }
        }
        // Stop accepting tasks; queued downloads still run to completion.
        poolExecutor.shutdown();
    }

    /**
     * Downloads one chapter page, strips HTML noise from its body text, and
     * hands the result to the ordered writer.
     *
     * @param file              output file (kept for interface compatibility;
     *                          actual writes go through {@code orderedFileWriter})
     * @param href              chapter href, relative to {@link #url}
     * @param threadIndex       sequence number used by the writer to keep chapter order
     * @param title             chapter title used as the section header
     * @param orderedFileWriter writer that serializes output in chapter order
     */
    private static void save(File file, String href, int threadIndex, String title, OrderedFileWriter orderedFileWriter) {
        try {
            String chapterUrl = url + href;
            Document doc = connectWithRetry(chapterUrl);
            if (doc == null) {
                System.out.println("======" + title + "加载失败=======");
                // BUG FIX: the original fell through after this message and
                // threw a NullPointerException on doc.select(...).
                return;
            }
            // Chapter body lives in <div id="content">.
            String content = doc.select("div[id=content]").html();
            // Strip the site watermark and HTML noise; turn full-width indent
            // markers into line breaks. (The original replaced "<br>" twice.)
            content = content.replace("最新网址：bi xiasheng hua.c o m", "");
            content = content.replace("<br>", "");
            content = content.replace("<p>", "");
            content = content.replace("</p>", "");
            content = content.replace("&nbsp;", "");
            content = content.replace("　　", "\r\n  ");
            // Single, ordered write. BUG FIX: the original ALSO appended via an
            // unsynchronized per-thread FileWriter to the same file, which
            // duplicated every chapter, interleaved concurrent writes, and
            // leaked the writer if an exception fired before close().
            orderedFileWriter.write(threadIndex, content, title);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore interrupt status before rethrowing
            throw new RuntimeException(e);
        } catch (Exception e) {
            // Covers I/O failures from the writer (exact checked exceptions of
            // OrderedFileWriter.write are not visible here — TODO confirm).
            e.printStackTrace();
        }
        // Random pause so the crawl is not flagged as an attack and our IP banned.
        int n = (int) (Math.random() * 1000);
        try {
            Thread.sleep(n);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore interrupt status
        }
    }


    /**
     * Normalizes a chapter title: drops spaces and '?' characters so the text
     * is safe to embed in the output file.
     */
    private static String extractTitle(Element a) {
        return a.text().replace(" ", "").replace("?", "");
    }

    /**
     * GETs the given URL with a browser User-Agent, retrying up to
     * {@link #MAX_RETRIES} times with a fixed delay between attempts.
     *
     * @param url absolute URL to fetch
     * @return the parsed document, or {@code null} if every attempt failed
     *         or the thread was interrupted while waiting to retry
     */
    public static Document connectWithRetry(String url) {
        // Spoof a desktop Chrome User-Agent; some sites reject default clients.
        String userAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36";
        for (int i = 0; i < MAX_RETRIES; i++) {
            try {
                return Jsoup.connect(url).userAgent(userAgent).get();
            } catch (IOException e) {
                if (i == MAX_RETRIES - 1) {
                    // Out of retries: report and signal failure with null.
                    e.printStackTrace();
                    return null;
                }
                try {
                    System.out.println("======开始尝试重连" + url);
                    Thread.sleep(RETRY_DELAY_MILLIS);
                } catch (InterruptedException ie) {
                    // BUG FIX: restore interrupt status and stop retrying
                    // instead of swallowing the interrupt and looping on.
                    Thread.currentThread().interrupt();
                    return null;
                }
            }
        }
        return null;
    }


    /**
     * Extracts the chapter number from a title of the form "第N章".
     *
     * @param title chapter title to parse
     * @return the chapter number, or 0 if the title contains no "第N章" pattern
     */
    public static Integer extractChapter(String title) {
        Matcher matcher = pattern.matcher(title);
        if (matcher.find()) {
            // group(1) is the digits between "第" and "章".
            return Integer.parseInt(matcher.group(1));
        }
        return 0;
    }
}
