package com.zwz.tools;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.IOException;
import java.sql.*;
import java.util.Queue;
import java.util.LinkedList;

/**
 * A single-threaded web crawler that persists its URL frontier and fetched
 * page text to MySQL, so an interrupted crawl can resume where it left off.
 *
 * <p>Schema: {@code url_queue(url UNIQUE, status 0=pending 1=done)} and
 * {@code page_data(url UNIQUE, content, crawled_at)}. Not thread-safe.
 */
public class DBCrawler implements AutoCloseable {
    // Database configuration.
    // NOTE(review): credentials are hard-coded; move them to environment
    // variables or an external config file for anything beyond local testing.
    private static final String JDBC_URL = "jdbc:mysql://localhost:3306/crawler?useSSL=false";
    private static final String DB_USER = "root";
    private static final String DB_PASSWORD = "password";

    // Must match the VARCHAR(512) width of the url columns; longer URLs
    // would make the INSERT fail and previously were silently dropped.
    private static final int MAX_URL_LENGTH = 512;

    private Connection conn;
    private final Queue<String> urlQueue = new LinkedList<>();
    private int processedCount = 0;

    /**
     * Opens the database connection, creates the schema if missing and
     * restores any previously persisted crawl progress.
     *
     * @throws IllegalStateException if the database is unreachable or the
     *         schema/progress cannot be loaded (failing fast here replaces
     *         the old behavior of continuing with a null connection and
     *         crashing with an NPE on first use).
     */
    public DBCrawler() {
        initDB();
        loadProgress();
    }

    // Open the connection and create the two tables if they do not exist.
    private void initDB() {
        try {
            conn = DriverManager.getConnection(JDBC_URL, DB_USER, DB_PASSWORD);

            String[] sqls = {
                    "CREATE TABLE IF NOT EXISTS url_queue ("
                            + "id INT AUTO_INCREMENT PRIMARY KEY,"
                            + "url VARCHAR(512) NOT NULL UNIQUE,"
                            + "status TINYINT DEFAULT 0)",  // 0 = pending, 1 = processed

                    "CREATE TABLE IF NOT EXISTS page_data ("
                            + "id INT AUTO_INCREMENT PRIMARY KEY,"
                            + "url VARCHAR(512) NOT NULL UNIQUE,"
                            + "content LONGTEXT,"
                            + "crawled_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP)"
            };

            for (String sql : sqls) {
                try (Statement stmt = conn.createStatement()) {
                    stmt.execute(sql);
                }
            }
        } catch (SQLException e) {
            // Fail fast: every later operation needs a working connection.
            throw new IllegalStateException("Failed to initialize crawler database", e);
        }
    }

    // Reload the pending URL frontier and the processed-page counter.
    private void loadProgress() {
        try {
            // Pending queue (status = 0).
            try (PreparedStatement pstmt = conn.prepareStatement(
                         "SELECT url FROM url_queue WHERE status = 0");
                 ResultSet rs = pstmt.executeQuery()) {
                while (rs.next()) {
                    urlQueue.add(rs.getString("url"));
                }
            }

            // Count of already-processed URLs (status = 1).
            try (PreparedStatement pstmt = conn.prepareStatement(
                         "SELECT COUNT(*) AS cnt FROM url_queue WHERE status = 1");
                 ResultSet rs = pstmt.executeQuery()) {
                if (rs.next()) {
                    processedCount = rs.getInt("cnt");
                }
            }
            System.out.println("恢复进度：已处理 " + processedCount + " 个页面");
        } catch (SQLException e) {
            throw new IllegalStateException("Failed to restore crawl progress", e);
        }
    }

    /**
     * Adds a seed URL to the frontier (memory and database) unless it is
     * already known or not a crawlable http(s) URL.
     *
     * @param url absolute URL to start crawling from
     */
    public void addSeedUrl(String url) {
        if (isCrawlable(url) && !existsInDB(url)) {
            urlQueue.add(url);
            saveUrlToQueue(url);
        }
    }

    // Accept only non-empty http(s) URLs that fit the VARCHAR(512) column.
    // jsoup's absUrl() returns "" for unresolvable hrefs, and anchors may
    // carry mailto:/javascript: schemes — all of those are rejected here.
    private static boolean isCrawlable(String url) {
        return url != null
                && !url.isEmpty()
                && url.length() <= MAX_URL_LENGTH
                && (url.startsWith("http://") || url.startsWith("https://"));
    }

    // True if the URL is already present in url_queue (any status).
    private boolean existsInDB(String url) {
        try (PreparedStatement pstmt = conn.prepareStatement(
                "SELECT 1 FROM url_queue WHERE url = ?")) {
            pstmt.setString(1, url);
            try (ResultSet rs = pstmt.executeQuery()) {
                return rs.next();
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return false;
    }

    // Persist a URL into url_queue with status = 0 (pending).
    private void saveUrlToQueue(String url) {
        try (PreparedStatement pstmt = conn.prepareStatement(
                "INSERT INTO url_queue (url) VALUES (?)")) {
            pstmt.setString(1, url);
            pstmt.executeUpdate();
        } catch (SQLIntegrityConstraintViolationException ignored) {
            // Duplicate key: the URL was inserted between our existsInDB
            // check and this INSERT — safe to ignore.
        } catch (SQLException e) {
            // Previously ALL SQLExceptions were swallowed here, which could
            // silently lose URLs on real errors (lost connection, etc.).
            System.err.println("Failed to enqueue URL: " + url);
            e.printStackTrace();
        }
    }

    /**
     * Crawls until the frontier is empty. Equivalent to
     * {@code startCrawling(Integer.MAX_VALUE)}.
     */
    public void startCrawling() {
        startCrawling(Integer.MAX_VALUE);
    }

    /**
     * Crawls until the frontier is empty or {@code maxPages} pages have been
     * successfully processed in this run. Failed fetches stay status = 0 in
     * the database and are retried on the next run.
     *
     * @param maxPages upper bound on pages processed in this invocation
     */
    public void startCrawling(int maxPages) {
        int crawledThisRun = 0;
        while (!urlQueue.isEmpty() && crawledThisRun < maxPages) {
            String currentUrl = urlQueue.poll();
            try {
                System.out.println("正在爬取: " + currentUrl);

                // Fetch and parse the page.
                Document doc = Jsoup.connect(currentUrl)
                        .userAgent("Mozilla/5.0")
                        .timeout(10000)
                        .get();

                // Discover and enqueue new outgoing links.
                enqueueLinks(doc);

                // Persist the page text.
                saveContentToDB(currentUrl, doc.text());

                // Mark as done so a restart will not refetch it.
                markAsProcessed(currentUrl);
                processedCount++;
                crawledThisRun++;

            } catch (IOException e) {
                System.err.println("爬取失败: " + currentUrl);
                e.printStackTrace();
            }
        }
        System.out.println("爬取完成，共处理 " + processedCount + " 个页面");
    }

    // Extract all <a href> targets from the page and enqueue the crawlable,
    // previously-unseen ones.
    private void enqueueLinks(Document doc) {
        Elements links = doc.select("a[href]");
        for (Element link : links) {
            String nextUrl = link.absUrl("href");
            if (isCrawlable(nextUrl) && !existsInDB(nextUrl)) {
                urlQueue.add(nextUrl);
                saveUrlToQueue(nextUrl);
            }
        }
    }

    // Upsert the page text: retrying a URL whose earlier attempt partially
    // succeeded no longer trips the UNIQUE(url) constraint.
    private void saveContentToDB(String url, String content) {
        try (PreparedStatement pstmt = conn.prepareStatement(
                "INSERT INTO page_data (url, content) VALUES (?, ?) "
                        + "ON DUPLICATE KEY UPDATE content = VALUES(content)")) {
            pstmt.setString(1, url);
            pstmt.setString(2, content);
            pstmt.executeUpdate();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    // Flip the URL's status to 1 (processed) in url_queue.
    private void markAsProcessed(String url) {
        try (PreparedStatement pstmt = conn.prepareStatement(
                "UPDATE url_queue SET status = 1 WHERE url = ?")) {
            pstmt.setString(1, url);
            pstmt.executeUpdate();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    /** Releases the database connection. Safe to call more than once. */
    @Override
    public void close() {
        try {
            if (conn != null && !conn.isClosed()) {
                conn.close();
            }
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        // try-with-resources guarantees the connection is closed on exit.
        try (DBCrawler crawler = new DBCrawler()) {
            // Seed only on the very first run.
            if (crawler.processedCount == 0) {
                crawler.addSeedUrl("https://example.com");
            }
            crawler.startCrawling();
        }
    }
}