package work.comeback.omo.service.crawler;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;

/**
 * Minimal example: open (auto-creating) a SQLite database file, ensure the
 * {@code links} table exists, and insert one sample URL via a PreparedStatement.
 */
class SQLiteExample {
    private static final Logger logger = LoggerFactory.getLogger(SQLiteExample.class);

    public static void main(String[] args) {
        // SQLite creates the database file automatically if it does not exist.
        String url = "jdbc:sqlite:sample.db"; // database file path
        try (Connection conn = DriverManager.getConnection(url)) {
            // Note: getConnection never returns null — it throws SQLException on failure.
            logger.info("Connected to the database.");

            // Create the table if missing; close the Statement via try-with-resources
            // (the original leaked it).
            String createTableSQL = "CREATE TABLE IF NOT EXISTS links (id INTEGER PRIMARY KEY AUTOINCREMENT, url TEXT)";
            try (Statement stmt = conn.createStatement()) {
                stmt.execute(createTableSQL);
            }

            // Insert a row using a parameterized statement (safe against SQL injection).
            String insertSQL = "INSERT INTO links(url) VALUES(?)";
            try (PreparedStatement pstmt = conn.prepareStatement(insertSQL)) {
                // Assume the scraped link is "https://example.com"
                pstmt.setString(1, "https://example.com");
                pstmt.executeUpdate();
                logger.info("Link inserted into the database.");
            }
        } catch (SQLException e) {
            // Log the full exception, not just the message, so the stack trace is kept.
            logger.warn("Database operation failed", e);
        }
    }
}

/**
 * Demonstrates three ways of extracting data from a fetched page with Jsoup:
 * CSS selectors, element selection (in place of XPath, which Jsoup does not
 * support natively), and a regular expression over the raw HTML.
 */
class WebScraper {
    private static final Logger logger = LoggerFactory.getLogger(WebScraper.class);

    public static void main(String[] args) throws Exception {
        // Connect to the target URL and fetch the page content.
        String url = "https://example.com";
        Document doc = Jsoup.connect(url).get();

        // 1. CSS selector: all <a> tags that carry an href attribute.
        Elements links = doc.select("a[href]");
        for (Element link : links) {
            // Parameterized logging avoids eager string concatenation.
            logger.info("Link: {}", link.attr("href"));
        }

        // 2. "XPath-like" extraction via Jsoup selectors (Jsoup has no native XPath):
        // e.g. extract all paragraphs.
        Elements paragraphs = doc.select("p");
        for (Element paragraph : paragraphs) {
            logger.info("Paragraph: {}", paragraph.text());
        }

        // 3. Regular expression over raw HTML. The (?s) DOTALL flag is required:
        // without it '.' does not match line terminators, so the check would fail
        // on any multi-line document (i.e., virtually every real page).
        String htmlContent = doc.html();
        if (htmlContent.matches("(?s).*<title>.*</title>.*")) {
            logger.info("Title tag found in the page");
        }
    }
}

/**
 * Fetches a page with Jsoup, extracts every hyperlink, and persists the URLs
 * into a local SQLite database using batched, parameterized inserts.
 */
public class WebScraperWithDatabase {
    private static final Logger logger = LoggerFactory.getLogger(WebScraperWithDatabase.class);

    public static void main(String[] args) throws Exception {
        String url = "https://example.com";
        Document doc = Jsoup.connect(url).get();

        // Connect to the SQLite database (file is auto-created if absent).
        String dbUrl = "jdbc:sqlite:sample.db";
        try (Connection conn = DriverManager.getConnection(dbUrl)) {
            // Ensure the table exists; close the Statement (the original leaked it).
            String createTableSQL = "CREATE TABLE IF NOT EXISTS links (id INTEGER PRIMARY KEY AUTOINCREMENT, url TEXT)";
            try (Statement stmt = conn.createStatement()) {
                stmt.execute(createTableSQL);
            }

            // Extract every link from the page and store the URLs in one batch,
            // which is far cheaper than a round trip per row.
            Elements links = doc.select("a[href]");
            String insertSQL = "INSERT INTO links(url) VALUES(?)";
            try (PreparedStatement stmt = conn.prepareStatement(insertSQL)) {
                for (Element link : links) {
                    stmt.setString(1, link.attr("href"));
                    stmt.addBatch();
                }
                stmt.executeBatch();
            }
            logger.info("Stored {} links in the database.", links.size());
        } catch (SQLException e) {
            // Keep the full exception so the stack trace is not lost.
            logger.warn("Failed to store links", e);
        }
    }
}
