package com.ruoyi.system.webcrawler;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

@Service
public class WebCrawlerService {

    private static final Logger logger = LoggerFactory.getLogger(WebCrawlerService.class);

    /** HTTP User-Agent header sent with every request. */
    @Value("${crawler.config.user-agent:Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36}")
    private String userAgent;

    /** Connect/read timeout for each page fetch, in milliseconds. */
    @Value("${crawler.config.timeout:10000}")
    private int timeout;

    /** Maximum fetch attempts per page. Previously injected but never applied; now used by fetchWithRetry. */
    @Value("${crawler.config.retry-times:3}")
    private int retryTimes;

    /** Politeness delay between successive page fetches, in milliseconds. */
    private static final long REQUEST_DELAY_MS = 500L;

    // Per-task crawl status: taskId -> still-running flag. stopCrawling() flips it to false.
    private final Map<String, Boolean> crawlingStatus = new ConcurrentHashMap<>();

    /**
     * Crawls a website starting from {@code request.getUrl()}, collecting page data
     * according to the request's data type, page limit, and follow-links flag.
     *
     * @param request crawl parameters (start URL, max pages, data type, follow-links)
     * @return a populated {@link CrawlResult}; {@code isSuccess()} is false if a fatal
     *         error occurred. Per-page fetch failures are recorded via {@code addError}
     *         and do not fail the whole crawl.
     */
    public CrawlResult crawlWebsite(CrawlRequest request) {
        long startTime = System.currentTimeMillis();
        String taskId = UUID.randomUUID().toString();
        // NOTE(review): taskId is generated here and never returned to the caller, so
        // stopCrawling(taskId) cannot target this run — consider exposing the id on
        // CrawlResult if that class supports it.
        crawlingStatus.put(taskId, true);

        CrawlResult result = new CrawlResult();
        // Concurrent set so visited-URL bookkeeping stays safe if crawling is ever parallelized.
        Set<String> visitedUrls = Collections.newSetFromMap(new ConcurrentHashMap<>());
        AtomicInteger pageCount = new AtomicInteger(0);

        try {
            logger.info("开始爬取网站: {}", request.getUrl());
            crawlPage(request.getUrl(), request, result, visitedUrls, pageCount, taskId);

            result.setSuccess(true);
            result.setMessage("爬取完成");
            result.setTotalPages(pageCount.get());
            result.setExtractedItems(result.getData().size());
        } catch (Exception e) {
            logger.error("爬取过程中发生错误: {}", e.getMessage(), e);
            result.setSuccess(false);
            result.setMessage("爬取失败: " + e.getMessage());
        } finally {
            // Always drop the status entry and record elapsed time, even on failure.
            crawlingStatus.remove(taskId);
            result.setExecutionTime(System.currentTimeMillis() - startTime);
        }

        return result;
    }

    /**
     * Fetches and processes a single page, then recursively follows same-domain links
     * while the page budget allows and the task has not been stopped.
     *
     * @param url         absolute URL to fetch
     * @param request     crawl parameters
     * @param result      accumulator for extracted data and per-page errors
     * @param visitedUrls set of URLs already claimed by this crawl
     * @param pageCount   number of pages fetched so far
     * @param taskId      key into {@link #crawlingStatus} for cooperative cancellation
     */
    private void crawlPage(String url, CrawlRequest request, CrawlResult result,
                           Set<String> visitedUrls, AtomicInteger pageCount, String taskId) {

        // Stop if the task was cancelled or the page budget is exhausted.
        if (!crawlingStatus.getOrDefault(taskId, false) ||
                pageCount.get() >= request.getMaxPages()) {
            return;
        }
        // add() on the concurrent set is an atomic claim: only the first caller for a
        // given URL proceeds (the old contains()-then-add() pair had a check/act race).
        if (!visitedUrls.add(url)) {
            return;
        }

        pageCount.incrementAndGet();
        logger.info("爬取页面: {} ({}/{})", url, pageCount.get(), request.getMaxPages());

        try {
            Document doc = fetchWithRetry(url);

            // Extract content according to the requested data type.
            extractData(doc, url, result, request.getDataType());

            if (request.isFollowLinks()) {
                for (Element link : doc.select("a[href]")) {
                    // Re-check budget and status before each child so we stop promptly
                    // and don't pay the politeness delay for pages we won't fetch.
                    if (pageCount.get() >= request.getMaxPages() ||
                            !crawlingStatus.getOrDefault(taskId, false)) {
                        break;
                    }
                    String nextUrl = link.absUrl("href");
                    if (isValidUrl(nextUrl, url) && !visitedUrls.contains(nextUrl)) {
                        try {
                            Thread.sleep(REQUEST_DELAY_MS);
                        } catch (InterruptedException e) {
                            // Restore the interrupt flag and abort this crawl branch
                            // instead of continuing to issue requests (previous bug).
                            Thread.currentThread().interrupt();
                            return;
                        }
                        crawlPage(nextUrl, request, result, visitedUrls, pageCount, taskId);
                    }
                }
            }

        } catch (IOException e) {
            // Per-page failures are recorded but do not abort the overall crawl.
            logger.warn("无法爬取页面 {}: {}", url, e.getMessage());
            result.addError("无法访问: " + url + " - " + e.getMessage());
        }
    }

    /**
     * Fetches a page with JSoup, retrying up to {@link #retryTimes} attempts.
     * The retry-times config value was previously injected but never used.
     *
     * @param url absolute URL to fetch
     * @return the parsed document
     * @throws IOException the last failure if every attempt fails
     */
    private Document fetchWithRetry(String url) throws IOException {
        int attempts = Math.max(1, retryTimes);
        IOException lastFailure = null;
        for (int attempt = 1; attempt <= attempts; attempt++) {
            try {
                return Jsoup.connect(url)
                        .userAgent(userAgent)
                        .timeout(timeout)
                        .followRedirects(true)
                        .get();
            } catch (IOException e) {
                lastFailure = e;
                logger.debug("Fetch attempt {}/{} failed for {}: {}",
                        attempt, attempts, url, e.getMessage());
            }
        }
        throw lastFailure;
    }

    /**
     * Extracts data from a parsed document into a flat key/value map and appends it
     * to the result. Supported data types: "text", "links", "images"; anything else
     * (including null) falls through to structured metadata extraction.
     */
    private void extractData(Document doc, String url, CrawlResult result, String dataType) {
        Map<String, String> pageData = new HashMap<>();
        pageData.put("url", url);
        pageData.put("title", doc.title());

        switch (dataType != null ? dataType : "html") {
            case "text":
                pageData.put("content", doc.text());
                break;

            case "links":
                StringBuilder linksText = new StringBuilder();
                for (Element link : doc.select("a[href]")) {
                    linksText.append(link.text()).append(" -> ").append(link.attr("href")).append("\n");
                }
                pageData.put("links", linksText.toString());
                break;

            case "images":
                StringBuilder imagesText = new StringBuilder();
                for (Element img : doc.select("img[src]")) {
                    imagesText.append(img.attr("alt")).append(" -> ").append(img.attr("src")).append("\n");
                }
                pageData.put("images", imagesText.toString());
                break;

            default:
                // Structured metadata summary of the page.
                pageData.put("meta_description", getMetaContent(doc, "description"));
                pageData.put("meta_keywords", getMetaContent(doc, "keywords"));
                pageData.put("h1", doc.select("h1").text());
                pageData.put("h2", doc.select("h2").text());
                pageData.put("paragraphs", String.valueOf(doc.select("p").size()));
                break;
        }

        result.addData(pageData);
    }

    /**
     * Returns the content of the first {@code <meta name=...>} tag, or "" if absent.
     * Only called with literal names ("description", "keywords"), so the selector
     * concatenation is safe here.
     */
    private String getMetaContent(Document doc, String name) {
        Element meta = doc.selectFirst("meta[name=" + name + "]");
        return meta != null ? meta.attr("content") : "";
    }

    /**
     * Accepts a candidate URL only if it is absolute http(s) and shares the base
     * URL's domain, keeping the crawl on-site.
     */
    private boolean isValidUrl(String url, String baseUrl) {
        if (url == null || url.isEmpty()) {
            return false;
        }

        if (!url.startsWith("http://") && !url.startsWith("https://")) {
            return false;
        }

        try {
            return getDomain(baseUrl).equals(getDomain(url));
        } catch (Exception e) {
            return false;
        }
    }

    /**
     * Extracts the host (minus an optional leading "www.") from an http(s) URL.
     * If the regex does not match, replaceAll returns the input unchanged, which
     * then fails the domain-equality check in isValidUrl.
     */
    private String getDomain(String url) {
        return url.replaceAll("^(https?://)(www\\.)?([^/]+).*$", "$3");
    }

    /**
     * Requests cooperative cancellation of a running crawl task. Uses replace()
     * rather than put() so an unknown/finished taskId does not insert a stale
     * entry that would never be cleaned up.
     */
    public void stopCrawling(String taskId) {
        crawlingStatus.replace(taskId, false);
    }
}
