package com.crawler.system.service.impl;

import com.crawler.system.entity.CrawlerResult;
import com.crawler.system.entity.CrawlerRule;
import com.crawler.system.repository.CrawlerRuleRepository;
import com.crawler.system.repository.CrawlerResultRepository;
import com.crawler.system.repository.CrawlerTaskRepository;
import com.crawler.system.service.AbstractCrawlerEngine;
import lombok.extern.slf4j.Slf4j;
import org.jsoup.Connection;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.IOException;
import java.net.Authenticator;
import java.net.MalformedURLException;
import java.net.PasswordAuthentication;
import java.net.URL;
import java.time.LocalDateTime;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Basic crawler engine.
 * Crawls static HTML pages, using the Jsoup library for page parsing and data extraction.
 *
 * @author crawler-system
 */
@Slf4j
public class BasicCrawlerEngine extends AbstractCrawlerEngine {

    /**
     * Crawl depth limit.
     * 0 (the default) crawls only the seed URL; -1 means unlimited depth.
     */
    private int depthLimit = 0;

    /**
     * Depth of the level currently being crawled (0 = seed level).
     */
    private final AtomicInteger currentDepth = new AtomicInteger(0);

    /**
     * Thread pool used for concurrent page fetching.
     * Created in {@link #start()} and shut down in its finally block.
     */
    private ExecutorService executorService;

    /**
     * Running flag: true while the engine is crawling, false once stopped.
     */
    private volatile boolean running = false;

    /**
     * Number of URLs discovered at each depth level; drives progress computation.
     */
    private final Map<Integer, AtomicInteger> depthUrlCountMap = new ConcurrentHashMap<>();

    /**
     * Number of URLs already processed at each depth level; drives progress computation.
     */
    private final Map<Integer, AtomicInteger> depthProcessedCountMap = new ConcurrentHashMap<>();

    private CrawlerRuleRepository ruleRepository;

    /**
     * Default constructor.
     */
    public BasicCrawlerEngine() {
        super();
    }

    /**
     * Constructor.
     *
     * @param taskRepository   crawler task repository
     * @param resultRepository crawler result repository
     * @param ruleRepository   crawler rule repository
     */
    public BasicCrawlerEngine(
        CrawlerTaskRepository taskRepository,
        CrawlerResultRepository resultRepository,
        CrawlerRuleRepository ruleRepository
    ) {
        super(taskRepository, resultRepository);
        this.ruleRepository = ruleRepository;
    }

    /**
     * Sets the crawler rule repository.
     *
     * @param ruleRepository crawler rule repository
     */
    public void setRuleRepository(CrawlerRuleRepository ruleRepository) {
        this.ruleRepository = ruleRepository;
    }

    /**
     * Runs the crawl for the configured task: seeds the queue, performs a
     * breadth-first level-by-level crawl, and records the final task status.
     *
     * @return ids of the persisted crawl results (possibly partial on failure)
     */
    @Override
    public List<Long> start() {
        try {
            log.info("基础爬虫引擎开始任务 taskId={}, targetUrl={}", task.getId(), task.getTargetUrl());

            // Record the start time
            startTime = LocalDateTime.now();

            // Mark the task as running
            status = "RUNNING";
            updateTaskStatus("RUNNING");

            // Initialize the worker thread pool
            initThreadPool();

            running = true;

            // Reset URL queues and per-depth counters, then enqueue the seed URL
            initUrlQueue();

            // Breadth-first crawl, one depth level at a time
            startCrawling();

            // Record the end time
            endTime = LocalDateTime.now();

            // Only flip to COMPLETED if nothing (stop()/failure) changed the status meanwhile
            if ("RUNNING".equals(status)) {
                status = "COMPLETED";
                updateTaskStatus("COMPLETED");
            }

            log.info("基础爬虫引擎完成任务: taskId={}, resultCount={}", task.getId(), resultIds.size());
            return resultIds;
        } catch (Exception e) {
            errorMessage = "爬虫任务执行异常: " + e.getMessage();
            log.error("基础爬虫引擎执行异常: taskId={}", task.getId(), e);

            status = "FAILED";
            updateTaskStatus("FAILED");

            // Return whatever was collected before the failure
            return resultIds;
        } finally {
            // Always release the thread pool and clear the running flag
            shutdownThreadPool();
            running = false;
        }
    }

    /**
     * Stops the crawl: clears the running flag so the level loop exits, then
     * delegates to the base-class stop handling.
     */
    @Override
    public void stop() {
        running = false;
        super.stop();
    }

    /**
     * Sets the crawl depth limit.
     *
     * @param depth depth limit; 0 = seed URL only, -1 = unlimited
     */
    @Override
    public void setDepth(int depth) {
        this.depthLimit = depth;
        log.info("设置爬取深度: taskId={}, depth={}", task != null ? task.getId() : "unknown", depth);
    }

    /**
     * Initializes the fetch thread pool, sized by the configured thread count
     * or, when unset, by the number of available processors.
     */
    private void initThreadPool() {
        int threadCount = this.threadCount > 0 ? this.threadCount : Runtime.getRuntime().availableProcessors();
        log.info("初始化线程池: taskId={}, threadCount={}", task.getId(), threadCount);

        // Fixed-size pool with named worker threads for easier debugging
        executorService = Executors.newFixedThreadPool(threadCount, r -> {
            Thread thread = new Thread(r);
            thread.setName("crawler-" + task.getId() + "-" + thread.getId());
            return thread;
        });
    }

    /**
     * Shuts the thread pool down gracefully, forcing termination after a
     * 30-second grace period or on interruption.
     */
    private void shutdownThreadPool() {
        if (executorService != null && !executorService.isShutdown()) {
            try {
                executorService.shutdown();

                // Wait up to 30 seconds for in-flight fetches, then force shutdown
                if (!executorService.awaitTermination(30, TimeUnit.SECONDS)) {
                    executorService.shutdownNow();
                }
            } catch (InterruptedException e) {
                // Interrupted while waiting: force shutdown and restore the interrupt flag
                executorService.shutdownNow();
                Thread.currentThread().interrupt();
            }

            log.info("关闭线程池 taskId={}", task.getId());
        }
    }

    /**
     * Resets URL queues, result ids and per-depth counters, then seeds the
     * pending queue with the task's target URL (when present).
     */
    private void initUrlQueue() {
        pendingUrls.clear();
        crawledUrls.clear();
        resultIds.clear();
        currentDepth.set(0);
        depthUrlCountMap.clear();
        depthProcessedCountMap.clear();

        // Enqueue the seed URL and count it as the single URL of depth 0
        if (task.getTargetUrl() != null && !task.getTargetUrl().isEmpty()) {
            pendingUrls.add(task.getTargetUrl());
            depthUrlCountMap.computeIfAbsent(0, k -> new AtomicInteger(0)).incrementAndGet();
        }
    }

    /**
     * Breadth-first crawl loop: processes one depth level at a time until the
     * queue is exhausted, the depth limit is reached, or the engine is stopped.
     *
     * @throws InterruptedException if the coordinating thread is interrupted while throttling
     */
    private void startCrawling() throws InterruptedException {
        while (running && (depthLimit == -1 || currentDepth.get() <= depthLimit)) {
            int depth = currentDepth.get();

            // No URLs left at this level: the crawl is finished
            if (pendingUrls.isEmpty()) {
                break;
            }

            log.info("开始爬取第{}层 taskId={}, urlCount={}", depth, task.getId(), depthUrlCountMap.getOrDefault(depth, new AtomicInteger(0)).get());

            // URLs discovered on this level, to be crawled on the next one
            Queue<String> nextLevelUrls = new ConcurrentLinkedQueue<>();

            // Per-level processed counter (drives progress reporting)
            AtomicInteger processedCount = depthProcessedCountMap.computeIfAbsent(depth, k -> new AtomicInteger(0));

            // Drain the pending queue: these URLs form the current level
            List<String> currentUrls = new ArrayList<>();
            String url;
            while ((url = pendingUrls.poll()) != null) {
                currentUrls.add(url);
            }

            // One latch slot per URL of this level; slots of URLs that are never
            // submitted (early stop) are drained below so await() cannot hang.
            CountDownLatch latch = new CountDownLatch(currentUrls.size());
            int submitted = 0;

            for (String currentUrl : currentUrls) {
                if (!running) {
                    break;
                }

                // Politeness delay between request submissions
                Thread.sleep(interval);

                executorService.submit(() -> {
                    try {
                        // Fetch the page and collect its outgoing links
                        List<String> extractedUrls = crawlUrl(currentUrl, depth);

                        if (extractedUrls != null && !extractedUrls.isEmpty()) {
                            for (String extractedUrl : extractedUrls) {
                                if (shouldCrawl(extractedUrl)) {
                                    nextLevelUrls.add(extractedUrl);

                                    // Track how many URLs the next level will contain
                                    depthUrlCountMap
                                            .computeIfAbsent(depth + 1, k -> new AtomicInteger(0))
                                            .incrementAndGet();
                                }
                            }
                        }
                    } catch (Exception e) {
                        log.error("爬取URL异常: taskId={}, url={}", task.getId(), currentUrl, e);
                    } finally {
                        // Count this URL as processed and refresh the progress figure
                        processedCount.incrementAndGet();
                        updateProgress();
                        latch.countDown();
                    }
                });
                submitted++;
            }

            // BUGFIX: if stop() interrupted the submission loop, release the latch
            // slots of URLs that were never submitted; otherwise await() deadlocks.
            for (int i = submitted; i < currentUrls.size(); i++) {
                latch.countDown();
            }

            // Wait for every submitted fetch of this level to finish
            latch.await();

            // Promote the discovered URLs to the pending queue for the next level
            pendingUrls.addAll(nextLevelUrls);

            currentDepth.incrementAndGet();
        }
    }

    /**
     * Fetches a single URL with Jsoup, persists the page as a CrawlerResult,
     * and returns the outgoing links found in the page.
     *
     * @param url   URL to fetch
     * @param depth current crawl depth
     * @return extracted links, or an empty list when the URL was already
     *         crawled or the fetch failed
     */
    private List<String> crawlUrl(String url, int depth) {
        // Skip URLs that were already crawled.
        // NOTE(review): contains()+add() is a check-then-act window — two threads may
        // pass the check concurrently and fetch the same URL twice. Benign (duplicate
        // result row); could be made atomic if crawledUrls is a concurrent Set — confirm
        // its declared type in AbstractCrawlerEngine.
        if (crawledUrls.contains(url)) {
            return Collections.emptyList();
        }
        crawledUrls.add(url);

        CrawlerResult result = new CrawlerResult();
        result.setSourceUrl(url);
        result.setDepth(depth);
        result.setProcessStatus("PENDING");
        result.setFetchTime(LocalDateTime.now());

        long startTime = System.currentTimeMillis();

        try {
            log.debug("爬取URL: taskId={}, url={}, depth={}", task.getId(), url, depth);

            // Build the request; HTTP errors are tolerated so the status code can be recorded
            Connection connection = Jsoup.connect(url)
                    .timeout(timeout)
                    .ignoreContentType(true)
                    .ignoreHttpErrors(true)
                    .followRedirects(true);

            // Custom headers win over the default User-Agent
            if (!headers.isEmpty()) {
                connection.headers(headers);
            } else {
                connection.userAgent("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36");
            }

            // Attach configured cookies, if any
            if (!cookies.isEmpty()) {
                for (Map.Entry<String, String> entry : cookies.entrySet()) {
                    connection.cookie(entry.getKey(), entry.getValue());
                }
            }

            // Optional HTTP proxy
            if (proxyIp != null) {
                connection.proxy(proxyIp.getIpAddress(), proxyIp.getPort());

                // NOTE(review): Authenticator.setDefault mutates JVM-global state on every
                // request and is never restored — concurrent tasks using different proxies
                // will interfere. Consider setting it once at startup, or sending a
                // Proxy-Authorization header instead.
                if (proxyIp.getUsername() != null && proxyIp.getPassword() != null) {
                    Authenticator.setDefault(new Authenticator() {
                        @Override
                        protected PasswordAuthentication getPasswordAuthentication() {
                            return new PasswordAuthentication(proxyIp.getUsername(), proxyIp.getPassword().toCharArray());
                        }
                    });
                }
            }

            // Execute the request
            Connection.Response response = connection.execute();

            // Wall-clock fetch duration
            long fetchDuration = System.currentTimeMillis() - startTime;

            // Parse the response body into a DOM
            Document document = response.parse();

            // Populate the result entity
            result.setStatusCode(response.statusCode());
            result.setContentType(response.contentType());
            result.setCharset(response.charset());
            result.setContentSize((long) response.bodyAsBytes().length);
            result.setTitle(document.title());
            result.setHtmlContent(document.outerHtml());
            result.setTextContent(document.text());
            result.setFetchDuration(fetchDuration);
            result.setProcessStatus("PROCESSED");

            // Extract and store image URLs (resolved to absolute form)
            Elements imgElements = document.select("img[src]");
            List<String> imageUrls = new ArrayList<>();
            for (Element img : imgElements) {
                String imgUrl = img.absUrl("src");
                if (!imgUrl.isEmpty()) {
                    imageUrls.add(imgUrl);
                }
            }
            result.setImageUrls(String.join(",", imageUrls));

            // Extract and store outgoing links
            List<String> extractedUrls = extractLinks(document, url);
            result.setLinkUrls(String.join(",", extractedUrls));

            // Persist the result and remember its id
            Long resultId = saveResult(result);
            if (resultId != null) {
                resultIds.add(resultId);
            }

            return extractedUrls;
        } catch (IOException e) {
            log.error("爬取URL异常: taskId={}, url={}", task.getId(), url, e);

            // Record the failure so it shows up in the task's results
            result.setStatusCode(0);
            result.setProcessStatus("FAILED");
            result.setProcessMessage(e.getMessage());
            result.setFetchDuration(System.currentTimeMillis() - startTime);

            Long resultId = saveResult(result);
            if (resultId != null) {
                resultIds.add(resultId);
            }

            return Collections.emptyList();
        }
    }

    /**
     * Recomputes the overall progress percentage from the per-depth discovered
     * and processed URL counters, and publishes it to the statistics map.
     */
    private void updateProgress() {
        int totalUrls = 0;
        int processedUrls = 0;

        // Sum discovered and processed URL counts across all depth levels
        for (Map.Entry<Integer, AtomicInteger> entry : depthUrlCountMap.entrySet()) {
            int depth = entry.getKey();
            totalUrls += entry.getValue().get();

            AtomicInteger processed = depthProcessedCountMap.get(depth);
            if (processed != null) {
                processedUrls += processed.get();
            }
        }

        // Progress as an integer percentage; 0 when nothing has been discovered yet
        int progressValue = totalUrls > 0 ? (int) ((processedUrls * 100.0) / totalUrls) : 0;

        progress.set(progressValue);

        statistics.put("totalUrls", totalUrls);
        statistics.put("processedUrls", processedUrls);
        statistics.put("progress", progressValue);
    }

    /**
     * Extracts links from raw HTML by parsing it against the given base URL.
     *
     * @param html    raw HTML content
     * @param baseUrl base URL used to resolve relative links
     * @return extracted absolute links, or an empty list on parse failure
     */
    @Override
    protected List<String> extractLinks(String html, String baseUrl) {
        try {
            Document document = Jsoup.parse(html, baseUrl);
            return extractLinks(document, baseUrl);
        } catch (Exception e) {
            log.error("解析HTML提取链接异常: taskId={}, baseUrl={}", task.getId(), baseUrl, e);
            return Collections.emptyList();
        }
    }

    /**
     * Extracts normalized links from a parsed document.
     *
     * @param document parsed HTML document
     * @param baseUrl  base URL used for normalization
     * @return extracted, normalized links
     */
    private List<String> extractLinks(Document document, String baseUrl) {
        List<String> links = new ArrayList<>();

        // All anchor tags carrying an href attribute
        Elements linkElements = document.select("a[href]");

        for (Element link : linkElements) {
            // Resolve against the document's base to get an absolute URL
            String absUrl = link.absUrl("href");

            String normalizedUrl = normalizeUrl(absUrl, baseUrl);

            if (normalizedUrl != null && !normalizedUrl.isEmpty()) {
                links.add(normalizedUrl);
            }
        }

        return links;
    }

    /**
     * Normalizes a URL: resolves it against the base URL and strips the
     * fragment part. Non-crawlable schemes and anchors are rejected.
     *
     * @param url     raw URL
     * @param baseUrl base URL used for resolution
     * @return normalized URL, or null when the URL is not crawlable
     */
    @Override
    protected String normalizeUrl(String url, String baseUrl) {
        if (url == null || url.isEmpty()) {
            return null;
        }

        // Skip JavaScript pseudo-links, in-page anchors, and non-HTTP schemes.
        // BUGFIX: mailto: resolves through java.net.URL and previously produced
        // garbage like "mailto://..."; tel: and friends are not fetchable either.
        if (url.startsWith("javascript:") || url.startsWith("#")
                || url.startsWith("mailto:") || url.startsWith("tel:")) {
            return null;
        }

        try {
            // Resolve relative URLs against the base
            URL urlObj = new URL(new URL(baseUrl), url);

            // Rebuild without the fragment (#...) part
            String normalized = urlObj.getProtocol() + "://" + urlObj.getHost() +
                    (urlObj.getPort() != -1 ? ":" + urlObj.getPort() : "") +
                    urlObj.getPath() +
                    (urlObj.getQuery() != null ? "?" + urlObj.getQuery() : "");

            return normalized;
        } catch (MalformedURLException e) {
            log.debug("无效URL: {}", url);
            return null;
        }
    }

    /**
     * Releases engine resources: base-class cleanup, immediate thread pool
     * shutdown, and URL queue clearing.
     */
    @Override
    public void cleanup() {
        super.cleanup();
        if (executorService != null && !executorService.isShutdown()) {
            executorService.shutdownNow();
        }
        pendingUrls.clear();
        crawledUrls.clear();
    }

    /**
     * Executes a JavaScript snippet.
     * The basic engine does not support JavaScript execution.
     *
     * @param script JavaScript code
     * @return always null
     */
    @Override
    public Object executeJavaScript(String script) {
        log.warn("基础爬虫引擎不支持JavaScript执行");
        return null;
    }

    /**
     * Sets dynamic-page handling configuration.
     * The basic engine does not support dynamic pages; the config is ignored.
     *
     * @param config configuration parameter map
     */
    @Override
    public void setDynamicPageConfig(Map<String, Object> config) {
        super.setDynamicPageConfig(config);
        log.warn("基础爬虫引擎不支持动态页面处理，配置将被忽略");
    }
}
