package com.zhava.crawler.infrastructure.gateway;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.opencsv.CSVWriter;
import com.zhava.crawler.domain.enums.OutputFormatEnum;
import com.zhava.crawler.domain.gateway.CrawlerGateway;
import com.zhava.crawler.domain.model.Crawler;
import com.zhava.crawler.domain.model.PaginatedCrawler;
import com.zhava.crawler.infrastructure.util.FileExportUtil;
import com.zhava.crawler.infrastructure.util.HeadersUtil;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;

import java.io.StringWriter;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * 爬虫网关接口实现
 *
 * <p>Jsoup-backed implementation of {@link CrawlerGateway}: fetches pages over HTTP,
 * optionally extracts links and CSS-selected content, records metadata, and delegates
 * file export to {@link FileExportUtil}.
 *
 * @author zhaxiang
 */
@Component
public class CrawlerGatewayImpl implements CrawlerGateway {

    private static final Logger logger = LoggerFactory.getLogger(CrawlerGatewayImpl.class);

    /** Bounds of the random inter-page delay in milliseconds (lower inclusive, upper exclusive). */
    private static final int MIN_PAGE_DELAY_MS = 500;
    private static final int MAX_PAGE_DELAY_MS = 2000;

    /** Jackson mapper is thread-safe after configuration; shared for JSON output. */
    private final ObjectMapper objectMapper = new ObjectMapper();

    /**
     * Fetches {@code crawler.getUrl()} and fills the status code, raw HTML, optional
     * extracted links/content and page metadata into the given {@link Crawler}.
     * On failure the status code is set to 500 and the error message recorded;
     * the execution time is always recorded.
     *
     * @param crawler request/result holder; mutated in place
     * @return the same instance with results (or error state) populated
     */
    @Override
    public Crawler executeCrawling(Crawler crawler) {
        long startTime = System.currentTimeMillis();

        try {
            logger.info("开始爬取页面: {}", crawler.getUrl());

            // 合并默认请求头和用户自定义请求头
            Map<String, String> headers = HeadersUtil.mergeWithDefaultHeaders(crawler.getHeaders());
            logger.info("使用请求头: {}", headers);

            // 设置连接选项
            Document document = Jsoup.connect(crawler.getUrl())
                    .timeout(crawler.getConnectTimeout())
                    .headers(headers)
                    .get();

            logger.info("成功获取页面, 页面标题: {}", document.title());

            // Report the actual HTTP status: Jsoup exposes it on the connection's
            // response after get() — no need to hard-code 200.
            crawler.setStatusCode(document.connection().response().statusCode());
            crawler.setRawContent(document.html());

            // 提取所有链接（如果需要）
            if (Boolean.TRUE.equals(crawler.getExtractLinks())) {
                Elements links = document.select("a[href]");
                List<String> extractedLinks = new ArrayList<>(links.size());
                links.forEach(link -> extractedLinks.add(link.attr("abs:href")));
                crawler.setLinks(extractedLinks);
                logger.info("提取到 {} 个链接", extractedLinks.size());
            }

            // 如果有选择器，提取内容
            if (crawler.getCssSelector() != null && !crawler.getCssSelector().isEmpty()) {
                logger.info("使用CSS选择器提取内容: {}", crawler.getCssSelector());
                crawler = extractContent(crawler, crawler.getCssSelector());
                // instanceof is null-safe, so no separate null check is needed
                if (crawler.getExtractedData() instanceof List) {
                    logger.info("提取到 {} 条数据", ((List<?>) crawler.getExtractedData()).size());
                }
            }

            // 记录元数据
            Map<String, String> metadata = new HashMap<>();
            metadata.put("title", document.title());
            String contentType = document.connection().response().contentType();
            if (contentType != null) {
                // the Content-Type header may be absent; avoid storing a null value
                metadata.put("contentType", contentType);
            }
            // 存储使用的请求头到元数据中，便于调试
            metadata.put("request-headers", headers.toString());
            crawler.setMetadata(metadata);

        } catch (Exception e) {
            crawler.setStatusCode(500);
            crawler.setErrorMessage(e.getMessage());
            logger.error("爬取失败: {}", e.getMessage(), e);
        } finally {
            // 计算执行时间
            long executionTime = System.currentTimeMillis() - startTime;
            crawler.setExecutionTime(executionTime);
            logger.info("爬取完成，执行时间: {} 毫秒", executionTime);
        }

        // 处理文件导出
        if (crawler.getExportFile() != null && crawler.getExportFile() == 1) {
            exportFile(crawler);
        }

        return crawler;
    }

    /**
     * Crawls pages {@code startPage..endPage} by rewriting the page parameter of the
     * base URL, collecting one {@link PaginatedCrawler.PageResult} per page. A random
     * delay is inserted between requests to avoid hammering the target site.
     *
     * @param crawler paginated request/result holder; mutated in place
     * @return the same instance with per-page results and totals populated
     */
    @Override
    public PaginatedCrawler executePaginatedCrawling(PaginatedCrawler crawler) {
        long startTime = System.currentTimeMillis();
        int successCount = 0;

        try {
            logger.info("开始分页爬取，起始页: {}, 结束页: {}, URL: {}",
                    crawler.getStartPage(), crawler.getEndPage(), crawler.getUrl());

            List<PaginatedCrawler.PageResult> pageResults = new ArrayList<>();

            // 对每个页码执行爬取
            for (int page = crawler.getStartPage(); page <= crawler.getEndPage(); page++) {
                String pageUrl = buildPageUrl(crawler.getUrl(), crawler.getPageParameterName(), page);
                logger.info("开始爬取第 {} 页: {}", page, pageUrl);

                PaginatedCrawler.PageResult pageResult = new PaginatedCrawler.PageResult();
                pageResult.setPageNumber(page);
                pageResult.setPageUrl(pageUrl);

                long pageStartTime = System.currentTimeMillis();

                try {
                    // 合并默认请求头和用户自定义请求头
                    Map<String, String> headers = HeadersUtil.mergeWithDefaultHeaders(crawler.getHeaders());

                    // 设置连接选项并执行爬取
                    Document document = Jsoup.connect(pageUrl)
                            .timeout(crawler.getConnectTimeout())
                            .headers(headers)
                            .get();

                    // Record the real HTTP status from the response
                    pageResult.setStatusCode(document.connection().response().statusCode());

                    // 提取内容
                    if (crawler.getCssSelector() != null && !crawler.getCssSelector().isEmpty()) {
                        Elements elements = document.select(crawler.getCssSelector());
                        logger.info("第 {} 页使用选择器 {} 找到 {} 个元素", page, crawler.getCssSelector(), elements.size());

                        if (!elements.isEmpty()) {
                            List<String> extractedTexts = new ArrayList<>(elements.size());
                            elements.forEach(element -> extractedTexts.add(element.text()));
                            pageResult.setExtractedData(extractedTexts);
                        } else {
                            logger.info("第 {} 页未找到匹配选择器的内容", page);
                        }
                    }

                    successCount++;
                    logger.info("第 {} 页爬取成功", page);
                } catch (Exception e) {
                    pageResult.setStatusCode(500);
                    pageResult.setErrorMessage(e.getMessage());
                    logger.error("爬取页面 {} 失败: {}", page, e.getMessage(), e);
                } finally {
                    // 计算页面爬取时间
                    long pageCrawlTime = System.currentTimeMillis() - pageStartTime;
                    pageResult.setCrawlTime(pageCrawlTime);
                    logger.info("第 {} 页爬取耗时: {} 毫秒", page, pageCrawlTime);
                }

                // 添加到结果列表
                pageResults.add(pageResult);

                // 页面间延迟，避免请求过于频繁（最后一页之后无需延迟）
                if (page < crawler.getEndPage()) {
                    try {
                        // 随机延迟500-2000毫秒; ThreadLocalRandom avoids allocating
                        // a new Random on every iteration
                        int sleepTime = ThreadLocalRandom.current().nextInt(MIN_PAGE_DELAY_MS, MAX_PAGE_DELAY_MS);
                        logger.info("页面间延迟 {} 毫秒后继续爬取", sleepTime);
                        Thread.sleep(sleepTime);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        logger.warn("页面间延迟被中断");
                        // Stop crawling promptly once interrupted instead of
                        // continuing to issue requests
                        break;
                    }
                }
            }

            // 设置爬取结果
            crawler.setPageResults(pageResults);
            crawler.setPagesCollected(successCount);
            crawler.setStatusCode(200);
            logger.info("分页爬取完成，成功爬取 {} 页", successCount);

        } catch (Exception e) {
            crawler.setStatusCode(500);
            crawler.setErrorMessage(e.getMessage());
            logger.error("分页爬取失败: {}", e.getMessage(), e);
        } finally {
            // 计算总执行时间
            long totalCrawlTime = System.currentTimeMillis() - startTime;
            crawler.setTotalCrawlTime(totalCrawlTime);
            logger.info("分页爬取总耗时: {} 毫秒", totalCrawlTime);
        }

        // 处理文件导出
        if (crawler.getExportFile() != null && crawler.getExportFile() == 1) {
            exportPaginatedFile(crawler);
        }

        return crawler;
    }

    /**
     * 构建带页码参数的URL
     *
     * <p>Appends the page parameter when absent, or replaces the numeric value of an
     * existing occurrence. On a malformed URL the base URL is returned unchanged.
     *
     * @param baseUrl 基础URL
     * @param paramName 页码参数名
     * @param pageNumber 页码
     * @return 构建后的URL
     */
    private String buildPageUrl(String baseUrl, String paramName, int pageNumber) {
        try {
            URI uri = new URI(baseUrl);
            String query = uri.getQuery();

            // 如果没有查询参数，直接添加
            if (query == null || query.isEmpty()) {
                return baseUrl + "?" + paramName + "=" + pageNumber;
            }

            // 如果已存在页码参数，则替换
            Pattern pattern = Pattern.compile("([?&])" + Pattern.quote(paramName) + "=\\d+");
            Matcher matcher = pattern.matcher(baseUrl);

            if (matcher.find()) {
                // quoteReplacement guards against '$' or '\' in the parameter name
                // being interpreted as replacement syntax
                return matcher.replaceAll("$1" + Matcher.quoteReplacement(paramName) + "=" + pageNumber);
            }

            // 如果不存在页码参数，则添加
            return baseUrl + "&" + paramName + "=" + pageNumber;

        } catch (URISyntaxException e) {
            logger.error("URL格式错误: {}", e.getMessage(), e);
            return baseUrl;
        }
    }

    /**
     * Re-parses the crawler's raw HTML and stores the text of all elements matching
     * {@code selector} as a {@code List<String>} in the extracted data. No-op when
     * there is no raw content or no element matches.
     *
     * @param crawler holder of the raw content; mutated in place
     * @param selector CSS selector to apply
     * @return the same instance
     */
    @Override
    public Crawler extractContent(Crawler crawler, String selector) {
        try {
            if (crawler.getRawContent() != null) {
                Document document = Jsoup.parse(crawler.getRawContent());
                Elements elements = document.select(selector);

                if (!elements.isEmpty()) {
                    // 提取文本内容作为示例
                    List<String> extractedTexts = new ArrayList<>(elements.size());
                    elements.forEach(element -> extractedTexts.add(element.text()));
                    crawler.setExtractedData(extractedTexts);
                }
            }
        } catch (Exception e) {
            crawler.setErrorMessage("提取内容失败: " + e.getMessage());
            logger.error("提取内容失败: {}", e.getMessage(), e);
        }

        return crawler;
    }

    /**
     * Exports the crawler's extracted data to a file when export is requested
     * (exportFile == 1) and there is non-empty data.
     *
     * @param crawler source of the data and output format
     * @return the exported file path, or {@code null} when nothing was exported
     */
    @Override
    public String exportFile(Crawler crawler) {
        if (crawler.getExportFile() == null || crawler.getExportFile() != 1 || crawler.getExtractedData() == null) {
            return null;
        }

        try {
            Object dataToExport = crawler.getExtractedData();

            // 检查数据是否为空
            if (dataToExport instanceof List && ((List<?>) dataToExport).isEmpty()) {
                logger.info("爬取结果为空，不生成文件");
                return null;
            }

            // 如果已经转换过格式，使用转换后的数据
            // NOTE(review): transformOutput already serializes to the target format,
            // and the same format is passed to FileExportUtil below — verify the
            // util does not convert a second time.
            if (crawler.getOutputFormat() != OutputFormatEnum.RAW) {
                dataToExport = transformOutput(crawler, crawler.getOutputFormat());
            }

            String filePath = FileExportUtil.exportToFile(dataToExport, crawler.getOutputFormat(), "crawler");
            logger.info("文件导出成功: {}", filePath);
            return filePath;
        } catch (Exception e) {
            logger.error("导出文件失败: {}", e.getMessage(), e);
            return null;
        }
    }

    /**
     * Flattens the extracted data of all page results into a single list and exports
     * it to a file; records the path on the crawler when export succeeds.
     *
     * @param crawler paginated crawler whose page results are exported
     * @return the exported file path, or {@code null} when nothing was exported
     */
    @Override
    public String exportPaginatedFile(PaginatedCrawler crawler) {
        if (crawler.getExportFile() == null || crawler.getExportFile() != 1
                || crawler.getPageResults() == null || crawler.getPageResults().isEmpty()) {
            return null;
        }

        try {
            // 提取所有页面的数据
            List<Object> allData = new ArrayList<>();
            for (PaginatedCrawler.PageResult pageResult : crawler.getPageResults()) {
                if (pageResult.getExtractedData() != null) {
                    if (pageResult.getExtractedData() instanceof List) {
                        allData.addAll((List<?>) pageResult.getExtractedData());
                    } else {
                        allData.add(pageResult.getExtractedData());
                    }
                }
            }

            // 检查合并后的数据是否为空
            if (allData.isEmpty()) {
                logger.info("分页爬取结果为空，不生成文件");
                return null;
            }

            String filePath = FileExportUtil.exportToFile(allData, crawler.getOutputFormat(), "paginated_crawler");
            if (filePath != null) {
                crawler.setFileExported(true);
                crawler.setExportedFilePath(filePath);
                logger.info("分页爬取文件导出成功: {}", filePath);
            }
            return filePath;
        } catch (Exception e) {
            logger.error("导出分页文件失败: {}", e.getMessage(), e);
            return null;
        }
    }

    /**
     * Converts the crawler's extracted data to the requested output format
     * (JSON/CSV string); any other format — or a conversion failure — yields the
     * original data unchanged.
     *
     * @param crawler source of the extracted data
     * @param format target output format
     * @return the converted representation, the original data, or {@code null}
     *         when there is no extracted data
     */
    @Override
    public Object transformOutput(Crawler crawler, OutputFormatEnum format) {
        if (crawler.getExtractedData() == null) {
            return null;
        }

        try {
            return switch (format) {
                case JSON -> objectMapper.writeValueAsString(crawler.getExtractedData());
                case CSV -> {
                    if (crawler.getExtractedData() instanceof List) {
                        StringWriter writer = new StringWriter();
                        // try-with-resources guarantees the CSVWriter is closed
                        // (and its buffer flushed) even when writing fails
                        try (CSVWriter csvWriter = new CSVWriter(writer)) {
                            List<?> data = (List<?>) crawler.getExtractedData();
                            for (Object item : data) {
                                // String.valueOf tolerates null elements
                                csvWriter.writeNext(new String[]{String.valueOf(item)});
                            }
                        }
                        yield writer.toString();
                    }
                    yield crawler.getExtractedData();
                }
                default -> crawler.getExtractedData();
            };
        } catch (Exception e) {
            // 转换失败时，返回原始数据
            logger.error("转换输出格式失败: {}", e.getMessage(), e);
            return crawler.getExtractedData();
        }
    }
}