package com.newsanalysis.crawler;

import com.newsanalysis.model.entity.News;
import com.newsanalysis.service.NewsService;

import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.hc.client5.http.classic.methods.HttpGet;
import org.apache.hc.client5.http.impl.classic.CloseableHttpClient;
import org.apache.hc.client5.http.impl.classic.CloseableHttpResponse;
import org.apache.hc.client5.http.impl.classic.HttpClients;
import org.apache.hc.core5.http.ParseException;
import org.apache.hc.core5.http.io.entity.EntityUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

@Slf4j
@Component
@RequiredArgsConstructor
public class WebCrawler {

    private final NewsService newsService;

    // Number of worker threads used to crawl URLs in parallel.
    @Value("${crawler.thread.pool.size}")
    private int threadPoolSize;

    // User-Agent header sent with every HTTP request.
    @Value("${crawler.user-agent}")
    private String userAgent;

    /** Key of the fallback configuration used when no site-specific entry matches. */
    private static final String DEFAULT_CONFIG_KEY = "default";

    /**
     * Site-specific CSS selector configurations, keyed by a domain fragment
     * (e.g. "sina.com.cn"). Populated once in the static initializer and never
     * mutated afterwards.
     */
    private static final ConcurrentHashMap<String, WebsiteConfig> WEBSITE_CONFIGS = new ConcurrentHashMap<>();

    static {
        // Sina news
        WEBSITE_CONFIGS.put("sina.com.cn", new WebsiteConfig(
                "article,.news-item,.news-box",          // news item selector
                "h1,h2,.title,.main-title,.news-title",  // title selector
                "p,.article-content,.news-content"       // content selector
        ));

        // NetEase news
        WEBSITE_CONFIGS.put("163.com", new WebsiteConfig(
                "article,.news_item,.news-item",  // news item selector
                "h1,h2,.title,.news_title",       // title selector
                "p,.content,.post_content"        // content selector
        ));

        // Tencent news
        WEBSITE_CONFIGS.put("qq.com", new WebsiteConfig(
                "article,.list_item,.news-box",  // news item selector
                "h1,h2,.title,.tit",             // title selector
                "p,.content,.detail_content"     // content selector
        ));

        // Sohu news
        WEBSITE_CONFIGS.put("sohu.com", new WebsiteConfig(
                "article,.news-wrapper,.news-box",  // news item selector
                "h1,h2,.title,.article-title",      // title selector
                "p,.article-content,.news-text"     // content selector
        ));

        // Fallback configuration used when no site-specific entry matches.
        WEBSITE_CONFIGS.put(DEFAULT_CONFIG_KEY, new WebsiteConfig(
                "article, .news, .news-item, .article", // news item selector
                "h1, h2, .title, .headline",            // title selector
                "p, .content, .summary, .description"   // content selector
        ));
    }

    /**
     * Starts a crawl over the given URLs, discarding the saved-count result.
     *
     * @param urlList URLs to crawl; may be {@code null} or empty (no-op)
     */
    public void startCrawling(List<String> urlList) {
        startCrawlingAndReturnCount(urlList);
    }

    /**
     * Crawls every URL in parallel, persists the extracted news via
     * {@link NewsService#batchSave}, and blocks until all tasks finish.
     *
     * @param urlList URLs to crawl; may be {@code null} or empty (returns 0)
     * @return total number of news items successfully saved
     */
    public int startCrawlingAndReturnCount(List<String> urlList) {
        if (urlList == null || urlList.isEmpty()) {
            return 0;
        }

        // Guard against a misconfigured (zero/negative) pool size, which would
        // make newFixedThreadPool throw IllegalArgumentException.
        ExecutorService executor = Executors.newFixedThreadPool(Math.max(1, threadPoolSize));
        AtomicInteger totalSavedCount = new AtomicInteger(0);

        for (String url : urlList) {
            executor.execute(() -> {
                try {
                    List<News> newsList = crawlWebsite(url);
                    if (!newsList.isEmpty()) {
                        int savedCount = newsService.batchSave(newsList);
                        totalSavedCount.addAndGet(savedCount);
                        log.info("成功爬取并保存 {} 条新闻，来源：{}", savedCount, url);
                    }
                } catch (Exception e) {
                    // One failing URL must not abort the other crawl tasks.
                    log.error("爬取 {} 失败: {}", url, e.getMessage(), e);
                }
            });
        }

        executor.shutdown();
        try {
            // Wait (effectively forever) for all crawl tasks to complete.
            executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            log.error("爬虫任务被中断: {}", e.getMessage(), e);
            Thread.currentThread().interrupt(); // restore interrupt status
        }

        return totalSavedCount.get();
    }

    /**
     * Fetches one page and extracts news items from it.
     *
     * @param url the page URL to fetch
     * @return extracted news items; empty (never {@code null}) on HTTP or parse failure
     * @throws IOException on network/transport errors
     */
    private List<News> crawlWebsite(String url) throws IOException {
        HttpGet request = new HttpGet(url);
        request.setHeader("User-Agent", userAgent);

        try (CloseableHttpClient httpClient = HttpClients.createDefault();
             CloseableHttpResponse response = httpClient.execute(request)) {

            if (response.getCode() != 200) {
                log.error("获取内容失败，状态码: {}", response.getCode());
                return List.of();
            }
            try {
                String htmlContent = EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8);
                return parseNewsContent(htmlContent, url);
            } catch (ParseException e) {
                log.error("解析响应内容时发生错误: {}", e.getMessage(), e);
                return List.of();
            }
        }
    }

    /**
     * Parses an HTML page and extracts news items using the site-specific
     * selector configuration. Items missing a title, link, or content are
     * skipped; a failure on one element does not abort the rest.
     *
     * @param htmlContent raw HTML of the page
     * @param baseUrl     URL the page was fetched from (used to resolve relative links)
     * @return extracted news items, possibly empty
     */
    private List<News> parseNewsContent(String htmlContent, String baseUrl) {
        List<News> newsList = new ArrayList<>();
        Document doc = Jsoup.parse(htmlContent);

        // Pick the selector set matching this site (or the default one).
        WebsiteConfig config = determineWebsiteConfig(baseUrl);

        Elements newsElements = doc.select(config.getNewsItemSelector());
        log.debug("找到 {} 个新闻元素，使用选择器: {}", newsElements.size(), config.getNewsItemSelector());

        for (Element element : newsElements) {
            try {
                // Title
                Element titleElement = element.selectFirst(config.getTitleSelector());
                if (titleElement == null) {
                    continue;
                }
                String title = titleElement.text();

                // Link: the title element may itself be an <a>, or contain one.
                Element linkElement = "a".equals(titleElement.tagName())
                        ? titleElement
                        : titleElement.selectFirst("a");
                String link = linkElement != null ? linkElement.attr("href") : "";

                // Resolve relative URLs. An empty href stays empty so the
                // validity check below rejects the item (previously "" was
                // rewritten to "baseUrl/" and slipped through).
                if (!link.isEmpty()) {
                    if (link.startsWith("/")) {
                        link = getBaseDomain(baseUrl) + link;
                    } else if (!link.startsWith("http")) {
                        // Other relative forms are resolved against the page URL.
                        link = baseUrl + (baseUrl.endsWith("/") ? "" : "/") + link;
                    }
                }

                // Content summary
                Element contentElement = element.selectFirst(config.getContentSelector());
                String content = contentElement != null ? contentElement.text() : "";

                // Fallback: concatenate all paragraph text when the configured
                // content selector matched nothing.
                if (content.isEmpty()) {
                    Elements paragraphs = element.select("p");
                    if (!paragraphs.isEmpty()) {
                        StringBuilder sb = new StringBuilder();
                        for (Element p : paragraphs) {
                            sb.append(p.text()).append("\n\n");
                        }
                        content = sb.toString().trim();
                    }
                }

                // Keep only fully-populated items.
                if (title.isEmpty() || link.isEmpty() || content.isEmpty()) {
                    continue;
                }

                newsList.add(News.builder()
                        .title(title)
                        .content(content)
                        .url(link)
                        .source(extractDomain(baseUrl))
                        .crawlTime(LocalDateTime.now())
                        // Listing pages carry no timestamp; assume published one hour before crawl.
                        .publishTime(LocalDateTime.now().minusHours(1))
                        .build());
            } catch (Exception e) {
                log.error("解析新闻元素错误: {}", e.getMessage(), e);
            }
        }

        log.info("从 {} 成功解析 {} 条新闻", baseUrl, newsList.size());
        return newsList;
    }

    /**
     * Looks up the selector configuration for the given URL's domain.
     *
     * @param url the URL being crawled
     * @return the matching site configuration, or the default one
     */
    private WebsiteConfig determineWebsiteConfig(String url) {
        String domain = extractDomain(url);

        for (Map.Entry<String, WebsiteConfig> entry : WEBSITE_CONFIGS.entrySet()) {
            // The sentinel "default" key never participates in domain matching.
            if (!DEFAULT_CONFIG_KEY.equals(entry.getKey()) && domain.contains(entry.getKey())) {
                return entry.getValue();
            }
        }

        return WEBSITE_CONFIGS.get(DEFAULT_CONFIG_KEY);
    }

    /**
     * Extracts the lower-cased host name from a URL (protocol and path stripped).
     *
     * @param url the URL
     * @return the host name, e.g. "news.sina.com.cn"
     */
    private String extractDomain(String url) {
        String domain = url.toLowerCase();
        if (domain.startsWith("http://")) {
            domain = domain.substring(7);
        } else if (domain.startsWith("https://")) {
            domain = domain.substring(8);
        }

        int slashIndex = domain.indexOf("/");
        if (slashIndex > 0) {
            domain = domain.substring(0, slashIndex);
        }

        return domain;
    }

    /**
     * Returns the scheme + host portion of a URL (e.g. "https://news.sohu.com"),
     * used as the prefix for root-relative links.
     *
     * <p>Fix: the path separator must be searched for AFTER the protocol prefix;
     * the previous code found the first "/" of "://" and returned "http:".
     *
     * @param url the URL
     * @return the lower-cased scheme + host, or the whole lower-cased string if
     *         no recognized protocol/path is present
     */
    private String getBaseDomain(String url) {
        String domain = url.toLowerCase();
        int protocolEnd = -1;

        if (domain.startsWith("http://")) {
            protocolEnd = 7;
        } else if (domain.startsWith("https://")) {
            protocolEnd = 8;
        }

        if (protocolEnd > 0) {
            // Search for the path separator only past the "://".
            int slashAfterProtocol = domain.indexOf('/', protocolEnd);
            if (slashAfterProtocol > 0) {
                return domain.substring(0, slashAfterProtocol);
            }
        }

        return domain;
    }

    /**
     * Immutable holder for one site's CSS selectors: the selector that locates
     * each news item, and the title/content selectors applied within it.
     */
    static class WebsiteConfig {
        private final String newsItemSelector;
        private final String titleSelector;
        private final String contentSelector;

        public WebsiteConfig(String newsItemSelector, String titleSelector, String contentSelector) {
            this.newsItemSelector = newsItemSelector;
            this.titleSelector = titleSelector;
            this.contentSelector = contentSelector;
        }

        public String getNewsItemSelector() {
            return newsItemSelector;
        }

        public String getTitleSelector() {
            return titleSelector;
        }

        public String getContentSelector() {
            return contentSelector;
        }
    }
}

