package com.xunhang.service.impl;

import com.xunhang.entity.News;
import com.xunhang.entity.Type;
import com.xunhang.repository.NewsRepository;
import com.xunhang.repository.TypeRepository;
import com.xunhang.service.Crawler2Service;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import java.io.IOException;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

@Service
public class Crawler2ServiceImpl implements Crawler2Service {

    @Autowired
    private NewsRepository newsRepository;

    @Autowired
    private TypeRepository typeRepository;

    // 基础URL用于处理相对路径
    private static final String BASE_URL = "https://uc.whu.edu.cn";

    @Override
    @Transactional
    public void crawlSingleCollegeNews(String listUrl) throws IOException {
        try {
            int currentPage = parsePageNumber(listUrl);
            boolean foundNewNews = false;

            do {
                Document listPage = getDocumentWithRetry(listUrl, 3);
                Elements newsItems = listPage.select("div.list_txt > ul.am-list > li");

                for (Element newsItem : newsItems) {
                    News parsedNews = parseCollegeNewsItem(newsItem);
                    String newsTitle = parsedNews.getTitle();

                    if (!newsRepository.existsByTitle(newsTitle)) {
                        processCollegeNewsItem(newsItem, listUrl);
                        System.out.println("[学院新闻] 新增新闻: " + newsTitle);
                        foundNewNews = true;
                        return; // 找到一条新新闻后即返回
                    } else {
                        System.out.println("[学院新闻] 新闻已存在: " + newsTitle);
                    }
                }

                System.out.println("[学院新闻] 当前页没有新新闻，尝试下一页...");

                // 计算下一页页码 (37,36,35...)
                currentPage++;
                listUrl = getNextPageUrl(currentPage);

                // 防止无限循环，设置最大翻页次数
                if (currentPage > 5) { // 假设最多翻5页
                    System.out.println("不再爬取更早时间的新闻");
                    break;
                }

                // 添加延迟避免被封
                TimeUnit.SECONDS.sleep(1);

            } while (!foundNewNews);

        } catch (Exception e) {
            throw new IOException("[学院新闻] 抓取单条新闻失败: " + e.getMessage(), e);
        }
    }

    @Override
    @Transactional
    public void crawlBatchCollegeNews(String listUrl, int maxCount) throws IOException {
        try {
            int currentPage = parsePageNumber(listUrl);
            int addedCount = 0;
            int remainingCount = maxCount;

            while (remainingCount > 0 && currentPage <= 50) {
                System.out.printf("[学院新闻] 正在抓取第%d页，还需%d条新闻%n", currentPage, remainingCount);

                Document listPage = getDocumentWithRetry(listUrl, 3);
                Elements newsItems = listPage.select("div.list_txt > ul.am-list > li");

                for (Element newsItem : newsItems) {
                    if (remainingCount <= 0) break;

                    News parsedNews = parseCollegeNewsItem(newsItem);
                    if (!newsRepository.existsByTitle(parsedNews.getTitle())) {
                        processCollegeNewsItem(newsItem, listUrl);
                        addedCount++;
                        remainingCount--;
                        System.out.printf("[学院新闻] 新增新闻[%d/%d]: %s%n",
                                addedCount, maxCount, parsedNews.getTitle());
                    }
                }

                if (remainingCount <= 0) break;

                currentPage++;
                if (currentPage > 5) { // 假设最多翻5页
                    System.out.println("不再爬取更早时间的新闻");
                    break;
                }
                listUrl = getNextPageUrl(currentPage);
                TimeUnit.SECONDS.sleep(1);
            }

            System.out.printf("[学院新闻] 操作完成，实际新增%d/%d条新闻%n", addedCount, maxCount);
        } catch (Exception e) {
            throw new IOException("[学院新闻] 批量抓取失败: " + e.getMessage(), e);
        }
    }

    // ========== 私有工具方法 ==========

    /**
     * 解析学院新闻列表项
     */
    private News parseCollegeNewsItem(Element item) {
        News news = new News();
        Element link = item.selectFirst("a");

        if (link != null) {
            news.setTitle(link.selectFirst("span").text());
            news.setDetailUrl(resolveRelativeUrl(link.attr("href")));
            news.setSource("武汉大学本科生院");
        }
        return news;
    }

    /**
     * 处理学院新闻详情
     */
    private String fixCollegeNewsUrl(String rawUrl) {
        if (rawUrl == null || rawUrl.isEmpty()) {
            return null;
        }
        // 简单处理/../的情况
        if (rawUrl.contains("/../")) {
            return "https://uc.whu.edu.cn" + rawUrl.substring(rawUrl.indexOf("/../") + 3);
        }
        // 如果已经是完整URL，直接返回
        if (rawUrl.startsWith("http")) {
            return rawUrl;
        }

        // 其他相对路径
        return "https://uc.whu.edu.cn" + (rawUrl.startsWith("/") ? "" : "/") + rawUrl;
    }
    private void processCollegeNewsItem(Element item, String refererUrl) throws IOException {
        News news = parseCollegeNewsItem(item);
        String detailUrl = fixCollegeNewsUrl(news.getDetailUrl());
        System.out.println("[DEBUG] 解析到学院新闻条目: " + news.getTitle() + ", URL: " + detailUrl);

        if (detailUrl == null) {
            System.out.println("[WARN] 学院新闻URL为空，跳过");
            return;
        }

        try {
            Document detailPage = Jsoup.connect(detailUrl)
                    .referrer(refererUrl)
                    .userAgent("Mozilla/5.0")
                    .timeout(10000)
                    .get();
            System.out.println("[DEBUG] 成功获取学院新闻详情页: " + detailUrl);


            // 解析详情页内容
            Element content = detailPage.selectFirst("div.v_news_content");
            if (content == null) {
                content = detailPage.selectFirst("p.vsbcontent_start");
            }

            if (content != null) {
                //0.提取发布时间
                Element metaPubDate = detailPage.selectFirst("meta[name=PubDate]");
                if (metaPubDate != null) {
                    String dateStr = metaPubDate.attr("content").trim(); // 获取 content 属性
                    DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm");
                    LocalDateTime publishTime = LocalDateTime.parse(dateStr, formatter);
                    news.setSubmitTime(publishTime);
                    System.out.println("发布时间 (Meta 标签): " + publishTime);
                }
                // 1. 移除不需要的元素（脚本、样式、广告等）
                content.select("script, style, img, iframe, .ad").remove();

                // 2. 提取所有段落 <p> 并保留换行格式
                StringBuilder detailContent = new StringBuilder();
                for (Element p : content.select("p")) {
                    String paragraph = p.text().trim();
                    if (!paragraph.isEmpty()) {
                        // 保留段落换行（每个 <p> 后加两个换行符）
                        detailContent.append(paragraph).append("\n\n");
                    }
                }

                // 3. 如果没有 <p> 标签，则回退到整个内容（保持兼容性）
                if (detailContent.length() == 0) {
                    detailContent.append(content.text());
                }

                // 4. 去除多余空行并设置到 news 对象
                String formattedDetail = detailContent.toString()
                        .replaceAll("(\n\\s*){3,}", "\n\n")  // 合并连续多个空行
                        .trim();

                news.setDetail(formattedDetail);



            // 稳定可靠的作者信息提取
                Elements paragraphs = content.select("p"); // 选择所有段落更安全
                List<String> possibleAuthorParagraphs = new ArrayList<>();

                // 第一步：收集可能包含作者信息的段落
                for (Element p : paragraphs) {
                    String text = p.text().trim();
                    if (text.contains("通讯员") || text.contains("撰稿") || text.contains("撰写")) {
                        possibleAuthorParagraphs.add(text);
                    }
                }

                // 第二步：精确提取作者信息
                for (String text : possibleAuthorParagraphs) {
                    try {
                        String authors = "";

                        // 情况1：带括号的格式
                        if (text.contains("（") && text.contains("）")) {
                            String bracketContent = text.substring(text.indexOf("（") + 1, text.indexOf("）"));
                            if (bracketContent.contains("通讯员")) {
                                authors = bracketContent.replaceFirst("通讯员\\s*[:：]?\\s*", "")
                                        .split("[，；;）)]")[0]
                                        .trim();
                            }
                        }
                        // 情况2：不带括号的格式
                        else {
                            authors = text.replaceFirst(".*(通讯员|撰稿|撰写)\\s*[:：]?\\s*", "")
                                    .split("[，；;）)]")[0]
                                    .trim();
                        }

                        // 统一处理结果
                        authors = authors.replaceAll("[、，]", "、")
                                .replaceAll("\\s+", " ")
                                .replaceAll("^[、， ]+|[、， ]+$", "");

                        if (!authors.isEmpty()) {
                            news.setAuthor(authors);
                            System.out.println("[成功提取] 作者: " + authors);
                            break; // 找到后跳出循环，不退出方法
                        }
                    } catch (Exception e) {
                        System.err.println("[提取异常] 段落: " + text + " | 错误: " + e.getMessage());
                    }
                }




                // 图片提取选择带有name="Image"的meta标签
                Document document = Jsoup.connect(detailUrl).get();

                Element metaImage = document.selectFirst("meta[name=Image]");
                if (metaImage != null) {
                    String cont = metaImage.attr("content");

                    // 方法1：先尝试按分号分割
                    String[] imageUrls = cont.split(";");

                    // 如果分号分割失败（只有1个元素），尝试按空格分割
                    if (imageUrls.length <= 1) {
                        imageUrls = cont.split("\\s+");
                    }

                    // 取第一个URL（可能还包含后续URL，需要进一步处理）
                    if (imageUrls.length > 0) {
                        String firstImageUrl = imageUrls[0];

                        // 智能截断到图片扩展名
                        int pngIndex = firstImageUrl.indexOf(".png");
                        int jpgIndex = firstImageUrl.indexOf(".jpg");
                        int jpegIndex = firstImageUrl.indexOf(".jpeg");

                        // 找到最近的图片扩展名位置
                        int endIndex = firstImageUrl.length();
                        if (pngIndex != -1) endIndex = pngIndex + 4;
                        if (jpgIndex != -1 && jpgIndex < endIndex) endIndex = jpgIndex + 4;
                        if (jpegIndex != -1 && jpegIndex < endIndex) endIndex = jpegIndex + 5;

                        // 截取真正的图片URL
                        firstImageUrl = firstImageUrl.substring(0, endIndex);

                        // 补全协议
                        if (firstImageUrl.startsWith("//")) {
                            firstImageUrl = "https:" + firstImageUrl;
                        } else if (!firstImageUrl.startsWith("http")) {
                            firstImageUrl = "https://" + firstImageUrl;
                        }

                        news.setImages(firstImageUrl);
                        System.out.println("[DEBUG] 提取第一张图片: " + firstImageUrl);
                    }
                }



            }

            if (isValidNews(news)) {
                System.out.println("[DEBUG] 准备保存学院新闻: " + news.getTitle());
                newsRepository.save(news);
                News savedNews = newsRepository.save(news);
                System.out.println("[SUCCESS] 已保存学院新闻到数据库: " + news.getTitle());



            } else {
                System.out.println("[WARN] 学院新闻校验失败: " + news.getTitle());
            }
        } catch (IOException e) {
            System.err.println("[ERROR] 处理学院新闻失败: " + news.getTitle() + ", URL: " + detailUrl);
            throw e;
        }
    }

    /**
     * 解析当前页码
     */
    private int parsePageNumber(String url) {
        if (url.equals("https://uc.whu.edu.cn/xwzx/xyfc.htm")) {
            return 0;
        } else if (url.matches("https://uc.whu.edu.cn/xwzx/xyfc/\\d+\\.htm")) {
            String num = url.replaceAll(".*/(\\d+)\\.htm$", "$1");
            return 37 - Integer.parseInt(num) + 1;
        }
        return 0;
    }

    /**
     * 生成下一页URL
     */
    private String getNextPageUrl(int currentPage) {
        if (currentPage == 1) {
            return "https://uc.whu.edu.cn/xwzx/xyfc/37.htm";
        } else {
            int pageNum = 37 - (currentPage - 1);
            return String.format("https://uc.whu.edu.cn/xwzx/xyfc/%d.htm", pageNum);
        }
    }

    /**
     * 处理相对URL
     */
    private String resolveRelativeUrl(String url) {
        if (url == null) return null;
        if (url.startsWith("http")) return url;
        if (url.startsWith("../")) url = url.substring(2);
        return BASE_URL + "/" + url;
    }

    /**
     * 带重试机制的请求
     */
    private Document getDocumentWithRetry(String url, int maxRetry) throws IOException {
        IOException lastException = null;
        for (int i = 0; i < maxRetry; i++) {
            try {
                return Jsoup.connect(url)
                        .userAgent("Mozilla/5.0")
                        .timeout(10000)
                        .get();
            } catch (IOException e) {
                lastException = e;
                try {
                    TimeUnit.SECONDS.sleep(1);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                }
            }
        }
        throw lastException;
    }

    /**
     * 验证新闻数据有效性
     */
    private boolean isValidNews(News news) {
        return news.getTitle() != null && !news.getTitle().isEmpty()
                && news.getDetail() != null && !news.getDetail().isEmpty();
    }
}