package com.zyx.spider.site.yicai;

import com.zyx.spider.model.NewsItem;
import lombok.extern.slf4j.Slf4j;
import org.jsoup.Connection;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.TimeUnit;

@Slf4j
@Component
@ConditionalOnProperty(name = "spider.site.yicai.enabled", havingValue = "true")
public class YCNewsCrawlerTask {

    /** Landing page of the Yicai (第一财经) sci-tech news column. */
    private static final String NEWS_URL = "https://www.yicai.com/news/kechuang/";

    /** Browser-like User-Agent so the site serves the normal HTML page. */
    private static final String USER_AGENT = "Mozilla/5.0";

    /** Connect/read timeout for all Jsoup requests, in milliseconds. */
    private static final int TIMEOUT_MS = 10_000;

    /**
     * Scheduled incremental crawl: fetches the news-column page and logs every
     * item found (headline, link, summary).
     *
     * <p>Runs every 30 minutes, with the first run 5 minutes after startup.
     * The previous {@code fixedRate = 300} (i.e. 5 hours) contradicted the
     * documented 30-minute schedule; the code now matches the stated intent.
     *
     * @throws IOException if the HTTP request to {@link #NEWS_URL} fails
     */
    @Scheduled(fixedRate = 30, initialDelay = 5, timeUnit = TimeUnit.MINUTES)
    public void crawlNews() throws IOException {
        log.info("开始执行【第一财经】新闻抓取任务...");
        // Fetch and parse the column page. Use the shared UA/timeout so this
        // path behaves like fetchLatestNews() — the original request set
        // neither, so a stalled connection could block the scheduler thread.
        Document document = Jsoup.connect(NEWS_URL)
                .userAgent(USER_AGENT)
                .timeout(TIMEOUT_MS)
                .get();
        log.info("抓取到新闻栏目标题--「{}」", document.title());

        // NOTE(review): "div#newslist" looks like it selects the list
        // *container* rather than the individual entries — confirm against
        // the live page markup (fetchLatestNews() uses different selectors).
        Elements items = document.select("div#newslist");
        for (Element item : items) {
            Element link = item.selectFirst("a.f-db");
            Element headline = item.selectFirst("div.common h2");
            Element summary = item.selectFirst("div.common p");
            // Skip malformed entries instead of throwing: the original
            // Objects.requireNonNull aborted the whole scheduled run on the
            // first item missing any sub-element.
            if (link == null || headline == null || summary == null) {
                log.warn("跳过结构不完整的新闻条目: {}", item.cssSelector());
                continue;
            }
            log.info("《{}》-- {} 「{}」", headline.text(), link.attr("href"), summary.text());
        }
    }

    /**
     * Scheduled full crawl, daily at 02:00 server time.
     *
     * <p>Currently a stub: the full-crawl logic is not implemented yet.
     */
    @Scheduled(cron = "0 0 2 * * ?")
    public void fullCrawl() {
        log.info("开始执行完整新闻抓取任务...");
        // 实现完整抓取逻辑
    }

    /**
     * Fetches the latest entries from the news-column page and maps each
     * matching element to a {@link NewsItem}.
     *
     * <p>NOTE(review): the ".news-item"/".title"/".summary"/".time" selectors
     * do not match those used by {@link #crawlNews()} — verify which set
     * reflects the actual page structure.
     *
     * @return the parsed items; empty if no matching elements are found
     * @throws IOException if the HTTP request to {@link #NEWS_URL} fails
     */
    private List<NewsItem> fetchLatestNews() throws IOException {
        // Connect with the same UA/timeout used elsewhere in this class.
        Document doc = Jsoup.connect(NEWS_URL)
                .userAgent(USER_AGENT)
                .timeout(TIMEOUT_MS)
                .get();

        List<NewsItem> newsItems = new ArrayList<>();
        for (Element element : doc.select(".news-item")) {
            String title = element.select(".title").text();
            String url = element.select("a").attr("href");
            String summary = element.select(".summary").text();
            String publishTime = element.select(".time").text();
            newsItems.add(new NewsItem(title, url, summary, publishTime));
        }
        return newsItems;
    }

    /**
     * Processes crawled items. Placeholder: currently only logs each item —
     * persistence/notification hooks would go here.
     *
     * @param newsItems items to process; must not be {@code null}
     */
    private void processNewsItems(List<NewsItem> newsItems) {
        newsItems.forEach(item ->
                log.info("抓取到新闻: {}, 发布时间: {}", item.getTitle(), item.getPublishTime()));
    }
}