import asyncio
import logging
import os
import time
from datetime import datetime, timedelta

import schedule

from config import Config
from data_collector import DataCollector
from database import DatabaseManager
from llm_analyzer import AlibabaCloudLLM
from sentiment_analyzer import SentimentAnalyzer


logger = logging.getLogger(__name__)


class TaskScheduler:
    """Coordinate the news-sentiment pipeline as recurring background jobs.

    Jobs (registered in :meth:`setup_schedules` against the module-global
    ``schedule`` registry):
      - hourly article collection,
      - sentiment analysis every 2 hours,
      - a daily report at 09:00,
      - weekly cleanup of records older than 90 days.
    """

    def __init__(self):
        # Wire up the pipeline components; the sentiment analyzer shares the
        # DB handle and the LLM client.
        self.db_manager = DatabaseManager(Config.MONGODB_URL, Config.MONGODB_DATABASE)
        self.llm_analyzer = AlibabaCloudLLM(Config.ALIBABA_CLOUD_API_KEY)
        self.sentiment_analyzer = SentimentAnalyzer(self.db_manager, self.llm_analyzer)
        self.data_collector = DataCollector()

    def setup_schedules(self):
        """Register all recurring jobs with the global ``schedule`` registry."""
        schedule.every().hour.do(self.collect_data_job)  # hourly collection
        schedule.every(2).hours.do(self.analyze_sentiment_job)  # every 2 hours
        schedule.every().day.at("09:00").do(self.generate_daily_report_job)  # daily report
        schedule.every().week.do(self.cleanup_old_data_job)  # weekly cleanup
        logger.info("定时任务已设置")

    def collect_data_job(self):
        """Collect news (and optional RSS) articles and persist the new ones.

        RSS collection is best-effort: a failure there is logged but does not
        discard the news articles already gathered. Never raises; all errors
        are logged so the scheduler keeps running.
        """
        try:
            logger.info("开始执行数据采集任务")
            start = datetime.now()

            articles = self.data_collector.collect_news_articles(
                Config.NEWS_SOURCES, Config.NEW_ENERGY_KEYWORDS
            )
            # RSS sources are optional: Config may not define RSS_SOURCES.
            try:
                rss_sources = getattr(Config, "RSS_SOURCES", [])
                if rss_sources:
                    rss_articles = self.data_collector.collect_rss_articles(
                        rss_sources, Config.NEW_ENERGY_KEYWORDS
                    )
                    articles.extend(rss_articles)
            except Exception as e:
                # logger.exception keeps the original message and adds the
                # traceback that logger.error(f"...") was discarding.
                logger.exception("RSS采集失败: %s", e)

            # Count only documents actually inserted ("inserted" flags a new
            # document; save_article presumably de-duplicates — confirm there).
            saved = 0
            for article in articles:
                res = self.db_manager.save_article(article)
                if res and res.get("inserted"):
                    saved += 1

            dur = (datetime.now() - start).total_seconds()
            logger.info("数据采集任务完成，保存了 %d 篇文章，耗时 %.2f 秒", saved, dur)
        except Exception as e:
            logger.exception("数据采集任务失败: %s", e)

    def analyze_sentiment_job(self):
        """Run async sentiment analysis over the most recent article batch."""
        try:
            logger.info("开始执行情感分析任务")
            start = datetime.now()

            articles = self.db_manager.get_articles(limit=50)
            if not articles:
                logger.info("没有新文章需要分析")
                return

            # asyncio.run creates, runs, and tears down a fresh event loop,
            # replacing the manual new_event_loop()/set_event_loop()/close()
            # sequence (and it also resets the thread's current loop).
            results = asyncio.run(self.sentiment_analyzer.analyze_articles(articles))

            dur = (datetime.now() - start).total_seconds()
            logger.info("情感分析任务完成，分析了 %d 篇文章，耗时 %.2f 秒", len(results), dur)
        except Exception as e:
            logger.exception("情感分析任务失败: %s", e)

    def generate_daily_report_job(self):
        """Generate a one-day sentiment report and save it under reports/."""
        try:
            logger.info("开始生成日报")
            start = datetime.now()
            report = self.sentiment_analyzer.generate_sentiment_report(1)

            os.makedirs("reports", exist_ok=True)
            filename = f"reports/daily_report_{datetime.now().strftime('%Y%m%d')}.txt"
            with open(filename, "w", encoding="utf-8") as f:
                f.write(report)

            dur = (datetime.now() - start).total_seconds()
            # Bug fix: the original logged the literal text "(unknown)"
            # instead of the actual report path.
            logger.info("日报生成完成，保存到 %s，耗时 %.2f 秒", filename, dur)
        except Exception as e:
            logger.exception("生成日报失败: %s", e)

    def cleanup_old_data_job(self):
        """Delete article and sentiment records older than 90 days."""
        try:
            logger.info("开始清理旧数据")
            start = datetime.now()
            # NOTE(review): naive local datetime — assumes created_at is also
            # stored naive/local; confirm against the writers.
            cutoff = datetime.now() - timedelta(days=90)
            query = {"created_at": {"$lt": cutoff}}
            articles_res = self.db_manager.articles.delete_many(query)
            sentiments_res = self.db_manager.sentiments.delete_many(query)
            # Bug fix: include sentiment deletions in the reported total; the
            # original counted only deleted articles.
            deleted = articles_res.deleted_count + sentiments_res.deleted_count
            dur = (datetime.now() - start).total_seconds()
            logger.info("数据清理完成，删除了 %d 条旧记录，耗时 %.2f 秒", deleted, dur)
        except Exception as e:
            logger.exception("清理旧数据失败: %s", e)

    def run_scheduler(self):
        """Block forever, firing due jobs; Ctrl-C stops the loop cleanly."""
        logger.info("任务调度器启动")
        while True:
            try:
                schedule.run_pending()
                time.sleep(60)  # poll the schedule once a minute
            except KeyboardInterrupt:
                logger.info("任务调度器停止")
                break
            except Exception as e:
                # Keep the daemon alive through transient failures.
                logger.exception("调度器运行错误: %s", e)
                time.sleep(60)

    def run_once(self):
        """Run collection, analysis, and the daily report once (for testing)."""
        logger.info("执行一次性任务")
        try:
            self.collect_data_job()
            time.sleep(5)
            self.analyze_sentiment_job()
            time.sleep(5)
            self.generate_daily_report_job()
            logger.info("一次性任务执行完毕")
        except Exception as e:
            logger.exception("执行一次性任务失败: %s", e)


if __name__ == "__main__":
    import sys

    # Log to both a file and the console.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        handlers=[logging.FileHandler("scheduler.log"), logging.StreamHandler()],
    )

    task_scheduler = TaskScheduler()
    task_scheduler.setup_schedules()

    # "--once" runs every job a single time instead of starting the daemon loop.
    run_once_requested = len(sys.argv) > 1 and sys.argv[1] == "--once"
    if run_once_requested:
        task_scheduler.run_once()
    else:
        task_scheduler.run_scheduler()

