# -*- coding: utf-8 -*-
import os
import sys
import logging
import argparse
from datetime import datetime

# 工具模块
from utils.logger import setup_logging
from utils.config_loader import Config

# 数据模型与数据库
from models.database import DatabaseManager
from models.comment_schema import RawComment, AnalyzedComment, CommentTag, TagDict, VideoSource

# 采集模块
from data_collector.captureYoutube import CommentCollector
from utils.youtube_client import get_youtube_service, initialize_api_key_rotator
from data_collector.captureYoutube import fetch_youtube_replies

# Analysis modules
from data_reader import get_sample_comments
from data_analysis.analysis_comment import CommentAnalyzer
# NOTE: this second import shadows the CommentAnalyzer imported on the line
# above — only data_analysis.comment_analyzer.CommentAnalyzer is in effect.
from data_analysis.comment_analyzer import CommentAnalyzer

# 字幕翻译模块
from translate.video_translator import VideoSubtitleTranslator

from sqlalchemy.dialects.mysql import insert
from sqlalchemy import text, or_, and_  # text: raw SQL; or_/and_: composite query filters
import json
from rich.progress import Progress, SpinnerColumn, TextColumn
from apscheduler.schedulers.blocking import BlockingScheduler
from utils.common import get_comment_id  # NOTE: shadowed by the local get_comment_id defined later in this file

# NOTE: duplicate imports — CommentCollector and fetch_youtube_replies were
# already imported above; one copy can be removed.
from data_collector.captureYoutube import CommentCollector
from data_collector.captureYoutube import fetch_youtube_replies

# Webshare proxy support for youtube_transcript_api
from youtube_transcript_api.proxies import WebshareProxyConfig
import time


# Resolve the project root (one level above this file's directory) so that
# sibling packages (utils, models, data_collector, ...) are importable when
# this script is run directly.
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))

# Prepend the project root to sys.path exactly once.
if project_root not in sys.path:
    sys.path.insert(0, project_root)


# Route all HTTP(S) traffic through a local proxy (port 7890, e.g. Clash).
# NOTE(review): this unconditionally overrides any proxy already set in the
# environment — confirm this is intended for every deployment.
os.environ["HTTP_PROXY"] = "http://127.0.0.1:7890"
os.environ["HTTPS_PROXY"] = "http://127.0.0.1:7890"


class LabelCache:
    """In-memory cache of the tag dictionary (TagDict rows).

    Exposes two lookup maps:
      - label_map:    tag name -> tag id (all levels)
      - main_tag_map: tag name -> parent tag id (level-2 tags only)
    """

    # NOTE: declared but never assigned anywhere — looks like an unfinished
    # singleton hook; every construction builds a fresh cache.
    _instance = None

    def __init__(self, db_manager=None):
        """Build the cache immediately from the given database manager."""
        if db_manager is None:
            raise ValueError("❌ db_manager 不能为空")
        self.db_manager = db_manager
        self.load_cache(db_manager)

    def load_cache(self, db_manager):
        """(Re)load both lookup maps from the tag table in a single query."""
        session = db_manager.Session()
        try:
            rows = session.query(TagDict).all()
            name_to_id = {}
            name_to_parent = {}
            for row in rows:
                name_to_id[row.name] = row.id
                if row.level == 2:
                    name_to_parent[row.name] = row.parent_id
            self.label_map = name_to_id
            self.main_tag_map = name_to_parent
        finally:
            session.close()

    def refresh(self):
        """Manually reload the tag cache from the database."""
        self.load_cache(self.db_manager)


def refresh_label_cache(db_manager):
    """Reload the tag cache from the database.

    LabelCache.__init__ already loads the cache, so the previous extra
    ``.refresh()`` call queried the tag table twice per invocation; a single
    construction is sufficient.

    :param db_manager: DatabaseManager providing Session()
    """
    LabelCache(db_manager=db_manager)


class YouTubeVideoDownloader:
    """
    Download a YouTube video by ID using yt-dlp.
    """

    def __init__(self, output_path="downloads/"):
        """
        :param output_path: directory the downloaded file is written to
                            (created on demand by download()).
        """
        self.output_path = output_path
        self._setup_logger()

    def _setup_logger(self):
        """Attach a stream handler once; repeated construction must not
        stack duplicate handlers on the shared named logger."""
        self.logger = logging.getLogger("YouTubeVideoDownloader")
        if not self.logger.handlers:
            handler = logging.StreamHandler()
            formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
            handler.setFormatter(formatter)
            self.logger.addHandler(handler)
        self.logger.setLevel(logging.INFO)

    def download(self, video_id):
        """
        Download the video with the given ID into ``self.output_path``.

        :param video_id: YouTube video ID (the ``v=`` query parameter)
        :raises RuntimeError: when yt-dlp is not installed or the download fails
        """
        try:
            from yt_dlp import YoutubeDL
        except ImportError:
            raise RuntimeError("❌ 缺少依赖: pip install yt-dlp")

        os.makedirs(self.output_path, exist_ok=True)

        mp4_url = f"https://www.youtube.com/watch?v={video_id}"

        ydl_opts = {
            'format': 'best',
            # Plain template string: %(id)s etc. are expanded by yt-dlp, not
            # by Python (the original had a pointless f-prefix here).
            'outtmpl': os.path.join(self.output_path, '%(id)s_%(title)s.%(ext)s'),
            'quiet': False,
            'no_warnings': False,
            'http_headers': {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0 Safari/537.36',
                'Referer': 'https://www.google.com/',
            }
        }

        try:
            # Fix: log the start *before* the blocking download; the original
            # only emitted "开始下载" after extract_info() had already
            # finished downloading the file.
            self.logger.info(f"🎬 开始下载: {video_id}")
            with YoutubeDL(ydl_opts) as ydl:
                result = ydl.extract_info(mp4_url, download=True)
                video_title = result.get('title', '未知标题')
                self.logger.info(f"✅ 下载完成: {video_title}")
        except Exception as e:
            raise RuntimeError(f"❌ 下载失败: {str(e)}")


def _advance_offset_from_batch(sample_comments):
    """Return the largest valid comment id in a batch, or 0 on any failure.

    Shared by run_stage_4 for both the empty-result and the saved-result
    paths; errors fall back to 0 so the next pass restarts from the
    beginning instead of aborting the task.
    """
    try:
        comment_ids = [get_comment_id(comment) for comment in sample_comments]
        valid_ids = [cid for cid in comment_ids if cid is not None and cid > 0]

        if valid_ids:
            offset = max(valid_ids)
            logging.debug(f"📌 更新偏移量至: {offset}")
            return offset

        logging.warning("⚠️ 当前批次无有效评论ID，偏移量保持为 0")
        return 0
    except Exception as e:
        logging.error(f"❌ 提取最大ID失败: {str(e)}")
        return 0


def run_stage_4(db_manager):
    """Stage 4 only: analyze raw comments (driven by d_raw_comments.is_analyzed).

    Reads unanalyzed comments in batches, sends them to the DeepSeek-backed
    CommentAnalyzer, persists results, and tracks progress in
    d_analysis_progress. Used both by --stage 4 and as the scheduled job.
    """
    logging.info("⏰ 开始执行定时任务 - 阶段4：评论分析")
    try:
        config = Config()
        analyzer = CommentAnalyzer(api_key=config.DEEPSEEK_API_KEY)

        batch_size = 10
        total_processed = 0
        empty_analysis_count = 0
        MAX_EMPTY_ANALYSIS = 3  # consecutive empty analysis batches before bailing out

        # Resume from the last recorded offset.
        last_processed_id = get_last_processed_id(db_manager)
        logging.info(f"📌 上次处理的最后评论ID: {last_processed_id}")

        if "--force-reset" in sys.argv:
            # Explicit CLI override: restart the scan from id 0.
            logging.warning("🔁 强制重置偏移量为 0")
            last_processed_id = 0
        else:
            # Fix: the session was previously opened unconditionally at the top
            # of this function and never closed (leaked on every run); it is
            # now scoped to this single lookup.
            session = db_manager.Session()
            try:
                first_unanalyzed = session.execute(text("""
                    SELECT id 
                    FROM d_raw_comments 
                    WHERE is_analyzed = FALSE AND analyzed_at IS NULL 
                    ORDER BY id ASC LIMIT 1
                """)).fetchone()
            finally:
                session.close()

            if first_unanalyzed:
                # NOTE(review): unlike main(), this resets to 0 whenever ANY
                # unanalyzed comment exists, regardless of the stored offset —
                # confirm that re-scanning from 0 is intended here.
                logging.warning("⚠️ 检测到未分析评论，但当前偏移量过高，尝试重置偏移量为 0")
                logging.debug(f"📌 最小可用评论ID: {first_unanalyzed[0]}")
                last_processed_id = 0
            else:
                logging.info("✅ 所有评论已完成分析")
                return

        # Reload the tag cache so newly added tags are usable immediately.
        logging.info("🔄 开始刷新标签缓存...")
        refresh_label_cache(db_manager)
        logging.info("✅ 标签缓存刷新完成。")

        while True:
            sample_comments = get_sample_comments(
                db_manager,
                last_id=last_processed_id,
                batch_size=batch_size
            )
            logging.info(f"🔍 获取到 {len(sample_comments)} 条待分析评论")

            if not sample_comments:
                logging.info("✅ 所有评论已完成分析")
                break

            logging.info("🧠 正在分析评论...")
            analyzed_results = analyzer.batch_analyze(sample_comments)
            logging.info(f"🧠 成功分析 {len(analyzed_results or [])} 条评论")

            if not analyzed_results:
                empty_analysis_count += 1
                logging.warning(f"⚠️ 连续第 {empty_analysis_count} 次无有效分析结果")

                # Still advance past this batch so the same comments are not
                # re-fetched forever.
                last_processed_id = _advance_offset_from_batch(sample_comments)
                update_last_offset(db_manager, offset=last_processed_id)
                logging.info(f"📌 已更新偏移量为 {last_processed_id}")

                if empty_analysis_count >= MAX_EMPTY_ANALYSIS:
                    logging.error("❌ 达到最大空分析次数，主动退出以避免死循环")
                    break
                continue

            empty_analysis_count = 0  # reset on any successful batch

            save_analysis_results(db_manager, analyzed_results)
            total_processed += len(analyzed_results)

            last_processed_id = _advance_offset_from_batch(sample_comments)
            update_last_offset(db_manager, offset=last_processed_id)
            logging.info(f"✅ 当前已处理 {total_processed} 条评论（最后处理ID：{last_processed_id}）")

        logging.info("=== 系统运行结束 ===")

    except Exception as e:
        logging.error(f"🚨 定时任务失败: {str(e)}", exc_info=True)
    finally:
        # NOTE(review): closing the shared connection here also affects a
        # long-lived scheduler process that calls this job nightly — confirm
        # DatabaseManager can reconnect after close_connection().
        db_manager.close_connection()


def update_last_offset(db_manager, offset, start_time=None, end_time=None):
    """Record analysis progress in d_analysis_progress (insert-or-update).

    last_processed_id is always written; start_time / end_time are included
    in the ON DUPLICATE KEY UPDATE clause only when supplied. MySQL
    connection drops are retried up to three times with exponential backoff;
    any other database error is logged and re-raised immediately.
    """
    max_retries = 3
    attempt = 0

    while attempt < max_retries:
        session = db_manager.Session()
        try:
            # Normalize the offset: None or empty string -> 0.
            if offset is None or offset == '':
                safe_offset = 0
            else:
                safe_offset = int(offset)

            update_clauses = ["last_processed_id = :offset"]
            if start_time:
                update_clauses.append("start_time = :start_time")
            if end_time:
                update_clauses.append("end_time = :end_time")

            sql = f"""
                INSERT INTO d_analysis_progress (last_processed_id, start_time, end_time)
                VALUES (:offset, :start_time, :end_time)
                ON DUPLICATE KEY UPDATE {', '.join(update_clauses)}
            """

            session.execute(text(sql), {
                "offset": safe_offset,
                "start_time": start_time,
                "end_time": end_time
            })
            session.commit()
            return  # success — stop retrying

        except Exception as e:
            session.rollback()
            attempt += 1
            recoverable = (
                "MySQL server has gone away" in str(e)
                or "Can't connect to MySQL server" in str(e)
            )
            if attempt >= max_retries or not recoverable:
                logging.error(f"❌ 更新进度失败: {str(e)}", exc_info=True)
                raise
            logging.warning(f"⚠️ 数据库连接问题，第 {attempt} 次重试...")
            time.sleep(2 ** attempt)  # exponential backoff
        finally:
            session.close()


def get_last_processed_id(db_manager):
    """Fetch the most recently stored last_processed_id, or 0.

    Returns 0 when the progress table is empty or when the query ultimately
    fails; MySQL connection drops are retried up to three times with
    exponential backoff.
    """
    max_retries = 3
    attempt = 0

    while attempt < max_retries:
        session = db_manager.Session()
        try:
            row = session.execute(
                text("SELECT last_processed_id FROM d_analysis_progress ORDER BY id DESC LIMIT 1")
            ).fetchone()

            if row and row[0] is not None:
                return int(row[0])

            logging.warning("⚠️ 进度表为空，使用默认偏移量 0")
            return 0

        except Exception as e:
            attempt += 1
            recoverable = (
                "MySQL server has gone away" in str(e)
                or "Can't connect to MySQL server" in str(e)
            )
            if attempt >= max_retries or not recoverable:
                logging.error(f"❌ 获取最后处理ID失败: {str(e)}", exc_info=True)
                return 0
            logging.warning(f"⚠️ 数据库连接问题，第 {attempt} 次重试...")
            time.sleep(2 ** attempt)  # exponential backoff
        finally:
            session.close()


def main(skip_collection=False, max_process=None):
    """
    Main program entry point (runs stage 4: paginated comment analysis).

    :param skip_collection: whether to skip the collection stage (1-3)
    :param max_process: maximum number of comments to process (optional)

    NOTE(review): ``max_process`` is accepted but never read below, and the
    argparse namespace parsed here is only consulted for
    --download-youtube-video; the __main__ block builds its own larger
    parser — confirm whether this duplicate parser can be removed.
    """
    parser = argparse.ArgumentParser(description="YouTube数据分析系统")
    parser.add_argument("--skip-collection", action="store_true", help="跳过数据采集和报告生成阶段")
    parser.add_argument("--max-process", type=int, default=None, help="最大处理评论数量（可选）")
    parser.add_argument("--refresh-label-cache", action="store_true", help="启动时刷新标签缓存")
    parser.add_argument("--run-scheduler", action="store_true", help="启动定时任务")
    parser.add_argument("--run-once", action="store_true", help="立即执行一次任务（仅阶段4）")
    parser.add_argument("--reprocess-all", action="store_true", help="强制重新分析所有评论")
    parser.add_argument("--force-reset", action="store_true", help="强制从 ID=0 开始分析")
    parser.add_argument("--stage", choices=["1-3", "4"], help="选择执行的阶段：1-3 为采集，4 为分析")

    parser.add_argument("--extract-subtitles", action="store_true", help="提取并翻译视频字幕")
    parser.add_argument("--video-id", type=str, default=None, help="指定视频ID进行处理")
    parser.add_argument("--translate-subtitles", action="store_true", help="翻译视频字幕")
    parser.add_argument("--batch", nargs='+', help="指定多个视频ID进行翻译")
    parser.add_argument("--all", action="store_true", help="翻译数据库中所有视频")
    parser.add_argument("--format", choices=['srt', 'json'], default='srt', help="输出格式")
    parser.add_argument(
        "--translator-type",
        choices=["deepseek", "chatgpt"],
        default="deepseek",
        help="选择字幕翻译所使用的模型，默认为 deepseek"
    )

    # Command for downloading a single YouTube video.
    parser.add_argument("--download-youtube-video", type=str, help="根据视频 ID 下载 YouTube 视频")
    parser.add_argument(
        "--skip-translate",
        action="store_true",
        help="仅将已存在的字幕文件写入数据库，跳过调用翻译接口"
    )

    args = parser.parse_args()
    
    # Video download takes priority over everything else and exits early.
    if args.download_youtube_video:
        try:
            downloader = YouTubeVideoDownloader(output_path="downloads/")
            downloader.download(args.download_youtube_video.strip())
            logging.info("✅ 视频下载完成")
            return
        except Exception as e:
            logging.error(str(e))
            return

    setup_logging()
    config = Config()
    db_manager = DatabaseManager(config.DB_CONFIG)

    logging.info("=== YouTube数据分析系统启动 ===")
    logging.info(f"skip_collection 模式: {skip_collection}")

    if not skip_collection:
        # Stages 1-3 (collection) would run here; currently a no-op.
        pass

    # Stage 4: comment analysis (always runs).
    logging.info("=== 即将进入阶段4：分页分析评论 ===")

    analyzer = CommentAnalyzer(api_key=config.DEEPSEEK_API_KEY)
    batch_size = 10
    total_processed = 0
    empty_analysis_count = 0
    MAX_EMPTY_ANALYSIS = 3  # maximum consecutive empty analysis batches

    # Resume from the last processed comment id.
    last_processed_id = get_last_processed_id(db_manager)
    logging.info(f"📌 上次处理的最后评论ID: {last_processed_id}")

    # --force-reset restarts the scan from id 0.
    if "--force-reset" in sys.argv:
        logging.warning("🔁 强制重置偏移量为 0")
        last_processed_id = 0

    session = db_manager.Session()
    try:
        # Is there any unanalyzed comment beyond the current offset?
        has_unprocessed = session.query(RawComment).filter(
            RawComment.is_analyzed == False,
            RawComment.id > last_processed_id
        ).first()

        if not has_unprocessed:
            first_unanalyzed = session.query(RawComment.id).filter(
                RawComment.is_analyzed == False
            ).order_by(RawComment.id).first()

            if first_unanalyzed:
                logging.warning("⚠️ 检测到未分析评论，但当前偏移量过高，尝试重置偏移量为 0")
                logging.debug(f"📌 最小可用评论ID: {first_unanalyzed[0]}")
                last_processed_id = 0
            else:
                logging.info("✅ 所有评论已完成分析")
                return
    finally:
        session.close()

    start_time = datetime.now()
    update_last_offset(db_manager, offset=last_processed_id, start_time=start_time)

    while True:
        sample_comments = get_sample_comments(
            db_manager,
            last_id=last_processed_id,
            batch_size=batch_size
        )
        logging.info(f"🔍 获取到 {len(sample_comments)} 条待分析评论")

        if not sample_comments:
            logging.info("✅ 所有评论已完成分析")
            break

        logging.info("🧠 正在分析评论...")
        analyzed_results = analyzer.batch_analyze(sample_comments)
        logging.info(f"🧠 成功分析 {len(analyzed_results or [])} 条评论")

        if not analyzed_results:
            empty_analysis_count += 1
            logging.warning(f"⚠️ 连续第 {empty_analysis_count} 次无有效分析结果")

            # Advance past this batch anyway so it is not re-fetched forever.
            # NOTE(review): unlike run_stage_4, None ids are not filtered out
            # here, so max() can fail on a None — confirm get_comment_id never
            # returns None for these rows.
            try:
                last_processed_id = max([get_comment_id(comment) for comment in sample_comments])
                logging.debug(f"📌 更新偏移量至: {last_processed_id}")
            except Exception as e:
                logging.error(f"❌ 提取最大ID失败: {str(e)}")
                last_processed_id = 0

            update_last_offset(db_manager, offset=last_processed_id)
            logging.info(f"📌 已更新偏移量为 {last_processed_id}")

            if empty_analysis_count >= MAX_EMPTY_ANALYSIS:
                logging.error("❌ 达到最大空分析次数，主动退出以避免死循环")
                break
            continue

        empty_analysis_count = 0  # reset the empty-batch counter

        save_analysis_results(db_manager, analyzed_results)
        total_processed += len(analyzed_results)

        try:
            last_processed_id = max([get_comment_id(comment) for comment in sample_comments])
            logging.debug(f"📌 更新偏移量至: {last_processed_id}")
        except Exception as e:
            logging.error(f"❌ 提取最大ID失败: {str(e)}")
            last_processed_id = 0

        update_last_offset(db_manager, offset=last_processed_id)
        logging.info(f"✅ 当前已处理 {total_processed} 条评论（最后处理ID：{last_processed_id}）")

    end_time = datetime.now()
    update_last_offset(db_manager, offset=last_processed_id, end_time=end_time)

    duration = (end_time - start_time).total_seconds()
    logging.info(f"⏱️ 本次任务共耗时 {duration:.2f} 秒，处理了 {total_processed} 条评论")

    if total_processed == 0:
        logging.warning("⚠️ 当前任务未处理任何评论，请检查以下内容：")
        logging.warning("   - 数据库中是否存在未分析的评论（is_analyzed = FALSE）")
        logging.warning("   - AI 分析是否返回空值")

    logging.info("=== 系统运行结束 ===")


def generate_report(comment_count):
    """Print a one-row terminal summary table of collected comments."""
    from rich.console import Console
    from rich.table import Table

    summary = Table(title="数据采集汇总", show_header=True, header_style="bold magenta")
    summary.add_column("指标", style="cyan")
    summary.add_column("数量", justify="right")
    summary.add_row("采集评论数", str(comment_count))

    Console().print(summary)


def get_video_ids_from_db(db_manager):
    """Return all video sources with a non-empty video_id.

    :return: list of {"id": <row id>, "video_id": <platform video id>} dicts
    """
    session = db_manager.Session()
    try:
        rows = session.execute(
            text("SELECT id, video_id FROM d_video_sources WHERE video_id IS NOT NULL AND video_id != ''")
        )
        sources = []
        for row in rows:
            sources.append({"id": row[0], "video_id": row[1]})
        return sources
    finally:
        session.close()


def save_analysis_results(db_manager, analyzed_results):
    """Persist one batch of analysis results.

    Five steps inside a retried transaction scope:
      1. upsert rows into analyzed_comments (DB assigns ids),
      2. re-query them by (original_text, video_source_id) to learn the ids,
      3. build comment->tag link records (secondary tags plus their level-1
         parents, de-duplicated),
      4. bulk-insert the links into d_comment_tag,
      5. mark the source rows in d_raw_comments as analyzed.

    MySQL connection drops are retried up to three times with exponential
    backoff; other errors roll back and re-raise.
    """
    max_retries = 3
    retry_count = 0
    
    while retry_count < max_retries:
        session = db_manager.Session()
        try:
            # Reload the tag cache so tag-name -> id lookups are current.
            cache = LabelCache(db_manager=db_manager)
            label_map = cache.label_map
            main_tag_map = cache.main_tag_map

            comment_records = []
            comment_text_map = {}  # (original_text, video_source_id) -> analyzed row id (filled in step 2)

            for result in analyzed_results:
                res_dict = result.to_dict() if hasattr(result, 'to_dict') else result

                # Key fields (the analyzed row id comes from the DB, not the input).
                comment_text = res_dict.get("original_text")
                video_source_id = res_dict.get("video_source_id", 0)

                if not comment_text:
                    logging.warning("⚠️ 无有效评论内容，跳过该条记录")
                    continue

                # Require a valid (truthy) video_source_id.
                if not video_source_id:
                    logging.info(f"⚠️ 跳过无有效 video_source_id 的评论: {comment_text[:50]}...")
                    continue

                # Row payload for analyzed_comments (no explicit id).
                comment_data = {
                    "video_source_id": video_source_id,
                    "original_text": comment_text,
                    "translated_text": res_dict.get("translated_text"),
                    "primary_label": res_dict.get("primary_label"),
                    "secondary_labels": json.dumps(res_dict.get("secondary_labels"), ensure_ascii=False),
                    "confidence_score": res_dict.get("confidence_score"),
                    "like_count": res_dict.get("like_count", 0),

                     # Threading / provenance fields.
                    "platform": res_dict.get("platform"),
                    "parent_id": res_dict.get("parent_id"),
                    "parent_text": res_dict.get("parent_text"),
                    "top_comment_id": res_dict.get("top_comment_id")
                }

                comment_records.append(comment_data)
                # Placeholder; the real comment_id is resolved after the insert.
                comment_text_map[(comment_text, video_source_id)] = None

            # Step 1: upsert the analysis rows (ids assigned by auto-increment).
            if comment_records:
                stmt = insert(AnalyzedComment.__table__).values(comment_records)
                update_cols = {col.name: col for col in AnalyzedComment.__table__.columns}
                session.execute(stmt.on_duplicate_key_update(update_cols))

            session.commit()

            new_comments = []
            analyzed_ids = []

            # Step 2: look the rows back up by (original_text, video_source_id).
            # NOTE(review): the loop variable `text` shadows sqlalchemy.text
            # inside this comprehension — harmless (comprehension scope) but
            # confusing.
            if comment_text_map:
                query_conditions = [
                    and_(AnalyzedComment.original_text == text, AnalyzedComment.video_source_id == vsource)
                    for (text, vsource) in comment_text_map.keys()
                ]

                query = session.query(AnalyzedComment).filter(or_(*query_conditions))
                new_comments = query.all()

                # Map each (text, source) key to its analyzed row id.
                for c in new_comments:
                    key = (c.original_text, c.video_source_id)
                    if key in comment_text_map:
                        comment_text_map[key] = c.id

            # Step 3: build tag links, de-duplicated on (comment_id, tag_id).
            tag_records = []
            seen = set()

            for result in analyzed_results:
                res_dict = result.to_dict() if hasattr(result, 'to_dict') else result

                comment_text = res_dict.get("original_text")
                video_source_id = res_dict.get("video_source_id", 0)

                key = (comment_text, video_source_id)
                comment_id = comment_text_map.get(key)

                if not comment_id:
                    continue  # no matching analyzed row was found

                for tag_name in res_dict.get("secondary_labels", []):
                    tag_id = label_map.get(tag_name)
                    if not tag_id:
                        continue

                    if (comment_id, tag_id) in seen:
                        continue

                    seen.add((comment_id, tag_id))
                    tag_records.append({
                        "comment_id": comment_id,
                        "tag_id": tag_id
                    })

                    # Also link the level-1 parent tag, when there is one.
                    main_tag_id = main_tag_map.get(tag_name)
                    if main_tag_id and main_tag_id != tag_id:
                        if (comment_id, main_tag_id) not in seen:
                            seen.add((comment_id, main_tag_id))
                            tag_records.append({
                                "comment_id": comment_id,
                                "tag_id": main_tag_id
                            })

            # Step 4: bulk-insert the links into d_comment_tag.
            if tag_records:
                session.bulk_insert_mappings(CommentTag, tag_records)

            # Step 5: mark the originals as analyzed (is_analyzed = True).
            if comment_text_map:
                now = datetime.now()

                # NOTE(review): analyzed_ids are analyzed_comments primary keys,
                # yet they are used below to filter d_raw_comments.id — different
                # tables, so raw comments are only marked correctly if the two
                # tables happen to share ids. Confirm against the schema.
                analyzed_ids = [c.id for c in new_comments if c.id is not None]

                updated_count = session.query(RawComment).filter(
                    RawComment.id.in_(analyzed_ids)
                ).update(
                    {
                        RawComment.is_analyzed: True,
                        RawComment.analyzed_at: now
                    },
                    synchronize_session=False
                )

                logging.info(f"✅ 成功标记 {updated_count} 条原始评论为已分析")

            session.commit()
            logging.info(f"✅ 成功保存 {len(comment_records)} 条分析结果 和 {len(tag_records)} 条标签记录")
            logging.info(f"✅ 成功更新 {len(analyzed_ids)} 条原始评论为已分析")
            return  # success — exit the retry loop

        except Exception as e:
            session.rollback()
            retry_count += 1
            if retry_count >= max_retries or ("MySQL server has gone away" not in str(e) and "Can't connect to MySQL server" not in str(e)):
                logging.error(f"❌ 保存分析结果失败: {str(e)}")
                raise
            else:
                logging.warning(f"⚠️ 数据库连接问题，第 {retry_count} 次重试...")
                time.sleep(2 ** retry_count)  # exponential backoff
        finally:
            session.close()


def get_last_offset(db_manager):
    """Read the most recent analysis offset from d_analysis_progress.

    :param db_manager: DatabaseManager instance
    :return: the stored last_processed_id, or 0 when the table is empty
    """
    session = db_manager.Session()
    try:
        row = session.execute(
            text("SELECT last_processed_id FROM d_analysis_progress ORDER BY id DESC LIMIT 1")
        ).fetchone()
        return row[0] if row else 0
    finally:
        session.close()


def get_comment_id(comment):
    """Safely extract a comment's id.

    Accepts either a dict (key "id") or any object with an ``id`` attribute.

    :param comment: comment object (dict or ORM instance)
    :return: the id coerced to int, or None when absent/unsupported
    """
    if isinstance(comment, dict):
        raw = comment.get("id")
    elif hasattr(comment, "id"):
        raw = comment.id
    else:
        return None
    return None if raw is None else int(raw)


def run_stage_1_to_3(db_manager, platform_filter=None, discord_channel=None, video_id=None):
    """Stages 1-3: collect comments for every configured video source.

    :param db_manager: DatabaseManager providing Session()
    :param platform_filter: only collect from this platform
        ("youtube" / "facebook" / "discord") when given
    :param discord_channel: with platform_filter == "discord", collect from
        this ad-hoc channel instead of the d_video_sources table
    :param video_id: collect only this YouTube video (ad-hoc source)
    """
    config = Config()
    
    # Step 1: initialize the API key rotator (required before any collector runs).
    if config.YT_API_KEYS:
        logging.info(f"🔁 初始化 API Key 轮换器（共 {len(config.YT_API_KEYS)} 个 Key）")
        from src.utils.youtube_client import initialize_api_key_rotator
        initialize_api_key_rotator(config.YT_API_KEYS)
    else:
        raise ValueError("❌ 未配置任何 YouTube API Key")

    # Ensure VideoSource is imported in all code paths.
    from models.comment_schema import VideoSource

    session = db_manager.Session()
    try:
        # A specific video_id overrides the source table entirely.
        if video_id:
            logging.info(f"🎯 使用指定的YouTube视频ID: {video_id}")
            # Ad-hoc (not persisted) source object.
            temp_source = VideoSource(
                platform="youtube",
                video_id=video_id,
                title=f"临时YouTube视频 {video_id}"
            )
            sources = [temp_source]
        # An explicit Discord channel also builds an ad-hoc source.
        elif platform_filter == "discord" and discord_channel:
            logging.info(f"🎯 使用指定的Discord频道: {discord_channel}")
            # Ad-hoc (not persisted) source object.
            temp_source = VideoSource(
                platform="discord",
                video_id=discord_channel,
                title=f"临时Discord频道 {discord_channel}"
            )
            sources = [temp_source]
        else:
            sources = session.query(VideoSource).all()
            
        if not sources:
            logging.info("❌ 没有找到视频源信息，请检查 d_video_sources 表")
            return

        for source in sources:
            # NOTE(review): if source.platform is None this raises before the
            # `or "youtube"` fallback can apply — confirm platform is NOT NULL.
            platform = source.platform.lower() or "youtube"

            # Honor the platform filter when one was supplied.
            if platform_filter and platform != platform_filter:
                continue

            collector_class = None

            if platform == "youtube":
                from data_collector.platforms.youtube_collector import YouTubeCommentCollector
                collector_class = YouTubeCommentCollector
            elif platform == "facebook":
                from data_collector.platforms.facebook_collector import FacebookCommentCollector
                collector_class = FacebookCommentCollector
            elif platform == "discord":
                from data_collector.platforms.discord_collector import DiscordCommentCollector
                collector_class = DiscordCommentCollector
            else:
                logging.warning(f"⚠️ 不支持的平台: {platform}")
                continue

            video_id = source.video_id
            logging.info(f"🎥 [{platform}] 视频 {video_id} 开始采集评论...")

            try:
                # The collector receives the already-initialized global key pool.
                collector = collector_class(api_keys=config.YT_API_KEYS)

                main_comments = collector.get_comments({"id": source.id if hasattr(source, 'id') else 0, "video_id": video_id}, max_comments_per_video=5000)
                
                # Persist only when the source is a real DB row.
                # NOTE(review): ORM instances normally define the `id` attribute
                # even before flush (it is None), so hasattr(source, 'id') is
                # likely True for the ad-hoc sources too — confirm this guard
                # actually distinguishes temporary sources.
                if hasattr(source, 'id'):
                    session.add_all(main_comments)
                    session.commit()

                logging.info(f"✅ [{platform}] 主评论采集完成，共 {len(main_comments)} 条")
                video_info = {"id": source.id if hasattr(source, 'id') else 0, "video_id": video_id}
                if hasattr(collector, 'get_replies'):
                    logging.info(f"🗨️ [{platform}] 开始采集子评论...")
                    collector.get_replies(session, video_info)  # pass the full video_info

            except Exception as e:
                logging.error(f"🚨 [{platform}] 视频 {video_id} 采集失败: {str(e)}", exc_info=True)
                if hasattr(source, 'id'):  # roll back only for persisted sources
                    session.rollback()
            finally:
                if hasattr(source, 'id'):  # flush only for persisted sources
                    session.flush()

    finally:
        session.close()


if __name__ == "__main__":
    # Command-line driver for the whole pipeline: collection (stages 1-3),
    # analysis (stage 4), scheduling, subtitle translation, video download.
    parser = argparse.ArgumentParser(description="YouTube数据分析系统")
    parser.add_argument("--skip-collection", action="store_true", help="跳过数据采集和报告生成阶段")
    parser.add_argument("--max-process", type=int, default=None, help="最大处理评论数量（可选）")
    parser.add_argument("--refresh-label-cache", action="store_true", help="启动时刷新标签缓存")
    parser.add_argument("--run-scheduler", action="store_true", help="启动定时任务")
    parser.add_argument("--run-once", action="store_true", help="立即执行一次任务（仅阶段4）")
    parser.add_argument("--reprocess-all", action="store_true", help="强制重新分析所有评论")
    parser.add_argument("--force-reset", action="store_true", help="强制从 ID=0 开始分析")
    parser.add_argument("--stage", choices=["1-3", "4"], help="选择执行的阶段：1-3 为采集，4 为分析")
    parser.add_argument("--discord-channel", type=str, help="指定Discord频道，格式为 SERVER_ID/CHANNEL_ID")
    parser.add_argument("--platform", choices=["youtube", "facebook", "discord"], help="指定要采集的平台")
    parser.add_argument("--extract-subtitles", action="store_true", help="提取并翻译视频字幕")
    parser.add_argument("--video-id", type=str, default=None, help="指定视频ID进行处理")
    parser.add_argument("--translate-subtitles", action="store_true", help="翻译视频字幕")
    parser.add_argument("--batch", nargs='+', help="指定多个视频ID进行翻译")
    parser.add_argument("--all", action="store_true", help="翻译数据库中所有视频")
    parser.add_argument("--format", choices=['srt', 'json'], default='srt', help="输出格式")
    parser.add_argument("--download-youtube-video", type=str, help="根据视频ID下载 YouTube 视频")
    parser.add_argument(
        "--skip-translate",
        action="store_true",
        help="仅将已存在的字幕文件写入数据库，跳过调用翻译接口"
    )
    parser.add_argument(
        "--translator-type",
        choices=["deepseek", "chatgpt"],
        default="deepseek",
        help="选择字幕翻译所使用的模型（默认 deepseek）"
    )
    # Optional HTTP(S) proxy for subtitle translation.
    parser.add_argument("--proxy", type=str, help="代理设置，例如 http://127.0.0.1:7890")

    # Webshare proxy support for youtube_transcript_api.
    parser.add_argument("--webshare-username", type=str, help="Webshare 代理用户名")
    parser.add_argument("--webshare-password", type=str, help="Webshare 代理密码")

    args = parser.parse_args()
    config = Config()
    setup_logging()
    db_manager = DatabaseManager(config.DB_CONFIG)

    logging.info(f"🔌 当前数据库配置: {config.DB_CONFIG}")

    # Build the optional Webshare proxy configuration.
    proxy_config = None
    if args.webshare_username and args.webshare_password:
        try:
            proxy_config = WebshareProxyConfig(
                proxy_username=args.webshare_username,
                proxy_password=args.webshare_password
            )
            logging.info(f"🛡️ 已配置 Webshare 代理: {args.webshare_username}")
        except Exception as e:
            logging.error(f"❌❌ 创建 Webshare 代理配置失败: {str(e)}")
            sys.exit(1)

    # Video download short-circuits everything else.
    if args.download_youtube_video:
        try:
            # Fix: use the class defined in this module directly instead of
            # re-importing it via `from src.main import ...`, which breaks
            # whenever the file is not importable as the src.main module.
            downloader = YouTubeVideoDownloader(output_path="downloads/")
            downloader.download(args.download_youtube_video.strip())
            logging.info("✅ 视频下载完成")
            sys.exit(0)
        except Exception as e:
            logging.error(f"❌ 视频下载失败: {str(e)}")
            sys.exit(1)

    if args.run_once:
        logging.info("🚀 正在立即执行一次任务（仅阶段4）")
        main(skip_collection=True)
        logging.info("✅ 任务已执行完毕。")
    elif args.run_scheduler:
        scheduler = BlockingScheduler()
        # Fix: run_stage_4 requires db_manager; without args=[db_manager] the
        # nightly cron invocation crashed with a TypeError.
        scheduler.add_job(run_stage_4, 'cron', hour=1, minute=0, args=[db_manager])
        logging.info("⏰ 已启动定时任务，每天凌晨1点运行阶段4分析评论")

        try:
            scheduler.start()
        except KeyboardInterrupt:
            logging.info("收到中断信号，正在优雅地关闭调度器...")
            scheduler.shutdown(wait=False)
            logging.info("调度器已关闭，程序退出。")
    elif args.reprocess_all:
        logging.info("🔁 正在强制重新分析所有评论")
        session = db_manager.Session()
        try:
            session.execute(text("UPDATE d_raw_comments SET is_analyzed = false"))
            session.execute(text("UPDATE d_analysis_progress SET last_processed_id = 0"))
            session.commit()
        finally:
            session.close()
    elif args.stage == "1-3":
        run_stage_1_to_3(db_manager, args.platform, args.discord_channel, args.video_id)
    elif args.stage == "4":
        logging.info("🚀 启动阶段4：分析评论")
        run_stage_4(db_manager)
        logging.info("✅ 阶段4完成：评论已分析完毕")
    elif args.translate_subtitles:
        logging.info("🎬 正在翻译视频字幕...")
        # When an explicit proxy was given, export it for downstream clients.
        if args.proxy:
            os.environ["HTTP_PROXY"] = args.proxy
            os.environ["HTTPS_PROXY"] = args.proxy
            logging.info(f"使用代理: {args.proxy}")
        # Create the translator, forwarding the Webshare proxy configuration.
        translator = VideoSubtitleTranslator(
            output_dir="output/subtitles", 
            proxy_config=proxy_config
        )
        if args.video_id:
            translator.translate_single_video(args.video_id, args.format)
        elif args.batch:
            translator.batch_translate_videos(args.batch, args.format)
        elif args.all:
            translator.translate_all_videos_in_db(db_manager, args.format)
        else:
            parser.print_help()
    elif args.extract_subtitles:
        logging.info("🎬 正在提取并翻译字幕...")
        from translate.subtitle_extractor import extract_and_translate_subtitles
        extract_and_translate_subtitles(
            db_manager,
            video_id=args.video_id,
            translator_type=args.translator_type,
            skip_translate=args.skip_translate
        )
        logging.info("✅ 字幕提取和翻译完成。")
    else:
        if args.refresh_label_cache:
            logging.info("🔄 初始化时刷新标签缓存")
            refresh_label_cache(db_manager)

        main(
            skip_collection=args.skip_collection,
            max_process=args.max_process
        )

    logging.info("=== 系统运行结束 ===")
