#!/usr/bin/env python3
"""
Worker主程序
- 扫描Excel文件
- 批量上报作品信息
- 转录视频
- 上报转录结果
"""

import json
import os
import sys
import logging
import argparse
import time
from pathlib import Path
from glob import glob

from parsers.excel_parser import ExcelParser
from client.api_client import APIClient
from core.cache import CacheManager
from core.transcriber import VideoTranscriber
from core.polish import TextPolisher, load_env_config
from core.pipeline import PipelineProcessor
from utils.text_validator import check_text_quality, suggest_transcribe_fix


def setup_logging(config: dict) -> None:
    """Configure root logging with a UTF-8 file handler and a console handler.

    Args:
        config: Worker config dict; reads ``config['log']['file']`` (default
            'logs/worker.log') and ``config['log']['level']`` (default 'INFO').
    """
    log_config = config.get('log', {})
    log_file = log_config.get('file', 'logs/worker.log')
    log_dir = os.path.dirname(log_file)
    if log_dir:  # guard: os.makedirs('') raises when the path has no dir part
        os.makedirs(log_dir, exist_ok=True)

    fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # File log always uses UTF-8.
    file_handler = logging.FileHandler(log_file, encoding='utf-8')
    file_handler.setFormatter(fmt)

    # On Windows, force the console streams to UTF-8 BEFORE creating the
    # StreamHandler: a handler captures its stream at construction time, so
    # rewrapping sys.stdout/sys.stderr afterwards (as the previous version
    # did) never affected the already-created handler.
    if sys.platform == 'win32':
        try:
            for stream in (sys.stdout, sys.stderr):
                if (stream.encoding or '').lower() != 'utf-8':
                    # Python 3.7+: reconfigure the wrapper in place;
                    # errors='ignore' drops unencodable chars (emoji, etc.)
                    # instead of crashing the log call.
                    stream.reconfigure(encoding='utf-8', errors='ignore')
        except Exception:
            # Best effort only — fall back to the default console encoding.
            pass

    # Console handler (stderr by default), created after any stream rewrap.
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(fmt)

    logging.basicConfig(
        level=log_config.get('level', 'INFO'),
        handlers=[file_handler, console_handler],
    )


def safe_log_text(text: str, max_length: int = 100) -> str:
    """
    Sanitize text for logging so it cannot trigger encoding errors.

    Args:
        text: raw text to sanitize
        max_length: maximum length before truncation

    Returns:
        The sanitized text, or 'N/A' when nothing usable remains.
    """
    if not text:
        return 'N/A'

    if sys.platform == 'win32':
        # On Windows, strip emoji and anything outside a safe character set
        # (CJK, ASCII letters/digits, whitespace, common punctuation).
        import re
        text = re.sub(r'[^\u4e00-\u9fa5\u3000-\u303fa-zA-Z0-9\s\-_,;.!?()（）【】《》""''：；，。！？]', '', text)

    # Truncate overly long text, marking the cut with an ellipsis.
    truncated = text if len(text) <= max_length else text[:max_length] + '...'

    return truncated.strip() or 'N/A'


def load_config(config_path: str) -> dict:
    """Read and parse the JSON configuration file at *config_path*."""
    with open(config_path, encoding='utf-8') as fp:
        config = json.load(fp)
    return config


def convert_path(path: str, path_mapping: dict) -> str:
    """
    Translate a Windows-style path to its Mac/Linux equivalent.

    Args:
        path: original path
        path_mapping: mapping of Windows path prefixes to Unix prefixes

    Returns:
        The translated path, or the input unchanged when no prefix matches.
    """
    if not path or not path_mapping:
        return path

    # Match against a forward-slash version of the path.
    normalized = path.replace('\\', '/')

    for win_prefix, unix_prefix in path_mapping.items():
        # Normalize the mapping key the same way before comparing.
        prefix = win_prefix.replace('\\', '/')
        if normalized.startswith(prefix):
            # Swap only the first occurrence of the matched prefix.
            return normalized.replace(prefix, unix_prefix, 1)

    # Nothing matched: hand back the untouched original path.
    return path


def scan_excel_files(watch_dir: str) -> list:
    """Recursively find .xlsx files under *watch_dir*.

    Office temporary/lock files (basename starting with '~', e.g. '~$a.xlsx')
    are excluded.

    Args:
        watch_dir: root directory to scan.

    Returns:
        List of matching file paths.
    """
    pattern = os.path.join(watch_dir, '**/*.xlsx')
    files = glob(pattern, recursive=True)
    # BUG FIX: the temp-file check must inspect the basename — the full path
    # returned by glob() never starts with '~', so nothing was filtered before.
    return [f for f in files if not os.path.basename(f).startswith('~')]


def is_pure_music(text: str) -> bool:
    """
    Heuristically detect whether a transcript is pure music (no real speech).

    Hoisted to module level — the previous version redefined this function
    on every loop iteration inside process_excel.

    Returns True when the text is empty, extremely short, or long with a low
    Chinese-character ratio and dominated by short tokens — the typical noise
    Whisper emits on instrumental audio.
    """
    if not text or not text.strip():
        return True

    # Work on the trimmed text.
    text = text.strip()

    # Fewer than 10 characters: most likely just music noise.
    if len(text) < 10:
        return True

    # Share of meaningful Chinese characters.
    chinese_chars = sum(1 for c in text if '\u4e00' <= c <= '\u9fff')
    total_chars = len(text.replace(' ', '').replace('\n', ''))

    if total_chars == 0:
        return True

    chinese_ratio = chinese_chars / total_chars

    # Below 20% Chinese on a long, mostly letter/symbol text: check whether
    # tokens are mostly single letters or short words — a signature of music
    # being "transcribed" as noise.
    if chinese_ratio < 0.2 and total_chars > 50:
        words = text.replace('-', ' ').split()
        short_words = sum(1 for w in words if len(w) <= 3)
        if len(words) > 0 and short_words / len(words) > 0.8:
            return True

    return False


def process_excel(
    excel_path: str,
    config: dict,
    excel_parser: ExcelParser,
    api_client: APIClient,
    cache_manager: CacheManager,
    transcriber: VideoTranscriber,
):
    """
    Process a single Excel file end to end.

    Steps: detect the platform, parse the works, batch-report them to the
    backend, transcribe every downloaded video (optionally polishing the
    text), submit each transcription, and finally mark the file processed.

    Args:
        excel_path: path to the Excel file
        config: worker config dict
        excel_parser: Excel parser
        api_client: backend API client
        cache_manager: local cache/state manager
        transcriber: video transcriber
    """
    logger = logging.getLogger(__name__)
    logger.info(f"开始处理: {excel_path}")

    try:
        # 1. Detect the platform
        platform_name, platform_code = excel_parser.detect_platform(excel_path)
        logger.info(f"识别平台: {platform_name} (code={platform_code})")

        # 2. Parse the Excel file
        works = excel_parser.parse(excel_path, platform_code)
        logger.info(f"解析到 {len(works)} 条作品记录")

        if not works:
            logger.warning("没有有效的作品记录")
            return

        # 3. Batch-report work info to the backend
        batch_name = Path(excel_path).stem
        worker_machine = config['worker']['machine_name']

        result = api_client.batch_create_works(
            batch_name=batch_name,
            worker_machine=worker_machine,
            platform=platform_code,
            works=works,
        )

        if not result.get('success'):
            logger.error(f"上报作品信息失败: {result.get('message')}")
            return

        # 4. Persist batch status locally
        cache_manager.save_batch_status(
            batch_id=result['batchId'],
            need_transcribe=result.get('needTranscribeList', []),
        )

        # 5. Transcription
        need_transcribe = result.get('needTranscribeList', [])
        logger.info(f"需要转录 {len(need_transcribe)} 个作品")

        for item in need_transcribe:
            # Initialized before the try so the except clause below can
            # always reference it, even when the lookup itself raises.
            work = None
            try:
                # Find the matching work record
                work = next(
                    (w for w in works if w['workId'] == item['workId']), None
                )

                if not work:
                    logger.warning(f"找不到作品数据: {item['workId']}")
                    continue

                # Only type-0 (video) works are transcribed
                if work['workType'] != 0:
                    logger.info(f"跳过非视频: {work['workId']}")
                    continue

                if not work.get('isDownloaded') or not work.get('savePath'):
                    logger.info(f"跳过未下载: {work['workId']}")
                    continue

                # Translate the save path (Windows -> Mac/Linux)
                path_mapping = config.get('worker', {}).get('path_mapping', {})
                original_path = work['savePath']
                converted_path = convert_path(original_path, path_mapping)

                if converted_path != original_path:
                    logger.debug(
                        f"路径转换: {original_path} -> {converted_path}"
                    )

                if not os.path.exists(converted_path):
                    logger.warning(f"文件不存在: {converted_path}")
                    continue

                logger.info(f"开始转录: {work['workId']}")

                # Transcribe
                start_time = time.time()
                transcribe_result = transcriber.transcribe(
                    converted_path, language='zh'
                )
                transcribe_time = time.time() - start_time

                raw_text = transcribe_result['text']

                # Detect pure music (no meaningful speech)
                is_music = is_pure_music(raw_text)

                if is_music:
                    logger.info(f"检测到纯音乐视频: {work['workId']}，标记为纯音乐")
                    raw_text = "[纯音乐]"

                # Check transcription text quality
                quality_result = check_text_quality(raw_text)
                has_transcribe_error = False

                if not quality_result['is_valid'] and not is_music:
                    logger.warning(
                        f"转录质量异常: {work['workId']} - {quality_result['reason']}"
                    )
                    suggestion = suggest_transcribe_fix(quality_result)
                    if suggestion:
                        logger.info(f"建议: {suggestion}")
                    has_transcribe_error = True

                # Build the submission payload
                transcription = {
                    'rawText': raw_text,
                    'language': transcribe_result.get('language', 'zh'),
                    'duration': transcribe_result.get('duration', 0),
                    'processingTime': transcribe_time,
                    'engine': 'whisper-tiny',
                    'segmentCount': transcribe_result.get('segments_count', 0),
                    'polishedText': None,
                    'polishStatus': None,
                    'polishEngine': None,
                }

                # Optional polish — skipped for pure music or bad transcripts
                skip_polish = is_music or has_transcribe_error

                # When skipped because of a bad transcript, mark status '3'
                if has_transcribe_error:
                    transcription['polishStatus'] = '3'  # 3 = bad transcript, polish skipped
                    logger.info(f"因转录质量异常，跳过润色: {work['workId']}")

                if not skip_polish and config['polish'].get('enabled'):
                    try:
                        logger.info(f"开始润色: {work['workId']}")
                        polish_start = time.time()

                        # Prefer env-var config; fall back to the file config
                        try:
                            polish_config = load_env_config()
                            if not polish_config.get('api_key'):
                                logger.warning(f"未配置 API Key，跳过润色: {work['workId']}")
                                raise ValueError("API Key not configured")
                        except Exception as e:
                            logger.debug(f"从环境变量加载配置失败: {str(e)}")
                            polish_config = config['polish']
                            if not polish_config.get('api_key'):
                                logger.warning(f"配置文件中也没有 API Key，跳过润色: {work['workId']}")
                                raise ValueError("API Key not configured")

                        polisher = TextPolisher(polish_config)

                        # Context passed to the polisher for better output
                        context = {
                            'title': work.get('title'),
                            'author': work.get('author'),
                            'keyword': work.get('searchKeyword'),
                            'platform': platform_code,
                        }

                        polish_result = polisher.polish(raw_text, context)
                        polish_time = time.time() - polish_start

                        if polish_result['status'] == 1:
                            transcription['polishedText'] = polish_result[
                                'polished_text'
                            ]
                            transcription['polishStatus'] = 1  # success
                            # Record the model actually used (may be a fallback)
                            transcription['polishEngine'] = polish_result.get(
                                'used_model', polish_config.get('model', 'deepseek-chat')
                            )
                            transcription['processingTime'] = (
                                transcribe_time + polish_time
                            )

                            # Log when a fallback model was used
                            used_model = polish_result.get('used_model', '')
                            primary_model = polish_config.get('model', '')
                            if used_model and used_model != primary_model:
                                logger.info(
                                    f"润色完成: {work['workId']} (耗时: {polish_time:.2f}秒, 使用降级模型: {used_model})"
                                )
                            else:
                                logger.info(
                                    f"润色完成: {work['workId']} (耗时: {polish_time:.2f}秒)"
                                )
                        else:
                            transcription['polishStatus'] = 2  # failed
                            logger.warning(
                                f"润色失败: {work['workId']} - {polish_result.get('error')}"
                            )
                    except Exception as e:
                        logger.error(f"润色异常: {work['workId']} - {str(e)}")
                        transcription['polishStatus'] = 2  # failed

                # Submit the transcription result to the backend
                submit_result = api_client.submit_transcription(
                    platform=platform_code,
                    work_id=work['workId'],
                    worker_machine=worker_machine,
                    transcription=transcription,
                )

                if submit_result.get('success'):
                    logger.info(f"转录完成: {work['workId']}")
                else:
                    logger.error(f"上报转录失败: {submit_result.get('message')}")

            except Exception as e:
                # BUG FIX: `work` could previously be unbound here when the
                # lookup itself raised, turning the real error into a NameError.
                work_id = work.get('workId') if work else item.get('workId')
                logger.error(f"转录失败: {work_id} - {str(e)}")
                if work is not None:
                    cache_manager.save_failed_work(work, str(e))

        # 6. Mark the file as processed
        cache_manager.mark_file_processed(excel_path, result)
        logger.info(f"处理完成: {excel_path}")

    except Exception as e:
        logger.error(f"处理Excel失败: {str(e)}", exc_info=True)


def repolish_works(
    config: dict,
    api_client: APIClient,
    platform_code: int,
    batch_id: int = None
):
    """
    Re-polish works that were transcribed but not yet polished.

    Fetches the pending list from the backend, polishes each raw text with
    the chat model only (no automatic fallback), and uploads the results in
    batches of ``config['polish']['batch_size']`` (default 20). Failed
    polishes are uploaded too, with polishStatus=2 and empty text.

    Args:
        config: worker config dict (reads the 'polish' section)
        api_client: backend API client
        platform_code: platform code
        batch_id: optional batch ID to restrict the query
    """
    logger = logging.getLogger(__name__)

    # Bail out early when polishing is disabled in the config.
    if not config['polish'].get('enabled'):
        logger.error("润色功能未启用，无法执行补充润色")
        return

    logger.info("=" * 60)
    logger.info("开始补充润色")
    logger.info("=" * 60)

    # Ask the backend for the works that still need polishing.
    # NOTE: returns a dict with 'success', 'message' and 'works' keys.
    result = api_client.get_need_repolish(platform_code, batch_id, limit=1000)

    if not result.get('success'):
        logger.error(f"获取需要润色列表失败: {result.get('message')}")
        return

    works = result.get('works', [])
    total = len(works)

    if total == 0:
        logger.info("没有需要补充润色的作品")
        return

    logger.info(f"找到 {total} 个需要润色的作品")

    # Load the polish configuration (API key comes from environment vars).
    try:
        polish_config = load_env_config()
        if not polish_config.get('api_key'):
            logger.error("未配置 DeepSeek API Key")
            return
    except Exception as e:
        logger.error(f"加载润色配置失败: {str(e)}")
        return

    polisher = TextPolisher(polish_config)
    polished_results = []
    success_count = 0
    failed_count = 0

    # Batch upload size (configurable).
    batch_size = config['polish'].get('batch_size', 20)
    logger.info(f"批量上传大小: {batch_size} 条")

    # Polish one work at a time.
    for idx, work in enumerate(works, 1):
        try:
            # Sanitize the title to avoid Windows GBK encoding errors in logs.
            safe_title = safe_log_text(work.get('title', 'N/A'))
            logger.info(f"[{idx}/{total}] 润色: {work['workId']} - {safe_title}")

            # Context passed to the polisher for better output quality.
            context = {
                'title': work.get('title'),
                'author': work.get('author'),
                'platform': platform_code,
            }

            # Polish with the chat model only (no automatic fallback/retry).
            polish_result = polisher.polish(work['rawText'], context, model='deepseek-chat')

            if polish_result['status'] == 1:
                # Polish succeeded.
                used_model = polish_result.get('used_model', 'deepseek-chat')

                polished_results.append({
                    'platform': work['platform'],
                    'workId': work['workId'],
                    'databaseId': work.get('databaseId'),
                    'polishedText': polish_result['polished_text'],
                    'polishStatus': 1,
                    'polishEngine': used_model,
                    'qualityScore': polish_result.get('quality_score'),
                })
                success_count += 1
                logger.info(f"  润色成功 (评分: {polish_result.get('quality_score', 'N/A')})")
            else:
                # Polish failed: still upload a failure record (status=2).
                failed_count += 1
                logger.warning(f"  润色失败: {polish_result.get('error')}")

                # Upload the failure record to the backend as well.
                polished_results.append({
                    'platform': work['platform'],
                    'workId': work['workId'],
                    'databaseId': work.get('databaseId'),
                    'polishedText': '',  # empty on failure
                    'polishStatus': 2,  # failed status
                    'polishEngine': 'deepseek-chat',
                    'qualityScore': 0,
                })

            # Upload once every batch_size accumulated results.
            if len(polished_results) >= batch_size:
                logger.info(f">>> 已累积 {len(polished_results)} 条，开始批量上传...")
                result = api_client.batch_update_polish(polished_results)
                if result.get('success'):
                    logger.info(f">>> 批量上传成功: 更新 {result.get('updated', 0)} 条, 失败 {result.get('failed', 0)} 条")
                else:
                    logger.error(f">>> 批量上传失败: {result.get('message', 'Unknown error')}")
                polished_results = []

        except Exception as e:
            failed_count += 1
            logger.error(f"  润色异常: {str(e)}")

    # Upload whatever is left over after the loop.
    if polished_results:
        logger.info(f">>> 上传剩余 {len(polished_results)} 条润色结果...")
        result = api_client.batch_update_polish(polished_results)
        if result.get('success'):
            logger.info(f">>> 最终上传成功: 更新 {result.get('updated', 0)} 条, 失败 {result.get('failed', 0)} 条")
        else:
            logger.error(f">>> 最终上传失败: {result.get('message', 'Unknown error')}")

    logger.info("=" * 60)
    logger.info(f"补充润色完成: 成功 {success_count} 条, 失败 {failed_count} 条")
    logger.info("=" * 60)


def retry_failed_works(config: dict, api_client: APIClient, transcriber: VideoTranscriber, platform: int):
    """
    Retry failed records (re-transcription + two-tier polish).

    For each work returned by the backend: re-transcribe from the audio path
    when it is available, then polish with deepseek-chat and fall back to
    deepseek-reasoner on failure. Results (including failure records with
    polishStatus=2) are uploaded in batches of
    ``config['polish']['batch_size']`` (default 20).

    Args:
        config: worker config dict
        api_client: backend API client
        transcriber: transcriber (already configured with the upgraded model)
        platform: platform code
    """
    logger = logging.getLogger(__name__)

    logger.info("=" * 60)
    logger.info("开始重试失败的记录")
    logger.info("=" * 60)

    # Fetch the works to retry (failed transcriptions + failed polishes).
    # BUG FIX: get_need_repolish returns a dict ({'success', 'works', ...}),
    # not a list — unpack it the same way repolish_works does.
    result = api_client.get_need_repolish(platform, limit=1000)
    if not result.get('success'):
        logger.error(f"获取需要润色列表失败: {result.get('message')}")
        return

    works = result.get('works', [])
    total = len(works)

    if total == 0:
        logger.info("没有需要重试的作品")
        return

    logger.info(f"找到 {total} 个需要重试的作品")

    # Load the polish configuration (API key comes from environment vars).
    try:
        polish_config = load_env_config()
        if not polish_config.get('api_key'):
            logger.error("未配置 DeepSeek API Key")
            return
    except Exception as e:
        logger.error(f"加载润色配置失败: {str(e)}")
        return

    polisher = TextPolisher(polish_config)
    polished_results = []
    success_count = 0
    failed_count = 0
    batch_size = config['polish'].get('batch_size', 20)

    logger.info(f"批量上传大小: {batch_size} 条")
    logger.info(f"Whisper模型: {transcriber.model_name}")

    # Process one work at a time.
    for idx, work in enumerate(works, 1):
        try:
            safe_title = safe_log_text(work.get('title', 'N/A'))
            logger.info(f"[{idx}/{total}] 重试: {work['workId']} - {safe_title}")

            # Step 1: re-transcribe when an audio path is available.
            # rawText may be missing/None on failed records — coerce to ''
            # so the length log and the polish call below cannot crash.
            raw_text = work.get('rawText') or ''
            audio_path = work.get('audioPath')  # assumes the backend returns the audio path — TODO confirm

            if audio_path and os.path.exists(audio_path):
                logger.info(f"  重新转录音频: {audio_path}")
                try:
                    transcribe_result = transcriber.transcribe(audio_path)
                    raw_text = transcribe_result.get('text', '')

                    # Quality check on the fresh transcript.
                    quality_result = check_text_quality(raw_text)
                    if not quality_result['is_valid']:
                        logger.warning(f"  转录质量异常: {quality_result['reason']}")
                        failed_count += 1

                        # Upload a transcription-failure record.
                        polished_results.append({
                            'platform': work['platform'],
                            'workId': work['workId'],
                            'databaseId': work.get('databaseId'),
                            'polishedText': '',
                            'polishStatus': 2,
                            'polishEngine': 'transcribe-failed',
                            'qualityScore': 0,
                        })
                        continue

                    logger.info(f"  重新转录成功，文本长度: {len(raw_text)}")
                except Exception as e:
                    logger.error(f"  转录失败: {str(e)}")
                    failed_count += 1
                    continue
            else:
                logger.info(f"  使用原有转录文本（长度: {len(raw_text)}）")

            # Build the polish context.
            context = {
                'title': work.get('title'),
                'author': work.get('author'),
                'platform': platform,
            }

            # Step 2: first polish attempt (chat model).
            logger.info("  尝试润色 (deepseek-chat)...")
            polish_result = polisher.polish(raw_text, context, model='deepseek-chat')

            if polish_result['status'] == 1:
                # Chat model succeeded.
                polished_results.append({
                    'platform': work['platform'],
                    'workId': work['workId'],
                    'databaseId': work.get('databaseId'),
                    'polishedText': polish_result['polished_text'],
                    'polishStatus': 1,
                    'polishEngine': 'deepseek-chat',
                    'qualityScore': polish_result.get('quality_score'),
                })
                success_count += 1
                logger.info(f"  润色成功 (chat, 评分: {polish_result.get('quality_score', 'N/A')})")
            else:
                # Step 3: chat failed — fall back to the reasoner model.
                logger.warning(f"  Chat模型失败: {polish_result.get('error')}")
                logger.info("  尝试推理模型 (deepseek-reasoner)...")

                polish_result = polisher.polish(raw_text, context, model='deepseek-reasoner')

                if polish_result['status'] == 1:
                    # Reasoner model succeeded.
                    polished_results.append({
                        'platform': work['platform'],
                        'workId': work['workId'],
                        'databaseId': work.get('databaseId'),
                        'polishedText': polish_result['polished_text'],
                        'polishStatus': 1,
                        'polishEngine': 'deepseek-reasoner',
                        'qualityScore': polish_result.get('quality_score'),
                    })
                    success_count += 1
                    logger.info(f"  润色成功 (reasoner, 评分: {polish_result.get('quality_score', 'N/A')})")
                else:
                    # Both models failed — dump full diagnostics to the log.
                    failed_count += 1
                    logger.error("=" * 80)
                    logger.error("所有模型均失败，详细信息：")
                    logger.error("=" * 80)
                    logger.error(f"Work ID: {work['workId']}")
                    logger.error(f"标题: {work.get('title', 'N/A')}")
                    logger.error("-" * 80)
                    logger.error(f"传入文本长度: {len(raw_text)} 字符")
                    logger.error(f"传入文本(前500字符):")
                    logger.error(raw_text[:500] if raw_text else "N/A")
                    logger.error("-" * 80)
                    logger.error(f"最后错误: {polish_result.get('error')}")
                    api_resp = polish_result.get('api_response')
                    if api_resp:
                        logger.error(f"API响应(前500字符):")
                        logger.error(str(api_resp)[:500] if api_resp else "N/A")
                    logger.error("=" * 80)

                    # Upload the failure record anyway.
                    polished_results.append({
                        'platform': work['platform'],
                        'workId': work['workId'],
                        'databaseId': work.get('databaseId'),
                        'polishedText': '',
                        'polishStatus': 2,  # failed status
                        'polishEngine': 'deepseek-reasoner',
                        'qualityScore': 0,
                    })

            # Upload once every batch_size accumulated results.
            if len(polished_results) >= batch_size:
                logger.info(f">>> 已累积 {len(polished_results)} 条，开始批量上传...")
                result = api_client.batch_update_polish(polished_results)
                if result.get('success'):
                    logger.info(f">>> 批量上传成功: 更新 {result.get('updated', 0)} 条, 失败 {result.get('failed', 0)} 条")
                else:
                    logger.error(f">>> 批量上传失败: {result.get('message', 'Unknown error')}")
                polished_results = []

        except Exception as e:
            failed_count += 1
            logger.error(f"  处理异常: {str(e)}")

    # Upload whatever is left over after the loop.
    if polished_results:
        logger.info(f">>> 上传剩余 {len(polished_results)} 条结果...")
        result = api_client.batch_update_polish(polished_results)
        if result.get('success'):
            logger.info(f">>> 最终上传成功: 更新 {result.get('updated', 0)} 条, 失败 {result.get('failed', 0)} 条")
        else:
            logger.error(f">>> 最终上传失败: {result.get('message', 'Unknown error')}")

    logger.info("=" * 60)
    logger.info(f"重试完成: 成功 {success_count} 条, 失败 {failed_count} 条")
    logger.info("=" * 60)


def ensure_directories():
    """
    Make sure the required working directories exist.

    Creates the following directories (relative to the project root, one
    level above this file), ignoring any that already exist:
    - logs/     log files
    - cache/    cached data and processing state
    - output/   transcription output files (optional)
    """
    base_dir = os.path.join(os.path.dirname(__file__), '..')
    for name in ('logs', 'cache', 'output'):
        os.makedirs(os.path.join(base_dir, name), exist_ok=True)


def main() -> None:
    """Program entry point.

    Parses CLI arguments, loads the JSON config (falling back to the legacy
    path), configures logging, initializes the parser/API/cache/transcriber
    components, then dispatches to one of four modes: --pipeline,
    --retry-failed, --repolish, or the default Excel-scanning mode.
    """
    # Make sure the working directories (logs/cache/output) exist.
    ensure_directories()

    parser = argparse.ArgumentParser(description='Worker转录程序')
    parser.add_argument(
        '--config',
        default='config/worker_config.json',
        help='配置文件路径',
    )
    parser.add_argument('--excel', help='处理指定的Excel文件')
    parser.add_argument(
        '--repolish',
        action='store_true',
        help='重新润色已转录但未润色的作品（使用chat模型，失败不重试）',
    )
    parser.add_argument(
        '--retry-failed',
        action='store_true',
        help='重试所有失败的记录（重新转录+两层润色chat→reasoner）',
    )
    parser.add_argument(
        '--whisper-model',
        default=None,
        help='指定Whisper模型（base/small/medium/large），用于--retry-failed',
    )
    parser.add_argument(
        '--pipeline',
        action='store_true',
        help='流水线模式：阶段1快速批量（Tiny+Chat）→ 阶段2智能升级（Base/Small+Reasoner）',
    )

    args = parser.parse_args()

    # Load the configuration file.
    if not os.path.exists(args.config):
        # Fall back to the legacy config path.
        old_config_path = 'worker_config.json'
        if os.path.exists(old_config_path):
            print(f"⚠️  使用旧配置路径: {old_config_path}")
            print(f"建议移动到新位置: {args.config}")
            args.config = old_config_path
        else:
            print(f"配置文件不存在: {args.config}")
            print("请复制 config/worker_config.example.json 为 config/worker_config.json 并修改配置")
            sys.exit(1)

    config = load_config(args.config)

    # Configure logging.
    setup_logging(config)
    logger = logging.getLogger(__name__)

    # Build the API config (supports both the new and the legacy format).
    if 'api' in config and 'base_url' in config['api']:
        # New format: api.base_url + api.prefix
        api_config = {
            'api_url': config['api']['base_url'],
            'api_prefix': config['api'].get('prefix', '/api'),  # defaults to /api
            'timeout': config.get('backend', {}).get('timeout', 30),
            'retry_times': config.get('backend', {}).get('retry_times', 3),
            'retry_interval': config.get('backend', {}).get('retry_interval', 5),
        }
    else:
        # Legacy format: backend.api_url (compatible, has no prefix field)
        api_config = config['backend']
        api_config['api_prefix'] = '/api'  # legacy format defaults to /api

    logger.info("=" * 60)
    logger.info("Worker转录程序启动")
    logger.info(f"机器标识: {config['worker']['machine_name']}")
    logger.info(f"后台API: {api_config['api_url']}")
    logger.info(f"API前缀: {api_config['api_prefix']}")
    logger.info("=" * 60)

    # Initialize the components.
    excel_parser = ExcelParser()
    api_client = APIClient(api_config)
    cache_manager = CacheManager(config.get('cache', {'dir': './cache'}))

    # Build the transcriber config (compatible with the legacy format).
    transcriber_config = {
        'model': {
            'name': config['transcription']['model'],
            'device': config['transcription']['device'],
            'compute_type': config['transcription']['compute_type']
        },
        'output': {
            'dir': './output',
            'formats': []  # do not save files, only return the text
        }
    }
    transcriber = VideoTranscriber(transcriber_config)

    # Check backend connectivity (warn but continue on failure).
    if not api_client.health_check():
        logger.warning("无法连接后台API，请检查配置")

    # Pipeline mode (stage 1 fast batch, stage 2 smart upgrade).
    if args.pipeline:
        logger.info("流水线模式（阶段1快速+阶段2智能升级）")
        if args.excel:
            # Parse the works from the Excel file.
            excel_parser = ExcelParser()
            platform_name, platform_code = excel_parser.detect_platform(args.excel)
            logger.info(f"识别平台: {platform_name} (code={platform_code})")

            # Parse the Excel file.
            works_data = excel_parser.parse(args.excel, platform_code)
            if not works_data or len(works_data) == 0:
                logger.error("Excel文件解析失败或无数据")
                return

            # Step 1: batch-report work info (creates work_info and transcribe_result).
            batch_name = Path(args.excel).stem
            worker_machine = config['worker']['machine_name']

            logger.info(f"批量上报作品信息: {len(works_data)} 条")
            result = api_client.batch_create_works(
                batch_name=batch_name,
                worker_machine=worker_machine,
                platform=platform_code,
                works=works_data,
            )

            if not result.get('success'):
                logger.error(f"上报作品信息失败: {result.get('message')}")
                return

            logger.info(f"作品信息上报成功, 批次ID: {result.get('batchId')}")

            # Step 2: merge databaseId back into works_data.
            need_transcribe = result.get('needTranscribeList', [])
            for work in works_data:
                matched = next((item for item in need_transcribe if item['workId'] == work['workId']), None)
                if matched:
                    work['databaseId'] = matched.get('databaseId')

            # Initialize the polisher.
            try:
                polish_config = load_env_config()
                if not polish_config.get('api_key'):
                    logger.error("未配置 DeepSeek API Key")
                    return
                polisher = TextPolisher(polish_config)
            except Exception as e:
                logger.error(f"加载润色配置失败: {str(e)}")
                return

            # Create the pipeline processor.
            processor = PipelineProcessor(config, api_client, transcriber, polisher)

            # Run the pipeline.
            processor.process(works_data, args.excel)
        else:
            logger.error("流水线模式需要指定 --excel 参数")
        return

    # Retry-failed mode.
    if args.retry_failed:
        logger.info("重试失败记录模式（重新转录+两层润色）")
        if args.excel:
            # Detect the platform from the Excel file.
            excel_parser = ExcelParser()
            platform_name, platform_code = excel_parser.detect_platform(args.excel)
            logger.info(f"识别平台: {platform_name} (code={platform_code})")

            # Override the Whisper model when specified on the CLI.
            if args.whisper_model:
                logger.info(f"使用指定的Whisper模型: {args.whisper_model}")
                transcriber_config['model']['name'] = args.whisper_model
                transcriber = VideoTranscriber(transcriber_config)

            retry_failed_works(config, api_client, transcriber, platform_code)
        else:
            logger.error("重试失败模式需要指定 --excel 参数来识别平台")
        return

    # Repolish mode.
    if args.repolish:
        logger.info("补充润色模式（仅润色，chat模型，失败不重试）")
        if args.excel:
            # Detect the platform from the Excel file.
            excel_parser = ExcelParser()
            platform_name, platform_code = excel_parser.detect_platform(args.excel)
            logger.info(f"识别平台: {platform_name} (code={platform_code})")
            repolish_works(config, api_client, platform_code)
        else:
            logger.error("补充润色模式需要指定 --excel 参数来识别平台")
        return

    # Default mode: collect Excel files to process.
    if args.excel:
        # Process the single file given on the CLI.
        excel_files = [args.excel]
    else:
        # Scan the watch directory.
        excel_files = scan_excel_files(config['worker']['excel_watch_dir'])

    logger.info(f"发现 {len(excel_files)} 个Excel文件")

    # Skip files already marked as processed in the local cache.
    new_files = [
        f for f in excel_files if not cache_manager.is_file_processed(f)
    ]

    logger.info(f"待处理: {len(new_files)} 个文件")

    if not new_files:
        logger.info("没有新的Excel文件需要处理")
        return

    # Process each file in turn.
    for excel_path in new_files:
        process_excel(
            excel_path,
            config,
            excel_parser,
            api_client,
            cache_manager,
            transcriber,
        )

    logger.info("=" * 60)
    logger.info("全部处理完成")
    logger.info("=" * 60)


# Script entry point.
if __name__ == '__main__':
    main()
