#!/usr/bin/env python3
"""
视频转录核心模块
"""

import json
import logging
from pathlib import Path
from faster_whisper import WhisperModel

logger = logging.getLogger(__name__)

# Optional torch import: torch is only needed to probe GPU availability in
# detect_device(). When it is missing, TORCH_AVAILABLE stays False and
# device detection falls back to CPU.
try:
    import torch
    TORCH_AVAILABLE = True
except ImportError:
    TORCH_AVAILABLE = False
    logger.warning("未安装 torch，GPU 加速功能不可用")


def detect_device():
    """Probe the machine and pick the best available compute backend.

    Returns:
        tuple: ``(device, compute_type)`` where ``device`` is one of
        ``'cuda'``, ``'mps'`` or ``'cpu'``, and ``compute_type`` is
        ``'float16'`` for GPU backends or ``'int8'`` for CPU.
    """
    # Without torch there is no way to probe hardware; assume CPU.
    if not TORCH_AVAILABLE:
        logger.info("torch 未安装，使用 CPU 模式")
        return 'cpu', 'int8'

    # NVIDIA CUDA takes priority when present.
    if torch.cuda.is_available():
        logger.info("✓ 检测到 CUDA GPU")
        return 'cuda', 'float16'

    # Apple Silicon (MPS). NOTE(review): faster-whisper/CTranslate2 may not
    # accept 'mps' as a device string — the model loader falls back to CPU
    # on failure; confirm against the installed ctranslate2 version.
    mps_backend = getattr(torch.backends, 'mps', None)
    if mps_backend is not None and mps_backend.is_available():
        logger.info("✓ 检测到 Apple Silicon GPU (MPS)")
        return 'mps', 'float16'

    # No accelerator found; quantized CPU inference is the safe default.
    logger.info("未检测到 GPU，使用 CPU")
    return 'cpu', 'int8'

# Traditional -> Simplified Chinese fallback mapping (common characters).
# Used only when the optional `opencc` package is unavailable or fails;
# unmapped characters pass through unchanged.
# (Fixed: the original literal contained 8 duplicate keys and one
# identity no-op entry, all silently collapsed by dict semantics.)
TRADITIONAL_TO_SIMPLIFIED = {
    '輸': '输', '養': '养', '開': '开', '進': '进', '經': '经', '動': '动',
    '過': '过', '還': '还', '個': '个', '裡': '里', '時': '时', '間': '间',
    '點': '点', '種': '种', '應': '应', '該': '该', '會': '会', '現': '现',
    '們': '们', '來': '来', '對': '对', '國': '国', '為': '为', '沒': '没',
    '這': '这', '麼': '么', '處': '处', '從': '从', '環': '环', '組': '组',
    '結': '结', '資': '资', '產': '产', '電': '电', '視': '视', '網': '网',
    '業': '业', '東': '东', '歡': '欢', '樂': '乐', '聲': '声', '聽': '听',
    '覺': '觉', '話': '话', '語': '语', '說': '说', '講': '讲', '議': '议',
    '謝': '谢', '請': '请', '問': '问', '關': '关', '閉': '闭', '門': '门',
    '車': '车', '園': '园', '場': '场', '廳': '厅', '館': '馆', '樓': '楼',
    '層': '层', '備': '备', '裝': '装', '設': '设', '計': '计', '記': '记',
    '變': '变', '運': '运', '轉': '转', '識': '识', '認': '认', '證': '证',
    '題': '题', '際': '际', '實': '实', '戰': '战', '戲': '戏', '競': '竞',
    '賽': '赛', '機': '机', '構': '构', '標': '标', '準': '准', '確': '确',
    '護': '护', '據': '据', '報': '报',
}

# Precompiled translation table: a single C-level pass over the text,
# equivalent to a per-character dict.get(char, char) loop.
_T2S_TABLE = str.maketrans(TRADITIONAL_TO_SIMPLIFIED)


def _simplify_with_table(text: str) -> str:
    """Fallback conversion using the built-in character mapping table."""
    return text.translate(_T2S_TABLE)


def convert_to_simplified(text: str) -> str:
    """Convert Traditional Chinese characters in *text* to Simplified.

    Prefers the OpenCC library when it is installed; otherwise (or if
    OpenCC raises) falls back to the character-level mapping table.

    Args:
        text: input string; non-Chinese characters pass through unchanged.

    Returns:
        The converted string.
    """
    try:
        import opencc
        # 't2s' = Traditional to Simplified (no '.json' suffix needed).
        converter = opencc.OpenCC('t2s')
        return converter.convert(text)
    except ImportError:
        return _simplify_with_table(text)
    except Exception as e:
        # OpenCC is present but failed; degrade to the simple mapping
        # instead of aborting. Uses the module logger (the original
        # re-imported logging locally).
        logger.warning(f"OpenCC 转换失败，使用简单映射: {str(e)}")
        return _simplify_with_table(text)


class VideoTranscriber:
    """Transcribe video files with faster-whisper, emitting txt/srt/json.

    Expected ``config`` layout (dict)::

        model:  {name, device ('auto'|'cuda'|'mps'|'cpu'), compute_type}
        output: {dir, formats}   # formats: subset of ['txt', 'srt', 'json']
    """

    def __init__(self, config):
        """
        Args:
            config: configuration dict (see class docstring).
        """
        self.config = config
        self.model = None  # assigned by _load_model()
        self._load_model()

    def _resolve_device(self):
        """Resolve (device, compute_type), expanding any 'auto' settings."""
        model_config = self.config['model']
        device = model_config.get('device', 'auto')
        compute_type = model_config.get('compute_type', 'auto')

        if device == 'auto':
            device, auto_compute_type = detect_device()
            if compute_type == 'auto':
                compute_type = auto_compute_type
                logger.info(f"自动选择计算类型: {compute_type}")
        else:
            logger.info(f"使用配置的设备: {device}")
            if compute_type == 'auto':
                # GPU backends run float16; CPU uses int8 quantization.
                compute_type = 'float16' if device in ('cuda', 'mps') else 'int8'
                logger.info(f"自动选择计算类型: {compute_type}")
        return device, compute_type

    def _load_model(self):
        """Load the Whisper model; fall back to CPU/int8 if GPU init fails.

        Raises:
            Exception: whatever WhisperModel raised, when loading on CPU
                (i.e. no further fallback is possible).
        """
        model_config = self.config['model']
        device, compute_type = self._resolve_device()

        logger.info(f"加载 Whisper 模型: {model_config['name']}")
        logger.info(f"  设备: {device}")
        logger.info(f"  计算类型: {compute_type}")

        try:
            self.model = WhisperModel(
                model_config['name'],
                device=device,
                compute_type=compute_type
            )
            logger.info("[OK] Whisper model loaded successfully")

            # Performance hint for the operator.
            if device == 'cuda':
                logger.info("[GPU] Using NVIDIA GPU acceleration, expect 5-10x faster")
            elif device == 'mps':
                logger.info("[GPU] Using Apple Silicon GPU acceleration, expect 3-5x faster")
            else:
                logger.info("[CPU] Using CPU mode, slower performance. GPU recommended for faster processing")

        except Exception as e:
            logger.error(f"Model loading failed: {e}")
            if device in ('cuda', 'mps'):
                # GPU init can fail (missing drivers/libraries, unsupported
                # backend); degrade to CPU rather than aborting.
                logger.warning("GPU loading failed, fallback to CPU mode")
                self.model = WhisperModel(
                    model_config['name'],
                    device='cpu',
                    compute_type='int8'
                )
                logger.info("[OK] CPU mode loaded successfully")
            else:
                raise

    def transcribe(self, video_path, language='zh'):
        """Transcribe a video file and write the configured output formats.

        Args:
            video_path: path to the video file (str or Path).
            language: language code passed to Whisper (default 'zh').

        Returns:
            dict with keys: video, language, duration, segments_count,
            text, output_files.

        Raises:
            FileNotFoundError: if *video_path* does not exist.
        """
        video_path = Path(video_path)

        if not video_path.exists():
            raise FileNotFoundError(f"视频文件不存在: {video_path}")

        logger.debug(f"开始转录视频: {video_path.name}")

        # initial_prompt nudges the model toward Simplified Chinese output
        # (Whisper may otherwise emit Traditional characters);
        # beam_size/best_of=5 trade speed for accuracy, and temperature=0
        # makes decoding deterministic.
        segments, info = self.model.transcribe(
            str(video_path),
            language=language,
            initial_prompt="以下是普通话的句子，请使用简体中文。这是一段视频的语音内容。",
            beam_size=5,
            best_of=5,
            temperature=0.0
        )

        # Collect segments, normalizing any Traditional characters.
        all_segments = []
        full_text = []
        for segment in segments:
            text = convert_to_simplified(segment.text.strip())
            all_segments.append({
                "start": segment.start,
                "end": segment.end,
                "text": text
            })
            full_text.append(text)

        output_files = self._write_outputs(video_path, info, all_segments, full_text)

        return {
            "video": str(video_path),
            "language": info.language,
            "duration": info.duration,
            "segments_count": len(all_segments),
            "text": '\n'.join(full_text),
            "output_files": output_files
        }

    def _write_outputs(self, video_path, info, segments, full_text):
        """Write the enabled output formats; return the list of file paths."""
        output_dir = Path(self.config['output']['dir'])
        # parents=True: the original mkdir failed whenever the configured
        # output dir's parent directory did not exist yet.
        output_dir.mkdir(parents=True, exist_ok=True)

        base_name = video_path.stem
        formats = self.config['output']['formats']
        output_files = []

        # Plain-text transcript, one segment per line.
        if 'txt' in formats:
            txt_file = output_dir / f"{base_name}.txt"
            txt_file.write_text('\n'.join(full_text), encoding='utf-8')
            output_files.append(str(txt_file))
            logger.info(f"  ✓ 保存文本: {txt_file.name}")

        # SRT subtitle file.
        if 'srt' in formats:
            srt_file = output_dir / f"{base_name}.srt"
            self._save_srt(segments, srt_file)
            output_files.append(str(srt_file))
            logger.info(f"  ✓ 保存字幕: {srt_file.name}")

        # Full JSON dump, including per-segment timings.
        if 'json' in formats:
            json_file = output_dir / f"{base_name}.json"
            result_data = {
                "video": str(video_path),
                "language": info.language,
                "duration": info.duration,
                "segments": segments,
                "full_text": '\n'.join(full_text)
            }
            json_file.write_text(
                json.dumps(result_data, ensure_ascii=False, indent=2),
                encoding='utf-8'
            )
            output_files.append(str(json_file))
            logger.info(f"  ✓ 保存JSON: {json_file.name}")

        return output_files

    def _save_srt(self, segments, output_path):
        """Write *segments* (dicts with start/end/text) as an SRT file."""
        with open(output_path, 'w', encoding='utf-8') as f:
            for i, seg in enumerate(segments, 1):
                start = self._format_timestamp(seg['start'])
                end = self._format_timestamp(seg['end'])
                f.write(f"{i}\n")
                f.write(f"{start} --> {end}\n")
                f.write(f"{seg['text']}\n\n")

    def _format_timestamp(self, seconds):
        """Format a second count as an SRT timestamp (HH:MM:SS,mmm)."""
        millis = int((seconds % 1) * 1000)
        total = int(seconds)
        hours = total // 3600
        minutes, secs = divmod(total % 3600, 60)
        return f"{hours:02d}:{minutes:02d}:{secs:02d},{millis:03d}"
