import whisper
import torch
import logging

logger = logging.getLogger(__name__)

class WorkingWhisperASR:
    """GPU-backed ASR wrapper around the OpenAI Whisper library.

    Loads a Whisper model onto an explicitly requested CUDA device (GPU-only
    by design — there is deliberately no CPU fallback) and exposes
    transcription plus helpers for extracting text and word-level timestamps
    from Whisper's raw result dict.
    """

    def __init__(self, device: str = 'cuda:9', model_size: str = 'base'):
        """Validate the CUDA device and load the Whisper model onto it.

        Args:
            device: CUDA device string, e.g. ``'cuda:9'``. A bare ``'cuda'``
                (no index) defaults to GPU index 9.
            model_size: Whisper model size name (``'tiny'``, ``'base'``, ...).

        Raises:
            ValueError: if the device index cannot be parsed as an integer.
            RuntimeError: if CUDA is unavailable, the GPU index is out of
                range, or model loading fails.
        """
        self.model_size = model_size
        self.requested_device = device

        # Fail fast when CUDA is missing instead of silently falling back
        # to a slow CPU run.
        if not torch.cuda.is_available():
            raise RuntimeError("CUDA不可用，无法使用GPU模式")

        # Parse the GPU index out of the device string ('cuda:N' -> N).
        gpu_id_str = device.split(':')[-1] if ':' in device else '9'
        try:
            gpu_id = int(gpu_id_str)
        except ValueError as e:
            # Chain the parse failure so the root cause is preserved.
            raise ValueError(f"无效的设备格式: {device}, 错误: {e}") from e

        if gpu_id >= torch.cuda.device_count():
            raise RuntimeError(
                f"GPU {device} 不可用，可用GPU数量: {torch.cuda.device_count()}"
            )

        self.device = device
        logger.info(f"🚀 强制使用GPU: {device}")
        logger.info(f"📊 GPU数量: {torch.cuda.device_count()}")
        logger.info(f"🔥 当前GPU: {torch.cuda.get_device_name(gpu_id)}")

        logger.info(f"📥 加载Whisper模型: {model_size}")
        try:
            logger.info(f"🎯 正在GPU {self.device} 上加载模型...")
            self.model = whisper.load_model(model_size, device=self.device)
            logger.info(f"✅ Whisper初始化完成，使用GPU: {self.device}")

            # Sanity check: report where the weights actually landed.
            model_device = next(self.model.parameters()).device
            logger.info(f"🔍 模型实际设备: {model_device}")

        except Exception as e:
            logger.error(f"❌ GPU模式下Whisper初始化失败: {e}")
            # Re-wrap with context, chaining the original error.
            raise RuntimeError(f"GPU初始化失败，请检查CUDA配置: {e}") from e

    def transcribe(self, audio_file: str) -> dict:
        """Transcribe an audio file on the GPU.

        Args:
            audio_file: Path to the audio file to transcribe.

        Returns:
            Whisper's raw result dict (keys include ``'text'`` and
            ``'segments'`` with per-word timestamps).

        Raises:
            Exception: whatever ``model.transcribe`` raises is re-raised
                unchanged after being logged.
        """
        logger.info(f"🎵 开始在GPU上转录: {audio_file}")
        logger.info(f"🔥 使用设备: {self.device}")

        try:
            logger.info("⚡ 启用FP16精度以加速GPU推理...")

            result = self.model.transcribe(
                audio_file,
                language='chinese',
                word_timestamps=True,   # needed for get_word_timestamps()
                fp16=True,              # half precision for GPU throughput
                verbose=False,          # keep Whisper's own logging quiet
                task='transcribe',
            )

            logger.info(f"✅ 转录完成，文本长度: {len(result['text'])}字符")

            return result

        except Exception as e:
            logger.error(f"❌ 转录失败: {e}")
            # Bare raise preserves the original traceback (was `raise e`).
            raise

    def get_text_from_result(self, result: dict) -> str:
        """Return the stripped transcript text from a Whisper result dict."""
        return result.get('text', '').strip()

    def get_word_timestamps(self, result: dict) -> list:
        """Flatten per-segment word timings into a single list.

        Args:
            result: Whisper result dict, as returned by :meth:`transcribe`.

        Returns:
            List of ``{'word', 'start', 'end'}`` dicts; empty when the
            result carries no segments or no word-level timing info.
        """
        word_timestamps = []

        for segment in result.get('segments', []):
            for word_info in segment.get('words', []):
                word_timestamps.append({
                    'word': word_info.get('word', ''),
                    'start': word_info.get('start', 0),
                    'end': word_info.get('end', 0),
                })

        return word_timestamps