from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
from datasets import Dataset, Audio
from typing import Union
import torch
import logging

logger = logging.getLogger(__name__)

class SimpleWhisperASR:
    """Simplified Whisper ASR wrapper (based on the reference implementation).

    Loads ``openai/whisper-large-v3`` once at construction via the HF
    ``pipeline`` API and exposes helpers to transcribe a single audio file
    and post-process the resulting timestamped chunks.
    """

    def __init__(self,
                 device='cuda:9',
                 max_new_tokens=1024,
                 language='chinese',
                 task='transcribe',
                 chunk_length_s=30,
                 batch_size=16,
                 word_level_timestamps=True):
        """Initialize the Whisper ASR pipeline.

        Args:
            device: Preferred CUDA device (falls back to ``'cpu'`` when
                CUDA is unavailable).
            max_new_tokens: Maximum number of generated tokens per chunk.
            task: Task type (``'transcribe'`` or ``'translate'``).
            language: Transcription language forwarded to ``generate``.
            chunk_length_s: Audio chunk length in seconds.
            batch_size: Pipeline batch size.
            word_level_timestamps: If True, request word-level timestamps;
                otherwise chunk-level ``(start, end)`` timestamps.
        """
        self.device = device if torch.cuda.is_available() else 'cpu'
        logger.info(f"🎤 Whisper ASR 初始化，使用设备: {self.device}")

        # fp16 on GPU halves memory; CPU inference requires fp32.
        torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

        model_id = 'openai/whisper-large-v3'
        logger.info(f"📥 正在加载模型: {model_id}")

        # Load model and processor.
        model = AutoModelForSpeechSeq2Seq.from_pretrained(
            model_id,
            torch_dtype=torch_dtype,
            low_cpu_mem_usage=True,
            use_safetensors=True
        )
        model.to(self.device)

        processor = AutoProcessor.from_pretrained(model_id)

        # Build the pipeline exactly as in the reference implementation.
        self.pipe = pipeline(
            'automatic-speech-recognition',
            model=model,
            tokenizer=processor.tokenizer,
            feature_extractor=processor.feature_extractor,
            chunk_length_s=chunk_length_s,
            max_new_tokens=max_new_tokens,
            batch_size=batch_size,
            # 'word' requests per-word timestamps; True gives chunk-level
            # (start, end) tuples instead.
            return_timestamps='word' if word_level_timestamps else True,
            torch_dtype=torch_dtype,
            device=self.device
        )

        # Forwarded to model.generate() on every pipeline call.
        self._kwargs = {'language': language, 'task': task}
        logger.info(f"✅ Whisper ASR 初始化完成，词级时间戳: {'启用' if word_level_timestamps else '禁用'}")

    def transcribe(self, audio_file: str) -> list:
        """Transcribe a single audio file (simplified version).

        Args:
            audio_file: Path to the audio file.

        Returns:
            The pipeline's list of timestamped chunks — each chunk is a
            dict with at least ``'text'`` plus timestamp info (``'words'``
            or ``'timestamp'`` depending on configuration).

        Raises:
            Re-raises any exception from loading/inference after logging it.
        """
        logger.info(f"🎵 开始转录音频: {audio_file}")

        try:
            # Build a one-item dataset, resampled to the 16 kHz rate
            # Whisper expects (as in the reference implementation).
            ds = Dataset.from_dict({'audio': [audio_file]}).cast_column(
                'audio', Audio(sampling_rate=16000))

            # Single file -> take the first (only) result.
            result = self._handle(ds)[0]
            logger.info(f"✅ 转录完成")
            return result

        except Exception as e:
            logger.error(f"❌ 转录失败: {e}")
            raise  # bare raise preserves the original traceback

    def _handle(self, ds) -> list:
        """Run the ASR pipeline over every sample in ``ds``.

        Returns:
            One entry per sample: the pipeline's ``'chunks'`` list of
            timestamped segments.
        """
        res = []
        for data in ds:
            sample = data['audio']
            logger.info(f"🔊 处理音频片段，时长: {sample['array'].size / sample['sampling_rate']:.2f}秒")

            # .copy() because the pipeline may mutate the sample dict.
            pred = self.pipe(sample.copy(), generate_kwargs=self._kwargs)
            res.append(pred['chunks'])
        return res

    def get_transcription_text(self, result: list) -> str:
        """Join the chunk texts of a transcription result into one string.

        Args:
            result: Chunk list as returned by :meth:`transcribe`.

        Returns:
            All non-empty chunk texts, stripped and joined with spaces;
            ``""`` for empty or non-list input.
        """
        if not result or not isinstance(result, list):
            return ""

        text_parts = []
        for chunk in result:
            if isinstance(chunk, dict) and 'text' in chunk:
                text = chunk['text'].strip()
                if text:
                    text_parts.append(text)

        return ' '.join(text_parts)

    def get_word_timestamps(self, result: list) -> list:
        """Extract word-level timestamps from a transcription result.

        Args:
            result: Chunk list as returned by :meth:`transcribe`.

        Returns:
            A list of ``{'word', 'start', 'end'}`` dicts. When a chunk
            carries only a chunk-level timestamp, its text is split on
            whitespace and the chunk duration is divided evenly among the
            words (an approximation, not true alignment).
        """
        word_timestamps = []

        if not result or not isinstance(result, list):
            return word_timestamps

        for chunk in result:
            if isinstance(chunk, dict):
                if 'words' in chunk and chunk['words']:
                    # Real word-level timestamps are available.
                    for word_info in chunk['words']:
                        if isinstance(word_info, dict):
                            word_timestamps.append({
                                'word': word_info.get('word', ''),
                                'start': word_info.get('start', 0),
                                'end': word_info.get('end', 0)
                            })
                elif 'text' in chunk and 'timestamp' in chunk:
                    # No word-level timestamps: fall back to the chunk-level
                    # timestamp and spread it evenly across the words.
                    text = chunk['text'].strip()
                    if text:
                        timestamp = chunk['timestamp']
                        if isinstance(timestamp, (list, tuple)) and len(timestamp) >= 2:
                            start = float(timestamp[0])
                            end = float(timestamp[1])

                            words = text.split()
                            if words:  # guards the division below
                                duration = end - start
                                word_duration = duration / len(words)

                                for i, word in enumerate(words):
                                    word_start = start + i * word_duration
                                    word_end = word_start + word_duration
                                    word_timestamps.append({
                                        'word': word,
                                        'start': word_start,
                                        'end': word_end
                                    })

        return word_timestamps