from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
from datasets import Dataset, Audio
from typing import Union, Iterable
import torch
import logging

logger = logging.getLogger(__name__)

class ReferenceWhisperASR:
    """Whisper ASR wrapper that follows the reference implementation exactly.

    Loads a Whisper checkpoint into a HuggingFace
    ``automatic-speech-recognition`` pipeline and exposes:

    * ``__call__`` — transcribe one audio file path (returns its ``chunks``
      list) or an iterable of paths (returns a list of ``chunks`` lists);
    * ``get_text_from_chunks`` — join chunk texts into one string;
    * ``get_word_timestamps`` — flatten chunks into word-level timestamps.
    """

    def __init__(self,
                 device=None,
                 max_new_tokens=128,
                 language='chinese',
                 task='transcribe',
                 chunk_length_s=30,
                 batch_size=16,
                 word_level_timestamps=True,
                 model_id='openai/whisper-large-v3'):
        """Load the model and build the transcription pipeline.

        Args:
            device: torch device string (e.g. ``'cuda:0'``). Defaults to
                ``'cuda'`` when CUDA is available, else ``'cpu'``.
                (Previously hard-coded to the machine-specific ``'cuda:9'``,
                which fails on hosts with fewer than 10 GPUs.)
            max_new_tokens: generation cap per audio chunk.
            language: language hint forwarded to ``model.generate``.
            task: ``'transcribe'`` or ``'translate'``.
            chunk_length_s: seconds of audio per pipeline chunk.
            batch_size: pipeline batch size.
            word_level_timestamps: if True request word-level timestamps,
                otherwise chunk-level ones.
            model_id: HuggingFace checkpoint to load (new, backward-compatible
                parameter; default matches the previous hard-coded value).
        """
        self.device = device if device is not None else ('cuda' if torch.cuda.is_available() else 'cpu')
        # fp16 only pays off on GPU; CPU inference stays in fp32.
        torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

        logger.info("🎤 加载Whisper模型: %s", model_id)
        logger.info("📱 设备: %s", self.device)

        model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id,
                                                          torch_dtype=torch_dtype,
                                                          low_cpu_mem_usage=True,
                                                          use_safetensors=True)
        model.to(self.device)
        processor = AutoProcessor.from_pretrained(model_id)
        self.pipe = pipeline('automatic-speech-recognition',
                             model=model,
                             tokenizer=processor.tokenizer,
                             feature_extractor=processor.feature_extractor,
                             chunk_length_s=chunk_length_s,
                             max_new_tokens=max_new_tokens,
                             batch_size=batch_size,
                             # 'word' → word-level timestamps; True → chunk-level.
                             return_timestamps='word' if word_level_timestamps else True,
                             torch_dtype=torch_dtype,
                             device=self.device)
        # Extra kwargs forwarded to generate() on every pipeline call.
        self._kwargs = {'language': language, 'task': task}
        logger.info("✅ Whisper初始化完成")

    def __call__(self, audio_file: Union[str, Iterable]) -> Union[str, Iterable]:
        """Transcribe audio, mirroring the reference implementation's call style.

        Args:
            audio_file: a single file path, or an iterable of file paths.

        Returns:
            For a single path: that file's ``chunks`` list.
            For an iterable: a list of ``chunks`` lists, one per file.
        """
        logger.info("🎵 开始转录: %s", audio_file)

        single = isinstance(audio_file, str)
        # Decode (and resample to 16 kHz) lazily via the datasets Audio feature.
        ds = Dataset.from_dict({'audio': [audio_file] if single else audio_file}).cast_column(
            'audio', Audio(sampling_rate=16000))

        results = self._handle(ds)
        if single:
            logger.info("✅ 转录完成")
            return results[0]
        logger.info("✅ 批量转录完成")
        return results

    def _handle(self, ds: Dataset) -> list:
        """Run the pipeline over every decoded sample in *ds*.

        Returns a list of per-file ``chunks`` lists; re-raises on any failure
        (with the full traceback captured in the log).
        """
        res = []
        for data in ds:
            sample = data['audio']
            logger.info("🔊 处理音频片段")

            try:
                # copy(): the pipeline mutates the sample dict in place.
                pred = self.pipe(sample.copy(), generate_kwargs=self._kwargs)
            except Exception as e:
                # logger.exception records the traceback; a bare `raise`
                # preserves the original exception and traceback.
                logger.exception("❌ 音频片段处理失败: %s", e)
                raise
            res.append(pred['chunks'])
            logger.info("✅ 音频片段处理完成，chunks数量: %d", len(pred['chunks']))

        return res

    def get_text_from_chunks(self, chunks):
        """Join the ``text`` fields of *chunks* with commas (reference style).

        Returns an empty string for empty/None input or on extraction failure
        (best-effort by design; the error is logged).
        """
        if not chunks:
            return ""

        try:
            # Reference implementation reads x['text'] from each chunk.
            texts = [chunk['text'] for chunk in chunks if isinstance(chunk, dict) and 'text' in chunk]
            result = ','.join(texts)
            logger.info("📝 提取的文本长度: %d", len(result))
            return result
        except Exception as e:
            logger.error("❌ 提取文本失败: %s", e)
            return ""

    def get_word_timestamps(self, chunks):
        """Flatten *chunks* into a list of ``{'word', 'start', 'end'}`` dicts.

        Prefers per-chunk ``words`` entries; otherwise splits the chunk text
        evenly across the chunk's ``timestamp`` span. Chunks with a missing
        (``None``) start/end — which Whisper emits for the final chunk — are
        skipped instead of aborting the whole extraction (previously
        ``float(None)`` raised and ALL collected timestamps were discarded).
        """
        word_timestamps = []

        if not chunks:
            return word_timestamps

        try:
            for chunk in chunks:
                if not isinstance(chunk, dict):
                    continue
                if chunk.get('words'):
                    # Word-level timestamps are available directly.
                    for word_info in chunk['words']:
                        if isinstance(word_info, dict):
                            word_timestamps.append({
                                'word': word_info.get('word', ''),
                                'start': float(word_info.get('start', 0)),
                                'end': float(word_info.get('end', 0))
                            })
                elif 'text' in chunk and chunk['text'].strip():
                    # No word-level data: distribute the chunk span evenly.
                    text = chunk['text'].strip()
                    ts = chunk.get('timestamp')
                    if (isinstance(ts, (list, tuple)) and len(ts) >= 2
                            and ts[0] is not None and ts[1] is not None):
                        start = float(ts[0])
                        end = float(ts[1])

                        words = text.split()
                        if words:
                            word_duration = (end - start) / len(words)

                            for i, word in enumerate(words):
                                word_start = start + i * word_duration
                                word_timestamps.append({
                                    'word': word,
                                    'start': word_start,
                                    'end': word_start + word_duration
                                })

            logger.info("📊 词级时间戳数量: %d", len(word_timestamps))
            return word_timestamps

        except Exception as e:
            # Best-effort by design: log and return an empty result.
            logger.error("❌ 提取词级时间戳失败: %s", e)
            return []