import torch 
from transformers import pipeline
import ModelInterfaces as mi # Changed from 'from ModelInterfaces import IASRModel'
from typing import Union
import numpy as np 
import gc
import time
import os

class WhisperASRModel(mi.IASRModel):
    """ASR backend built on the HuggingFace Whisper pipeline.

    Loads the checkpoint from a local HF cache snapshot when one exists
    (falling back to a hub download), runs a two-pass transcription
    (language auto-detection, then word-timestamped transcription with the
    chosen language) and exposes transcript / word locations through the
    IASRModel interface.
    """

    def __init__(self, model_name="openai/whisper-small", device=None):
        """Build the ASR pipeline.

        Args:
            model_name: HF hub id of the Whisper checkpoint.
            device: a torch.device, a device string ("cuda"/"cpu"), or None
                to auto-select CUDA when available.

        Raises:
            IOError: if the pipeline cannot be constructed.
        """
        actual_device = self._resolve_device(device)
        print(f"[WhisperASRModel] Initializing on device: {actual_device}")

        # Release cached GPU memory before loading a large model.
        if actual_device.type == "cuda":
            torch.cuda.empty_cache()
            gc.collect()

        # Prefer a concrete local snapshot path so we never hit the network
        # when the model is already cached.
        model_to_load = self._find_local_snapshot(model_name)

        try:
            print(f"[WhisperASRModel] Loading model from: {model_to_load}...")
            start_time = time.time()

            self.asr = pipeline(
                "automatic-speech-recognition",
                model=model_to_load,
                return_timestamps="word",
                device=actual_device,
                # fp16 only makes sense on GPU.
                torch_dtype=torch.float16 if actual_device.type == "cuda" else torch.float32,
                model_kwargs={
                    "attn_implementation": "eager",
                    "use_cache": True,
                },
            )

            load_time = time.time() - start_time
            print(f"[WhisperASRModel] Model loaded successfully in {load_time:.2f}s")

        except Exception as e:
            print(f"[WhisperASRModel] ❌ Failed to load {model_name}: {e}")
            raise IOError(f"Could not load Whisper model {model_name}") from e

        self._transcript = ""
        self._word_locations = []
        self._detected_language = None  # populated by processAudio()
        self.sample_rate = 16000  # Whisper's expected input sample rate

    @staticmethod
    def _resolve_device(device) -> torch.device:
        """Normalize a device argument (torch.device / str / None) to torch.device."""
        if isinstance(device, torch.device):
            return device
        if isinstance(device, str):
            return torch.device(device)
        return torch.device("cuda" if torch.cuda.is_available() else "cpu")

    @staticmethod
    def _find_local_snapshot(model_name: str) -> str:
        """Return a usable local HF cache snapshot path for *model_name*,
        or the model name itself (which triggers a hub download)."""
        hf_home = os.environ.get("HF_HOME", "/root/.cache/huggingface")
        model_repo_path = os.path.join(hf_home, "hub", f"models--{model_name.replace('/', '--')}")

        if not os.path.exists(model_repo_path):
            print(f"[WhisperASRModel] ⚠️ Local cache for {model_name} not found. Will attempt to download.")
            return model_name

        snapshots_dir = os.path.join(model_repo_path, "snapshots")
        snapshots = os.listdir(snapshots_dir) if os.path.exists(snapshots_dir) else []
        if not snapshots:
            print(f"[WhisperASRModel] ⚠️ Cache directory exists, but no snapshots folder. Will attempt to download.")
            return model_name

        # Fix: os.listdir() order is arbitrary; deterministically pick the
        # most recently modified snapshot instead of snapshots[0].
        latest_snapshot = max(
            snapshots,
            key=lambda d: os.path.getmtime(os.path.join(snapshots_dir, d)),
        )
        model_snapshot_path = os.path.join(snapshots_dir, latest_snapshot)

        # Confirm the snapshot actually contains weight files.
        if any(f.endswith(('.bin', '.safetensors')) for f in os.listdir(model_snapshot_path)):
            print(f"[WhisperASRModel] ✅ Found local snapshot. Using direct path: {model_snapshot_path}")
            return model_snapshot_path

        print(f"[WhisperASRModel] ⚠️ Snapshot directory found, but no model files inside. Will attempt to download.")
        return model_name

    def processAudio(self, audio: Union[np.ndarray, torch.Tensor], language: str = "en"):
        """Transcribe *audio* and cache transcript / word locations.

        Args:
            audio: mono (or [1, N]) waveform at 16 kHz; Tensor or ndarray.
                NOTE(review): assumes the caller already resampled to 16 kHz.
            language: preferred transcription language; Whisper's own
                detection overrides it on mismatch.

        Raises:
            TypeError: if audio is neither a Tensor nor an ndarray.
        """
        print(f"[WhisperASRModel] Processing audio with language preference: {language}...")

        audio_np = self._to_mono_numpy(audio)
        result = self._transcribe_with_fallbacks(audio_np, language)

        self._transcript = result["text"]
        self._word_locations = self._chunks_to_word_locations(result)

    @staticmethod
    def _to_mono_numpy(audio: Union[np.ndarray, torch.Tensor]) -> np.ndarray:
        """Convert the input waveform to a 1-D numpy array (first channel)."""
        if isinstance(audio, torch.Tensor):
            # Move to CPU and squeeze [1, N] -> [N] before the numpy conversion.
            audio_np = audio.squeeze().detach().cpu().numpy()
        elif isinstance(audio, np.ndarray):
            audio_np = audio.squeeze()
        else:
            raise TypeError("Audio input must be a PyTorch Tensor or NumPy array")

        if audio_np.ndim > 1:
            print(f"Warning: Received multi-channel audio (shape {audio_np.shape}), using first channel.")
            audio_np = audio_np[0]
        return audio_np

    def _transcribe_with_fallbacks(self, audio_np: np.ndarray, language: str) -> dict:
        """Run the two-pass ASR; degrade gracefully on known runtime errors.

        Sets self._detected_language as a side effect and always returns a
        dict with at least a "text" key (and "chunks" where possible).
        """
        try:
            print(f"[WhisperASRModel] Starting ASR with auto-detection first...")
            start_time = time.time()

            # Pass 1: let Whisper auto-detect the language (no language kwarg).
            # NOTE(review): relies on the pipeline result carrying a
            # "language" entry; if absent we fall back to the requested one.
            detection_result = self.asr(
                audio_np,
                batch_size=1,
                return_timestamps=False,
                generate_kwargs={
                    "task": "transcribe",
                },
            )

            detected_lang_token = detection_result.get("language")
            detected_language = detected_lang_token.strip('<|>') if detected_lang_token else None
            self._detected_language = detected_language or language

            print(f"[WhisperASRModel] Auto-detected language: {detected_language}, Requested: {language}")

            # Pass 2: prefer the detected language when it disagrees with the
            # caller's preference — accuracy beats the caller's guess.
            final_language = language
            if detected_language and detected_language != language:
                print(f"[WhisperASRModel] ⚠️  Language mismatch detected!")
                print(f"  - Expected: {language}")
                print(f"  - Detected: {detected_language}")
                print(f"  - Using detected language for better accuracy")
                final_language = detected_language

            # Full transcription with word timestamps.
            result = self.asr(
                audio_np,
                batch_size=1,
                return_timestamps="word",
                generate_kwargs={
                    "task": "transcribe",
                    "language": final_language,
                },
            )

            inference_time = time.time() - start_time
            print(f"[WhisperASRModel] ASR inference completed in {inference_time:.2f}s using language: {final_language}")
            return result

        except RuntimeError as e:
            if "expanded size of the tensor" not in str(e):
                raise  # fix: bare raise keeps the original traceback
            print(f"[WhisperASRModel] 检测到维度不匹配错误，尝试fallback方案: {e}")
            return self._fallback_transcribe(audio_np, language)
        except Exception as e:
            print(f"[WhisperASRModel] Unexpected error during ASR: {e}")
            # Last resort: empty result keeps the caller's contract intact.
            self._detected_language = language
            return {"text": "", "chunks": []}

    def _fallback_transcribe(self, audio_np: np.ndarray, language: str) -> dict:
        """Simplified transcription used when word timestamps blow up."""
        try:
            # Fix: return_timestamps is a pipeline-level argument, not a
            # generate() kwarg — pass it at the top level.
            result = self.asr(
                audio_np,
                return_timestamps=False,
                generate_kwargs={
                    "language": language,  # fall back to the caller's language
                    "task": "transcribe",
                    "num_beams": 1,
                    "do_sample": False,
                },
            )
            self._detected_language = language
        except Exception as e2:
            print(f"[WhisperASRModel] Fallback也失败，使用最基础转录: {e2}")
            # Final fallback: plain text only, no timestamps at all.
            result = self.asr(audio_np, return_timestamps=False)
            self._detected_language = language

        # Without real timestamps, synthesize 0.5 s/word placeholder chunks
        # so downstream word-location consumers still get something.
        if not result.get("chunks"):
            words = result["text"].strip().split() if result.get("text") else [""]
            result["chunks"] = [
                {"text": word, "timestamp": [i * 0.5, (i + 1) * 0.5]}
                for i, word in enumerate(words)
            ]
        return result

    def _chunks_to_word_locations(self, result: dict) -> list:
        """Convert pipeline word chunks into sample-tick word locations."""
        word_locations = []
        chunks = result.get("chunks")
        if not chunks:
            print("Warning: ASR result did not contain 'chunks' for word timestamps.")
            return word_locations

        for word_info in chunks:
            # Fix: guard a present-but-None "timestamp" value, not just a
            # missing key (indexing None crashed before).
            word_start, word_end = (word_info.get("timestamp") or (None, None))[:2]

            start_ts_ticks = None
            end_ts_ticks = None
            if word_start is not None:
                start_ts_ticks = word_start * self.sample_rate
            if word_end is not None:
                end_ts_ticks = word_end * self.sample_rate
            elif word_start is not None:
                # Missing end: estimate 0.5 s after the start.
                end_ts_ticks = (word_start + 0.5) * self.sample_rate

            word_locations.append({
                "word": word_info["text"],
                "start_ts": start_ts_ticks,
                "end_ts": end_ts_ticks,
                "tag": "processed",
            })
        return word_locations

    def getTranscript(self) -> str:
        return self._transcript

    def getWordLocations(self) -> list:
        return self._word_locations

    def getDetectedLanguage(self) -> Union[str, None]:
        """Return the language detected during processAudio (None before any run)."""
        return self._detected_language