"""
AudioFly 推理引擎
参考: https://ai.gitcode.com/ifly_opensource/AudioFly
AudioFly 是文本到音频生成模型（LDM架构）
"""
import os
import torch
import logging
from typing import Dict, List, Optional, Union
import numpy as np
import librosa
import soundfile as sf

logger = logging.getLogger(__name__)


class AudioFlyInference:
    """Inference engine for AudioFly, a text-to-audio generation model (LDM).

    Reference: https://ai.gitcode.com/ifly_opensource/AudioFly

    The primary task is text-to-audio generation through the wrapped model's
    ``generate_sample`` method.  The remaining tasks (transcription,
    translation, classification, enhancement) are best-effort paths whose
    behavior depends on what the wrapped model/processor actually support.
    """

    # Same Logger instance as the module-level ``logger`` (logging.getLogger
    # memoizes by name); bound here so every method can reach it via self.
    _LOG = logging.getLogger(__name__)

    # Sample rate all audio inputs are normalized to before inference.
    _TARGET_SR = 16000

    def __init__(self, model, processor, device: str):
        """
        Args:
            model: loaded AudioFly model; expected to expose
                ``generate_sample`` for the generation task.
            processor: optional feature extractor / tokenizer used by the
                audio-input tasks; may be None.
            device: torch device string, e.g. "cpu" or "cuda:0".
        """
        self.model = model
        self.processor = processor
        self.device = device

    def process(
        self,
        text: Optional[str] = None,
        audio: Optional[Union[str, np.ndarray, Dict]] = None,
        task: str = "generation",
        name: Optional[str] = None,
        savedir: Optional[str] = None,
        cfg: float = 3.5,
        ddim_steps: int = 200,
        **kwargs
    ) -> Dict:
        """Dispatch a request to the handler for ``task``.

        Reference: https://ai.gitcode.com/ifly_opensource/AudioFly/blob/main/README.md

        Args:
            text: text prompt (generation task).
            audio: audio input for the other tasks; file path or waveform.
            task: one of "generation", "transcription", "translation",
                "classification", "enhancement".
            name: output file stem (generation only).
            savedir: output directory (generation only).
            cfg: classifier-free guidance scale (default 3.5).
            ddim_steps: number of DDIM denoising steps (default 200).
            **kwargs: forwarded unchanged to the task handler.

        Returns:
            Task-specific result dict.

        Raises:
            ValueError: for an unknown ``task``.
        """
        try:
            # AudioFly is primarily text-to-audio: a text-only request is
            # treated as generation regardless of the declared task.
            if task == "generation" or (text and not audio):
                return self._generate_audio(text, name, savedir, cfg, ddim_steps, **kwargs)
            if task == "transcription":
                return self._transcribe(audio, **kwargs)
            if task == "translation":
                return self._translate(audio, **kwargs)
            if task == "classification":
                return self._classify(audio, **kwargs)
            if task == "enhancement":
                return self._enhance(audio, **kwargs)
            raise ValueError(f"不支持的任务类型: {task}")
        except Exception as e:
            self._LOG.error(f"处理失败: {str(e)}")
            raise

    def _generate_audio(self, text: str, name: Optional[str] = None,
                        savedir: Optional[str] = None, cfg: float = 3.5,
                        ddim_steps: int = 200, **kwargs) -> Dict:
        """Generate audio from a text prompt (AudioFly's primary task).

        Reference: https://ai.gitcode.com/ifly_opensource/AudioFly/blob/main/README.md

        Args:
            text: prompt describing the audio to generate; required.
            name: output file stem; defaults to md5(text)[:8].
            savedir: output directory; defaults to /app/result.
            cfg: classifier-free guidance scale.
            ddim_steps: number of DDIM denoising steps.

        Returns:
            Dict with the output file path (or None) and a status string.

        Raises:
            ValueError: when ``text`` is missing.
            AttributeError: when the model has no ``generate_sample``.
        """
        try:
            if not text:
                raise ValueError("文本到音频生成需要提供text参数")

            if name is None:
                # Deterministic short name derived from the prompt.
                import hashlib
                name = hashlib.md5(text.encode()).hexdigest()[:8]

            if savedir is None:
                savedir = "/app/result"
            os.makedirs(savedir, exist_ok=True)

            self._LOG.info(f"生成音频: text='{text}', name={name}, savedir={savedir}")

            if not hasattr(self.model, 'generate_sample'):
                raise AttributeError("模型没有generate_sample方法")

            self.model.generate_sample(
                textlist=[text],
                name=name,
                cfg=cfg,
                ddim_steps=ddim_steps,
                outputdir=savedir,
                **kwargs
            )

            # The model usually writes <name>.wav; fall back to other common
            # audio extensions if that exact file is missing.
            output_file = os.path.join(savedir, f"{name}.wav")
            if not os.path.exists(output_file):
                for ext in ('.mp3', '.flac', '.ogg'):
                    candidate = os.path.join(savedir, f"{name}{ext}")
                    if os.path.exists(candidate):
                        output_file = candidate
                        break

            # Evaluate existence once so output_file and status can't disagree.
            found = os.path.exists(output_file)
            return {
                "task": "generation",
                "text": text,
                "output_file": output_file if found else None,
                "savedir": savedir,
                "name": name,
                "status": "success" if found else "generated_but_file_not_found"
            }

        except Exception as e:
            self._LOG.error(f"音频生成失败: {str(e)}")
            raise

    def _load_audio(self, audio_input):
        """Load audio from a path or array, normalized to mono 16 kHz.

        Returns:
            Tuple (waveform ndarray, sample_rate).

        Raises:
            ValueError: for unsupported input types.
        """
        if isinstance(audio_input, str):
            try:
                return librosa.load(audio_input, sr=self._TARGET_SR)
            except Exception as e:
                # Some formats librosa cannot open; retry with soundfile.
                self._LOG.debug("librosa load failed (%s); falling back to soundfile", e)
                audio, sr = sf.read(audio_input)
                # soundfile returns (frames, channels) for multi-channel
                # files; downmix to mono so downstream code gets a 1-D array.
                if audio.ndim > 1:
                    audio = audio.mean(axis=1)
                if sr != self._TARGET_SR:
                    audio = librosa.resample(audio, orig_sr=sr, target_sr=self._TARGET_SR)
                return audio, self._TARGET_SR
        if isinstance(audio_input, np.ndarray):
            # Assumed to already be a 16 kHz waveform -- TODO confirm callers.
            return audio_input, self._TARGET_SR
        raise ValueError(f"不支持的音频输入类型: {type(audio_input)}")

    def _prepare_inputs(self, audio_array, sr) -> Dict:
        """Turn a waveform into model inputs, moved to ``self.device``.

        Uses the processor when one is available; otherwise wraps the raw
        waveform as a batch-of-1 float32 "input_values" tensor.
        """
        if self.processor:
            inputs = self.processor(
                audio_array,
                sampling_rate=sr,
                return_tensors="pt"
            )
            if self.device != "cpu":
                inputs = {k: v.to(self.device) if isinstance(v, torch.Tensor) else v
                          for k, v in inputs.items()}
            return inputs
        # Force float32: soundfile yields float64 waveforms, which most
        # torch audio models reject.
        tensor = torch.tensor(audio_array, dtype=torch.float32).unsqueeze(0)
        if self.device != "cpu":
            tensor = tensor.to(self.device)
        return {"input_values": tensor}

    @staticmethod
    def _format_transcription(result, language) -> Dict:
        """Normalize a model-native transcription result.

        A bare string is wrapped into the standard result dict; any other
        result (e.g. an already-structured dict) is passed through unchanged.
        """
        if isinstance(result, str):
            return {
                "task": "transcription",
                "text": result,
                "language": language or "auto"
            }
        return result

    def _transcribe(self, audio, language=None, return_timestamps=False, **kwargs) -> Dict:
        """Speech-to-text (ASR).

        Reference: https://ai.gitcode.com/ifly_opensource/AudioFly/blob/main/README.md

        Args:
            audio: file path or waveform array.
            language: optional language hint; reported as "auto" when omitted.
            return_timestamps: when True an (empty) "segments" list is added;
                timestamp extraction is not implemented.

        Returns:
            Dict with "task", "text" and "language".
        """
        try:
            # Prefer a native transcription entry point when the wrapped
            # model exposes one (ModelScope-style models often do).
            if hasattr(self.model, 'transcribe'):
                result = self.model.transcribe(audio, language=language, **kwargs)
                return self._format_transcription(result, language)
            # NOTE(review): every torch.nn.Module defines __call__, so for
            # torch models this branch always wins and the processor-based
            # path below is effectively dead code -- confirm this is intended.
            if hasattr(self.model, '__call__'):
                result = self.model(audio, language=language, **kwargs)
                return self._format_transcription(result, language)

            audio_array, sr = self._load_audio(audio)
            inputs = self._prepare_inputs(audio_array, sr)

            with torch.no_grad():
                if hasattr(self.model, 'generate'):
                    # Seq2seq-style model: generate token ids, then decode.
                    generate_kwargs = {"language": language} if language else {}
                    generate_kwargs.update(kwargs)
                    features = inputs.get(
                        "input_values",
                        inputs.get("input_features", next(iter(inputs.values())))
                    )
                    generated_ids = self.model.generate(features, **generate_kwargs)
                    text = self._decode_ids(generated_ids[0])
                else:
                    outputs = self.model(**inputs)
                    if hasattr(outputs, 'logits'):
                        # CTC-style model: greedy argmax over the vocabulary.
                        # Decode the first (only) batch element; the original
                        # fallback decoded the whole batch by mistake.
                        predicted_ids = torch.argmax(outputs.logits, dim=-1)
                        text = self._decode_ids(predicted_ids[0])
                    elif hasattr(outputs, 'text'):
                        text = outputs.text
                    else:
                        text = str(outputs)

            result = {
                "task": "transcription",
                "text": text,
                "language": language or "auto"
            }
            if return_timestamps:
                # Timestamps are not implemented; keep the key so the
                # response shape stays stable for callers that asked.
                result["segments"] = []
            return result

        except Exception as e:
            self._LOG.error(f"转录失败: {str(e)}")
            raise

    def _translate(self, audio, source_language=None, target_language="en", **kwargs) -> Dict:
        """Speech translation.

        NOTE(review): this delegates to ``_transcribe`` and only relabels the
        result -- no translation to ``target_language`` happens here unless
        the underlying model performs it; confirm against the model docs.
        """
        try:
            result = self._transcribe(audio, language=source_language, **kwargs)
            result["task"] = "translation"
            result["source_language"] = source_language
            result["target_language"] = target_language
            # Model-native results may lack a "text" key; default to None
            # rather than raising KeyError.
            result["translated_text"] = result.pop("text", None)
            return result
        except Exception as e:
            self._LOG.error(f"翻译失败: {str(e)}")
            raise

    def _classify(self, audio, **kwargs) -> Dict:
        """Audio classification: return the top-5 class predictions."""
        try:
            audio_array, sr = self._load_audio(audio)
            inputs = self._prepare_inputs(audio_array, sr)

            with torch.no_grad():
                outputs = self.model(**inputs)

            # Different model families name their class scores differently.
            if hasattr(outputs, 'logits'):
                logits = outputs.logits
            elif hasattr(outputs, 'prediction_logits'):
                logits = outputs.prediction_logits
            else:
                logits = None

            if logits is None:
                predictions = [{"label": "unknown", "score": 1.0}]
            else:
                probs = torch.softmax(logits, dim=-1)
                top_probs, top_indices = torch.topk(probs, k=min(5, probs.shape[-1]))
                # Map ids to human-readable labels when the config has them.
                id2label = {}
                if hasattr(self.model, 'config') and hasattr(self.model.config, 'id2label'):
                    id2label = self.model.config.id2label
                predictions = [
                    {
                        "label": id2label.get(idx.item(), f"class_{idx.item()}"),
                        "score": float(prob.item()),
                    }
                    for prob, idx in zip(top_probs[0], top_indices[0])
                ]

            return {
                "task": "classification",
                "predictions": predictions
            }

        except Exception as e:
            self._LOG.error(f"分类失败: {str(e)}")
            raise

    def _enhance(self, audio, **kwargs) -> Dict:
        """Audio enhancement.

        Falls back to returning the input waveform unchanged when the model
        output carries no recognizable waveform attribute.
        """
        try:
            audio_array, sr = self._load_audio(audio)
            inputs = self._prepare_inputs(audio_array, sr)

            with torch.no_grad():
                outputs = self.model(**inputs)

            if hasattr(outputs, 'enhanced_audio'):
                enhanced = outputs.enhanced_audio
            elif hasattr(outputs, 'waveform'):
                enhanced = outputs.waveform
            elif hasattr(outputs, 'last_hidden_state'):
                # The model emitted features, not audio; return the input.
                self._LOG.warning("模型输出特征而非音频，返回原始音频")
                enhanced = audio_array
            else:
                self._LOG.warning("无法从模型输出中提取增强音频，返回原始音频")
                enhanced = audio_array

            if isinstance(enhanced, torch.Tensor):
                enhanced = enhanced.cpu().numpy()

            # Squeeze a leading batch dimension of size 1, if present.
            if enhanced.ndim > 1 and enhanced.shape[0] == 1:
                enhanced = enhanced[0]

            return {
                "task": "enhancement",
                "enhanced_audio": enhanced.tolist(),
                "sampling_rate": sr
            }

        except Exception as e:
            self._LOG.error(f"增强失败: {str(e)}")
            raise

    def _decode_ids(self, ids) -> str:
        """Decode token ids to text.

        Tries the processor, then the model's tokenizer, and finally returns
        a placeholder string carrying the ids' shape.
        """
        if self.processor and hasattr(self.processor, 'decode'):
            try:
                return self.processor.decode(ids, skip_special_tokens=True)
            except Exception:
                pass  # fall through to the model tokenizer

        tokenizer = getattr(self.model, 'tokenizer', None)
        if tokenizer is not None:
            try:
                return tokenizer.decode(ids, skip_special_tokens=True)
            except Exception:
                pass

        self._LOG.warning("无法解码token IDs，返回占位符")
        return f"decoded_text_from_ids_{ids.shape}"


