from typing import Dict
from pathlib import Path
import logging
import json
import numpy as np
import tritonclient.http as httpclient
import tritonclient.grpc as grpcclient
from core.translation_interface import TranslationEngine

logger = logging.getLogger(__name__)

class TritonAdapter(TranslationEngine):
    """
    Adapter for a Triton Inference Server backend.

    Implements the ASR model interface (spec 8.3.1) and supports both
    the gRPC and HTTP client protocols of ``tritonclient``.
    """

    def __init__(self,
                 server_url: str = "localhost:8001",
                 model_name: str = "whisper_asr",
                 protocol: str = "grpc",
                 use_ssl: bool = False):
        """
        Args:
            server_url: Triton endpoint as bare ``host:port`` (no URL scheme).
            model_name: name of the deployed model to query.
            protocol: ``"grpc"`` or ``"http"`` (case-insensitive).
            use_ssl: enable SSL/TLS on the connection.

        Raises:
            RuntimeError: if the server or model is not ready (via ``_connect``).
        """
        self.server_url = server_url
        self.model_name = model_name
        self.protocol = protocol.lower()
        self.use_ssl = use_ssl
        self.client = None
        self._connect()

    def _client_lib(self):
        """Return the tritonclient module matching the configured protocol."""
        return grpcclient if self.protocol == "grpc" else httpclient

    def _connect(self):
        """Create the Triton client and verify server/model readiness."""
        try:
            if self.protocol == "grpc":
                self.client = grpcclient.InferenceServerClient(
                    url=self.server_url,
                    ssl=self.use_ssl
                )
            else:  # http
                # FIX: tritonclient.http expects a bare "host:port" URL;
                # prepending an "http://" scheme makes requests fail.
                self.client = httpclient.InferenceServerClient(
                    url=self.server_url,
                    ssl=self.use_ssl
                )

            # Fail fast if the server itself is not up yet.
            if not self.client.is_server_ready():
                raise RuntimeError("Triton服务器未就绪")

            # ...and if the requested model is not loaded.
            if not self.client.is_model_ready(self.model_name):
                raise RuntimeError(f"模型 {self.model_name} 未就绪")

            logger.info(f"成功连接到Triton服务器: {self.server_url}")

        except Exception as e:
            logger.error(f"连接Triton服务器失败: {e}")
            raise

    def translate_audio(
        self,
        audio_path: str,
        source_lang: str,
        target_lang: str,
        enable_translation: bool = True,
        return_segments: bool = False
    ) -> Dict:
        """Run ASR (and optional translation) on an audio file via Triton.

        Args:
            audio_path: path of the audio file to transcribe.
            source_lang: source language code (not used client-side;
                presumably handled by the server model -- TODO confirm).
            target_lang: target language code (likewise unused here).
            enable_translation: also request the "translated_output" tensor.
            return_segments: also request the "segments_output" tensor.

        Returns:
            Dict with keys "text", "translated_text", "confidence",
            "duration" and "segments".

        Raises:
            RuntimeError: if the Triton client is not ready.
            Exception: any tritonclient inference error is logged and re-raised.
        """
        if not self.is_ready():
            raise RuntimeError("Triton客户端未就绪")

        lib = self._client_lib()
        try:
            # 1. Load the audio as a float32 waveform.
            audio_data = self._load_audio(audio_path)

            # 2. Build the input tensor.
            infer_input = lib.InferInput("audio_input", list(audio_data.shape), "FP32")
            infer_input.set_data_from_numpy(audio_data)
            inputs = [infer_input]

            # 3. Request only the outputs the caller asked for.
            outputs = [lib.InferRequestedOutput("text_output")]
            if enable_translation:
                outputs.append(lib.InferRequestedOutput("translated_output"))
            if return_segments:
                outputs.append(lib.InferRequestedOutput("segments_output"))

            # 4. Run inference.
            response = self.client.infer(
                model_name=self.model_name,
                inputs=inputs,
                outputs=outputs
            )

            # 5. Decode the response tensors into the result dict.
            return self._process_response(response, enable_translation, return_segments)

        except Exception as e:
            logger.error(f"Triton推理失败: {e}")
            raise

    def _load_audio(self, audio_path: str) -> np.ndarray:
        """Load an audio file as a mono float32 waveform resampled to 16 kHz."""
        # Imported lazily so the adapter can be constructed without librosa.
        import librosa
        audio, sr = librosa.load(audio_path, sr=16000)  # 16 kHz sample rate
        return audio.astype(np.float32)

    @staticmethod
    def _decode_text(raw) -> str:
        """Decode a Triton BYTES output into a Python string.

        ``as_numpy`` on a BYTES tensor returns an ndarray of bytes objects
        (which has no ``.decode``), so unwrap the first element; fall back
        to a direct decode for scalar bytes.
        """
        if isinstance(raw, np.ndarray):
            raw = raw.flatten()[0]
        if isinstance(raw, bytes):
            return raw.decode('utf-8')
        return str(raw)

    def _process_response(self, response, enable_translation: bool, return_segments: bool) -> Dict:
        """Convert a Triton infer response into the interface result dict."""
        try:
            # Source-language transcription.
            source_text = self._decode_text(response.as_numpy("text_output"))

            # Translated text, or fall back to the transcription.
            if enable_translation:
                translated_text = self._decode_text(response.as_numpy("translated_output"))
            else:
                translated_text = source_text

            # Segment info is best-effort: missing/invalid output -> [].
            segments = []
            if return_segments:
                try:
                    segments = json.loads(
                        self._decode_text(response.as_numpy("segments_output"))
                    )
                except Exception:
                    segments = []

            # TODO: confidence/duration should come from model outputs;
            # the server does not expose them yet, so use placeholders.
            confidence = 0.95

            return {
                "text": source_text,
                "translated_text": translated_text,
                "confidence": confidence,
                "duration": 0.0,  # needs a separate computation
                "segments": segments
            }

        except Exception as e:
            logger.error(f"处理Triton响应失败: {e}")
            raise

    def is_ready(self) -> bool:
        """Return True when the client, server and model are all ready."""
        try:
            return (self.client is not None and
                    self.client.is_server_ready() and
                    self.client.is_model_ready(self.model_name))
        except Exception:
            # Any transport/client error means "not ready", never a crash.
            return False

    def get_supported_languages(self) -> Dict[str, list]:
        """Return the language codes supported for ASR and translation."""
        return {
            "asr_languages": ["zh", "en", "ja", "ko", "fr", "de", "es", "ru"],
            "translation_languages": ["zh", "en", "ja", "ko", "fr", "de", "es", "ru"]
        }

    def get_model_info(self) -> Dict:
        """Fetch model metadata from the server; {} on failure."""
        try:
            # get_model_metadata has the same signature for both protocols,
            # so no per-protocol branch is needed.
            return self.client.get_model_metadata(self.model_name)
        except Exception as e:
            logger.error(f"获取模型信息失败: {e}")
            return {}




