import onnxruntime as ort
import numpy  as np
import json
import os
import librosa

class ORTLoader:
    """ONNX Runtime wrapper around a CTC speech-recognition model.

    Loads the ONNX model together with the ``vocab.json`` and
    ``tokenizer_config.json`` files that live next to it, and exposes
    ``ExecInfer()``, which resamples the audio if needed, runs the
    session, greedily CTC-decodes the logits, and maps token ids to text.
    """

    def __init__(self, model_path: str = "./models/data2vec_onnx/model.onnx", device: str = "cpu") -> None:
        """Create the loader and eagerly initialize the inference session.

        Args:
            model_path: Path to the .onnx model file; vocab.json and
                tokenizer_config.json are expected in the same directory.
            device: "cpu" or a string starting with "cuda".

        Raises:
            RuntimeError: If `device` is neither "cpu" nor "cuda*".
            FileNotFoundError: If the model/vocab/tokenizer files are missing.
        """
        self.model_path = model_path
        self.device = device
        self.model_dir = os.path.dirname(model_path)

        self.input_names = []   # model input node names, filled on init
        self.output_names = []  # model output node names, filled on init

        # NOTE(review): never written after construction — kept only for
        # backward compatibility with code that may read it.
        self.input_size = ()

        self.providers = []
        # Fixes the original `sesssion` typo; the misspelled name remains
        # available through the property alias below so existing callers
        # keep working.
        self.session = None

        # Load vocabulary and tokenizer configuration
        self.vocab = self._load_vocab()
        self.tokenizer_config = self._load_tokenizer_config()

        # Reverse mapping (token id -> token string) used during decoding
        self.id_to_token = {v: k for k, v in self.vocab.items()}

        self._init_session()

    @property
    def sesssion(self):
        """Deprecated misspelled alias for ``session`` (backward compat)."""
        return self.session

    @sesssion.setter
    def sesssion(self, value):
        self.session = value

    def _load_vocab(self):
        """Load the token->id vocabulary from vocab.json in the model dir."""
        vocab_path = os.path.join(self.model_dir, "vocab.json")
        with open(vocab_path, 'r', encoding='utf-8') as f:
            return json.load(f)

    def _load_tokenizer_config(self):
        """Load tokenizer_config.json from the model directory."""
        config_path = os.path.join(self.model_dir, "tokenizer_config.json")
        with open(config_path, 'r', encoding='utf-8') as f:
            return json.load(f)

    def _init_session(self):
        """Create the ORT inference session and cache its I/O node names.

        Raises:
            RuntimeError: If the configured device is not supported.
        """
        if self.device.startswith("cuda"):
            self.providers = ["CUDAExecutionProvider"]
        elif self.device == "cpu":
            self.providers = ["CPUExecutionProvider"]
        else:
            raise RuntimeError(f"Current device: {self.device} is not supported!")

        self.session = ort.InferenceSession(self.model_path, providers=self.providers)

        self._get_input_names()
        self._get_output_names()

    def _resample_audio(self, audio, ori_samplerate, tar_samplerate):
        """Resample `audio` from `ori_samplerate` to `tar_samplerate` Hz."""
        return librosa.resample(audio, orig_sr=ori_samplerate, target_sr=tar_samplerate)

    def ctc_decode(self, logits, blank_id=0):
        """Greedy CTC decode: argmax per frame, collapse repeats, drop blanks.

        Args:
            logits: Model output logits of shape [seq_len, vocab_size].
            blank_id: ID of the CTC blank token (default 0).

        Returns:
            list: Decoded token ids with repeats and blanks removed.
        """
        # Most likely token at each time step
        predicted_ids = np.argmax(logits, axis=1)

        decoded = []
        previous = None
        for token_id in predicted_ids:
            # Standard CTC collapse: emit only on a change of symbol,
            # and never emit the blank itself.
            if token_id != blank_id and token_id != previous:
                decoded.append(token_id)
            previous = token_id

        return decoded

    def tokens_to_text(self, token_ids):
        """Map token ids to a text string.

        Special tokens are skipped and the word-delimiter token "|" is
        replaced by a space (wav2vec2-style character vocabulary —
        presumably; verify against the model's tokenizer config).

        Args:
            token_ids: Iterable of token ids.

        Returns:
            str: The decoded, whitespace-stripped text.
        """
        tokens = []
        for token_id in token_ids:
            if token_id in self.id_to_token:
                token = self.id_to_token[token_id]
                # Skip special tokens
                if token not in ["<pad>", "<s>", "</s>", "<unk>"]:
                    tokens.append(token)

        # Join characters, turning the word delimiter into spaces
        text = "".join(tokens)
        text = text.replace("|", " ")
        return text.strip()

    def ExecInfer(self, audio, ori_samplerate, tar_samplerate):
        """Run inference on an audio clip and decode the transcription.

        Args:
            audio: Audio samples; assumed to be a numpy array in the layout
                the model's first input expects — TODO confirm against the
                export script.
            ori_samplerate: Sample rate of `audio`.
            tar_samplerate: Sample rate the model expects.

        Returns:
            dict: {'logits': 2-D logits array,
                   'decoded_tokens': list of token ids,
                   'text': decoded transcription string}.
        """
        if ori_samplerate != tar_samplerate:
            audio = self._resample_audio(audio, ori_samplerate, tar_samplerate)

        infer_res = self.session.run(output_names=self.output_names,
                                     input_feed={self.input_names[0]: audio})

        logits = infer_res[0]

        # If logits are 3-D (batch_size, seq_len, vocab_size), take batch 0
        if len(logits.shape) == 3:
            logits = logits[0]

        decoded_tokens = self.ctc_decode(logits)
        text = self.tokens_to_text(decoded_tokens)

        return {
            'logits': logits,
            'decoded_tokens': decoded_tokens,
            'text': text
        }

    def _get_input_names(self):
        """Cache the model's input node names (printed for debugging)."""
        self.input_names = [i.name for i in self.session.get_inputs()]
        print(f"input names: {self.input_names}")

    def _get_output_names(self):
        """Cache the model's output node names (printed for debugging)."""
        self.output_names = [o.name for o in self.session.get_outputs()]
        print(f"output names: {self.output_names}")