"""
onnx_inference.py - ONNX模型推理示例

功能：
1. 加载导出的ONNX模型
2. 音频预处理（与训练时一致）
3. 实时推理
4. 结果后处理
"""
import threading
import time

import numpy as np
import onnxruntime as ort
import librosa
import sounddevice as sd
from typing import Tuple, Optional, List


class ONNXAudioClassifier:
    def __init__(self, onnx_path: str, sample_rate: int = 48000):
        """
        Initialize the ONNX classifier.

        Args:
            onnx_path: path to the exported ONNX model file.
            sample_rate: audio sample rate (must match the rate used at training time).
        """
        # CPU-only execution avoids CUDA initialization errors on machines
        # without a working GPU stack; prepend 'CUDAExecutionProvider' here
        # to prefer the GPU when one is available.
        providers = ['CPUExecutionProvider']

        self.session = ort.InferenceSession(
            onnx_path,
            providers=providers
        )

        # Cache tensor names once; session.run() needs them on every call.
        self.input_name = self.session.get_inputs()[0].name
        self.output_name = self.session.get_outputs()[0].name
        self.sample_rate = sample_rate

        # Report the model's expected input shape and available providers.
        print(f"ONNX模型加载成功，输入要求: {self.session.get_inputs()[0].shape}")
        print(f"可用执行提供者: {ort.get_available_providers()}")

    def preprocess(self, audio: np.ndarray) -> np.ndarray:
        """
        Audio preprocessing (must be identical to the training pipeline).

        Args:
            audio: raw audio samples, shape [samples,].

        Returns:
            float32 array shaped [1, 1, 256, 256] matching the model input.
        """
        # Mel-spectrogram feature extraction.
        mel = librosa.feature.melspectrogram(
            y=audio,
            sr=self.sample_rate,
            n_mels=256,
            n_fft=2048,
            hop_length=512
        )

        # Convert the power spectrogram to dB scale.
        mel_db = librosa.power_to_db(mel, ref=np.max)

        # Pad/crop the time axis to 256 frames (model expects [1,1,256,256]).
        # NOTE(review): with ref=np.max the values are <= 0 dB, so the 0-valued
        # padding equals the loudest level — presumably this matches what was
        # done at training time; confirm against the training preprocessing.
        if mel_db.shape[1] < 256:
            mel_db = np.pad(mel_db, ((0, 0), (0, 256 - mel_db.shape[1])))
        else:
            mel_db = mel_db[:, :256]

        # Add batch and channel dimensions.
        return mel_db[np.newaxis, np.newaxis, :, :].astype(np.float32)

    def predict(self, audio: np.ndarray) -> Tuple[int, np.ndarray]:
        """
        Run one inference pass.

        Args:
            audio: raw audio samples, shape [samples,].

        Returns:
            (predicted class index, probability vector over all classes)
        """
        inputs = self.preprocess(audio)

        # ONNX Runtime inference.
        outputs = self.session.run(
            output_names=[self.output_name],
            input_feed={self.input_name: inputs}
        )

        # Post-process: the model emits raw logits; normalize to probabilities.
        probs = softmax(outputs[0][0])
        return np.argmax(probs), probs

    def realtime_predict(self, duration: int = 5):
        """
        Live microphone classification demo.

        Args:
            duration: demo length in seconds.
        """
        print(f"\n开始实时分类(采样率: {self.sample_rate})...")

        # The third callback argument was named `time`, shadowing the stdlib
        # `time` module; sounddevice passes callback args positionally, so
        # renaming it is safe.
        def callback(indata, frames, time_info, status):
            """Stream callback: classify the current mono audio block."""
            pred_class, probs = self.predict(indata[:, 0])
            print(f"\r预测类别: {pred_class} | 最高概率: {probs[pred_class]:.2%}", end="")

        with sd.InputStream(
                samplerate=self.sample_rate,
                channels=1,
                callback=callback,
                blocksize=self.sample_rate // 2  # 0.5-second blocks
        ):
            sd.sleep(duration * 1000)


class ONNXAudioClassifierAdvanced(ONNXAudioClassifier):
    """Classifier with latency profiling, batching and a monitor thread."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Per-call inference times in seconds (grows over the session).
        self.latencies = []
        # Polled by the monitor thread; set to False to stop it.
        self._monitor_active = False

    def predict_with_profile(self, audio: np.ndarray) -> dict:
        """
        Run a prediction with a timing breakdown.

        Returns:
            {
                "prediction": int,           # predicted class index
                "probabilities": np.ndarray, # per-class probabilities
                "timing": {                  # durations in seconds
                    "preprocess": float,
                    "inference": float,
                    "avg_latency": float     # mean of the last 10 inferences
                }
            }
        """
        start = time.perf_counter()
        inputs = self.preprocess(audio)  # inherited from the base class
        preprocess_time = time.perf_counter() - start

        start = time.perf_counter()
        outputs = self.session.run(
            None,  # fetch all outputs
            {self.input_name: inputs}
        )
        inference_time = time.perf_counter() - start

        self.latencies.append(inference_time)

        return {
            "prediction": np.argmax(outputs[0]),
            "probabilities": softmax(outputs[0][0]),
            "timing": {
                "preprocess": preprocess_time,
                "inference": inference_time,
                "avg_latency": np.mean(self.latencies[-10:])
            }
        }

    def batch_predict(self, audio_list: List[np.ndarray]) -> List[dict]:
        """
        Predict a list of audio clips in one batched inference call.

        Args:
            audio_list: list of 1-D numpy audio arrays.

        Returns:
            One dict per clip with keys "prediction" and "probabilities".
            (Unlike predict_with_profile, no "timing" entry is produced —
            this path is not profiled.)
        """
        # Stack per-clip preprocessed features into a single batch tensor.
        input_batch = np.stack([self.preprocess(audio)[0] for audio in audio_list])

        # Single batched inference call.
        outputs = self.session.run(
            [self.output_name],
            {self.input_name: input_batch}
        )

        # One result per row of logits.
        return [{
            "prediction": np.argmax(probs),
            "probabilities": softmax(probs),
        } for probs in outputs[0]]

    def start_realtime_monitor(self, update_interval: float = 1.0):
        """
        Start the background performance-monitor thread.

        Args:
            update_interval: seconds between status printouts.
        """
        # Fix: the flag was never set anywhere before, so the previous
        # getattr(..., True) default made the thread unstoppable.
        self._monitor_active = True

        def monitor_thread():
            while self._monitor_active:
                time.sleep(update_interval)
                if self.latencies:
                    avg = np.mean(self.latencies[-10:]) * 1000
                    print(f"\n[性能监控] 平均延迟: {avg:.1f}ms | 最近峰值: {max(self.latencies[-5:]) * 1000:.1f}ms")

        self._monitor_thread = threading.Thread(
            target=monitor_thread,
            daemon=True
        )
        self._monitor_thread.start()

    def stop_realtime_monitor(self):
        """Signal the monitor thread to exit (it stops after its next sleep)."""
        self._monitor_active = False


def softmax(x: np.ndarray, axis: int = -1) -> np.ndarray:
    """
    Numerically stable softmax normalization.

    Generalized to N-D input: normalizes along *axis* (default: last axis),
    which is identical to the previous behavior for the 1-D vectors used
    elsewhere in this file.

    Args:
        x: logits array of any shape.
        axis: axis along which probabilities sum to 1.

    Returns:
        Array of the same shape as x with values in (0, 1] summing to 1
        along *axis*.
    """
    # Subtract the max before exponentiating to avoid overflow.
    e_x = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return e_x / e_x.sum(axis=axis, keepdims=True)


if __name__ == "__main__":
    ## 1 普通实时监测
    # classifier = ONNXAudioClassifier(
    #     onnx_path="models/exported/model.onnx",
    #     sample_rate=48000
    # )
    #
    # # 测试实时预测
    # classifier.realtime_predict(duration=10)

    ## 2 执行带性能分析的预测 - 单个文件
    #
    # 返回:
    #     {
    #         "prediction": int,          # 预测类别
    #         "probabilities": np.ndarray, # 各类别概率
    #         "timing": {                 # 耗时分析(秒)
    #             "preprocess": float,
    #             "inference": float,
    #             "avg_latency": float    # 最近10次平均
    #         }
    #     }

    # classifier = ONNXAudioClassifierAdvanced(
    #     onnx_path="models/exported/model.onnx",
    #     sample_rate=48000
    # )
    #
    # # 单次预测
    # audio, _ = librosa.load("data/raw/human/sample_1743692044.wav", sr=16000)
    # result = classifier.predict_with_profile(audio)
    # print(f"""
    # 预测结果: {result['prediction']}
    # 置信度: {result['probabilities'][result['prediction']]:.2%}
    # 预处理耗时: {result['timing']['preprocess'] * 1000:.1f}ms
    # 推理耗时: {result['timing']['inference'] * 1000:.1f}ms
    # """)

    ## 3 实时麦克风分类

    # # 初始化
    # classifier = ONNXAudioClassifierAdvanced("models/exported/model.onnx")
    # # 启动性能监控
    # classifier.start_realtime_monitor()
    #
    # # 实时回调
    # def audio_callback(indata, frames, time, status):
    #     result = classifier.predict_with_profile(indata[:, 0])
    #     print(f"\r当前类别: {result['prediction']}", end="")
    #
    #
    # with sd.InputStream(
    #         samplerate=16000,
    #         blocksize=16000,  # 1秒块
    #         callback=audio_callback
    # ):
    #     input("正在录音...按回车停止")
    #
    # # 打印最终报告
    # print(f"\n{'=' * 40}")
    # print(f"总推理次数: {len(classifier.latencies)}")
    # print(f"最大延迟: {max(classifier.latencies) * 1000:.1f}ms")

    ## 批量处理音频
    files = ["data/raw/human/sample_1743692044.wav", "data/raw/shovel/sample_1743671102.wav",
             "data/raw/shovel/sample_1743671135.wav"]
    audio_list = [librosa.load(f, sr=16000)[0] for f in files]

    classifier = ONNXAudioClassifierAdvanced("models/exported/model.onnx")
    results = classifier.batch_predict(audio_list)

    for i, r in enumerate(results):
        print(f"{files[i]} -> 类别:{r['prediction']} 置信度:{r['probabilities'][r['prediction']]:.2%}")
