import pyaudio
import numpy as np
import torch
import time
import warnings
from transformers import AutoTokenizer, AutoModelForCausalLM
import librosa
from threading import Timer

# Suppress known, harmless version warnings emitted by transformers/torch
# at model-load time (deprecated AutoModelForCausalLM multimodal path and
# missing flash-attention build).
warnings.filterwarnings("ignore", message="Loading a multimodal model with `AutoModelForCausalLM` is deprecated")
warnings.filterwarnings("ignore", message="Torch was not compiled with flash attention")


class StepAudioRealtimeRecognizer:
    """Real-time speech recognizer built on a Step-Audio causal-LM checkpoint.

    Microphone audio is captured with PyAudio in float32 chunks, accumulated
    into a sliding window with overlap, converted into a normalized
    mel-spectrogram tensor, and greedily decoded by the model behind a
    transcription prompt.  Partial transcripts are printed as they arrive.
    """

    def __init__(self, model_path="Step-Audio-2-mini", sample_rate=16000):
        """Load the tokenizer/model and set up streaming parameters.

        Args:
            model_path: Local path or hub id of the Step-Audio checkpoint.
            sample_rate: Capture rate in Hz; chunk/window sizes derive from it.
        """
        self.sample_rate = sample_rate
        self.chunk_duration = 0.5  # seconds of audio delivered per callback
        self.chunk_size = int(sample_rate * self.chunk_duration)
        self.window_size = 2  # seconds per recognition window (small => lower latency)
        self.window_samples = int(sample_rate * self.window_size)
        self.overlap_samples = int(sample_rate * 0.5)  # 0.5 s carried between windows
        self.inference_timeout = 5  # seconds before an inference pass is abandoned

        print("正在加载模型...")
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_path,
            trust_remote_code=True
        )
        # Some checkpoints ship without a pad token; fall back to EOS so that
        # tokenizer(..., padding=True) does not raise.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

        self.model = AutoModelForCausalLM.from_pretrained(
            model_path,
            trust_remote_code=True,
            torch_dtype=torch.bfloat16,
            device_map="auto",
            attn_implementation="eager"  # avoid the flash-attention requirement
        )
        self.model.eval()  # inference only

        # Streaming state.
        self.audio_buffer = np.array([], dtype=np.float32)
        self.transcript = ""
        self.running = False
        self.inference_running = False  # guards against overlapping inference passes
        print("模型加载完成，准备就绪")

    def preprocess_audio(self, audio_data):
        """Convert one waveform window into a model-ready feature tensor.

        Args:
            audio_data: 1-D float32 waveform; ideally exactly
                ``window_samples`` long (the callback guarantees this).

        Returns:
            A ``(1, frames, n_mels)`` float tensor on the model's device.
        """
        # Defensive length fix: pad with silence or truncate so the window is
        # always exactly `window_samples` long.  (The previous resample-based
        # correction could still yield an off-by-a-few-samples length due to
        # rounding in the derived sample rate.)
        if len(audio_data) < self.window_samples:
            audio_data = np.pad(audio_data, (0, self.window_samples - len(audio_data)))
        elif len(audio_data) > self.window_samples:
            audio_data = audio_data[:self.window_samples]

        # Mel spectrogram with a coarse hop and few bands to keep the feature
        # tensor small and inference fast.
        features = librosa.feature.melspectrogram(
            y=audio_data,
            sr=self.sample_rate,
            n_fft=512,
            hop_length=256,
            n_mels=64
        )

        # (n_mels, frames) -> (frames, n_mels): time-major layout.
        features = features.transpose(1, 0)

        # Per-window standardization; epsilon avoids divide-by-zero on silence.
        features = (features - np.mean(features)) / (np.std(features) + 1e-8)
        return torch.FloatTensor(features).unsqueeze(0).to(self.model.device)

    def inference_with_timeout(self, audio_features):
        """Greedy-decode a transcription for one audio window, with a watchdog.

        A ``threading.Timer`` clears ``inference_running`` after
        ``inference_timeout`` seconds; the token loop polls the flag each step
        and aborts.  An aborted or failed pass returns ``None``.

        Args:
            audio_features: Tensor from :meth:`preprocess_audio`.

        Returns:
            The decoded text with the prompt stripped, or ``None``.
        """
        result = None

        def _on_timeout():
            # Runs on the timer thread; the decode loop polls the flag, so no
            # further synchronization is needed for this best-effort abort.
            if self.inference_running:
                print("\n推理超时，跳过当前音频块")
                self.inference_running = False

        watchdog = Timer(self.inference_timeout, _on_timeout)
        self.inference_running = True
        watchdog.start()

        try:
            prompt = "将以下音频转换为文字: "
            input_ids = self.tokenizer(
                prompt,
                return_tensors="pt",
                padding=True,
                truncation=True
            ).input_ids.to(self.model.device)

            with torch.no_grad():
                max_length = input_ids.shape[1] + 50  # hard cap on generated length
                eos_id = self.tokenizer.eos_token_id  # hoisted out of the loop

                for _ in range(50):  # at most 50 new tokens
                    if not self.inference_running:
                        break  # watchdog fired

                    # NOTE(review): re-runs the full sequence every step (no KV
                    # cache).  Acceptable for <=50 tokens, but this is the main
                    # per-window latency cost.
                    outputs = self.model(
                        input_ids=input_ids,
                        audio_features=audio_features,
                        return_dict=True
                    )

                    next_token_logits = outputs.logits[:, -1, :]
                    next_token_id = torch.argmax(next_token_logits, dim=-1).unsqueeze(-1)
                    input_ids = torch.cat([input_ids, next_token_id], dim=-1)

                    if next_token_id.item() == eos_id or input_ids.shape[1] >= max_length:
                        break

            # Only report a result if the pass was not timed out.
            if self.inference_running:
                result = self.tokenizer.decode(
                    input_ids[0], skip_special_tokens=True
                ).replace(prompt, "").strip()

        except Exception as e:
            print(f"\n识别错误: {str(e)}")
        finally:
            self.inference_running = False
            watchdog.cancel()  # no-op if it already fired

        return result

    def audio_callback(self, in_data, frame_count, time_info, status):
        """PyAudio stream callback: buffer audio and run recognition.

        NOTE(review): inference runs on the audio callback thread, which can
        stall capture for long windows — consider handing windows to a worker
        thread if dropouts are observed.

        Returns a ``(out_data, flag)`` pair per the PyAudio callback contract.
        """
        if not self.running:
            return (None, pyaudio.paComplete)

        # Skip this chunk entirely if the previous inference is still running.
        if self.inference_running:
            return (None, pyaudio.paContinue)

        try:
            # Append the incoming float32 samples to the rolling buffer.
            audio_chunk = np.frombuffer(in_data, dtype=np.float32)
            self.audio_buffer = np.concatenate([self.audio_buffer, audio_chunk])

            # Recognize once a full window has accumulated.
            if len(self.audio_buffer) >= self.window_samples:
                window_audio = self.audio_buffer[:self.window_samples]

                features = self.preprocess_audio(window_audio)
                text = self.inference_with_timeout(features)

                # Append any recognized text to the running transcript.
                if text:
                    self.transcript += " " + text
                    print(f"\r实时转录: {self.transcript}", end="", flush=True)

                # Keep only the overlap tail for the next window.
                self.audio_buffer = self.audio_buffer[-self.overlap_samples:]

        except Exception as e:
            print(f"\n处理错误: {str(e)}")
            return (None, pyaudio.paAbort)

        return (None, pyaudio.paContinue)

    def start(self):
        """Open the microphone stream and run until stopped (Ctrl+C).

        Prints the accumulated transcript on exit; always releases the
        stream and the PyAudio interface.
        """
        self.running = True
        self.audio_buffer = np.array([], dtype=np.float32)
        self.transcript = ""

        audio_interface = pyaudio.PyAudio()
        stream = None

        try:
            stream = audio_interface.open(
                format=pyaudio.paFloat32,
                channels=1,
                rate=self.sample_rate,
                input=True,
                frames_per_buffer=self.chunk_size,
                stream_callback=self.audio_callback
            )

            print("开始实时语音识别 (按Ctrl+C停止)...")
            stream.start_stream()

            # Idle loop: all real work happens in the stream callback.
            while self.running and stream.is_active():
                time.sleep(0.1)

        except KeyboardInterrupt:
            print("\n用户终止识别")
        except Exception as e:
            print(f"\n发生错误: {str(e)}")
        finally:
            self.running = False
            if stream:
                stream.stop_stream()
                stream.close()
            audio_interface.terminate()
            print("\n\n完整转录结果:")
            print(self.transcript)


if __name__ == "__main__":
    # Local checkpoint directory for the Step-Audio-2-mini model.
    local_model_dir = "D:/modelscope/Step-Audio-2-mini"

    try:
        # Build the recognizer and block on the capture loop until Ctrl+C.
        StepAudioRealtimeRecognizer(model_path=local_model_dir).start()
    except Exception as e:
        print(f"初始化失败: {str(e)}")
