import queue
import sys
import sounddevice as sd
import soundfile as sf
from datetime import datetime
import wave
import numpy as np
from torch_audioset_npu.torch_audioset.data.torch_input_processing import WaveformToInput as TorchTransform
import torch
from ais_bench.infer.interface import InferSession
from sound_names import sound_names

# AudioSet/YAMNet class indices that should trigger an alert, mapped to their
# user-facing display labels. Keys index into `sound_names`; values are emitted
# at runtime (printed/yielded), so the label strings are left untranslated.
sound_id = {
    35: '口哨声',  # whistling
    42: '咳嗽',  # coughing
    57: '响指',  # finger snap
    418: '电动工具',  # power tool
    420: '爆炸',  # explosion
    437: '破碎',  # shattering / breaking
    513: '刺耳声音',  # harsh / screeching sound
}


def cosine_similarity(vector1, vector2):
    """Return the cosine similarity between two vectors.

    Both inputs are flattened to 1-D before comparison, so any shapes that
    flatten to the same length are accepted. Inputs may be any array-like
    (list, tuple, ndarray) — they are converted with ``np.asarray``.

    Args:
        vector1: array-like of numbers.
        vector2: array-like of numbers; must flatten to the same length.

    Returns:
        Cosine similarity in [-1, 1]. Returns 0.0 if either vector has zero
        magnitude (the unguarded division would otherwise produce NaN).
    """
    v1 = np.asarray(vector1).flatten()
    v2 = np.asarray(vector2).flatten()

    norm1 = np.linalg.norm(v1)
    norm2 = np.linalg.norm(v2)
    # Guard against division by zero for all-zero vectors.
    if norm1 == 0 or norm2 == 0:
        return 0.0

    return np.dot(v1, v2) / (norm1 * norm2)


def load_from_int16_data(data, sample_rate):
    """Decode raw int16 PCM bytes into a mono float32 waveform.

    Args:
        data: bytes (or buffer) of little-endian int16 samples, single channel.
        sample_rate: sample rate in Hz; passed through unchanged.

    Returns:
        Tuple ``(waveform, sample_rate)`` where ``waveform`` is a float32
        array of shape ``(1, num_samples)`` normalized to [-1, 1).
    """
    samples = np.frombuffer(data, dtype=np.int16).astype(np.float32)
    # Normalize from the int16 range [-32768, 32767] to roughly [-1, 1).
    samples /= 32768.0
    # Channel-first layout: one row per channel (mono -> shape (1, N)).
    return samples[np.newaxis, :], sample_rate


def load_from_int16_file(fname):
    """Read an audio file as int16 and return a normalized float32 waveform.

    Args:
        fname: path to an audio file readable by ``soundfile``.

    Returns:
        Tuple ``(waveform, sample_rate)`` where ``waveform`` is a float32
        array of shape ``(channels, num_samples)`` scaled to [-1, 1).
    """
    samples, rate = sf.read(fname, dtype='int16', always_2d=True)
    # Scale int16 range to [-1, 1) and switch to channel-first layout.
    scaled = (samples / 32768.0).astype(np.float32)
    return scaled.T, rate

class SoundRecog:
    """Streams microphone audio, classifies each block with YAMNet on an NPU,
    and yields the display label of any detected alert sound.

    Audio flows: sounddevice callback -> queue -> WAV archive + log-mel
    patches -> OM model inference -> top-k lookup against `sound_id`.
    """

    def __init__(self, sample_rate=16000):
        """Set up the capture queue and load the YAMNet OM model.

        Args:
            sample_rate: capture sample rate in Hz (model expects 16 kHz).
        """
        self.sample_rate = sample_rate
        self.audio_queue = queue.Queue()
        # NPU inference session on device 0 for the converted YAMNet model.
        self.om_sess = InferSession(0, 'yamnet_sim.om')
        self.sound_names = sound_names
        # Number of highest-scoring classes to inspect per block.
        self.topk = 20

        # NOTE(review): never read inside this class; kept only in case
        # external code relies on the attribute existing.
        self.data = 0

    def callback(self, indata, frames, time, status):
        """sounddevice stream callback: enqueue each raw audio block.

        Any stream status (overflow, etc.) is reported on stderr; the raw
        int16 bytes are copied into the queue for the consumer loop.
        """
        if status:
            print(status, file=sys.stderr)
        self.audio_queue.put(bytes(indata))

    def transcribe_audio(self):
        """Record from the microphone and classify blocks indefinitely.

        Opens a timestamped WAV file, then for every 1-second block
        (blocksize 16000 at 16 kHz): archives it to the WAV, converts it to
        log-mel patches, runs NPU inference, and scans the top-k classes for
        an alert class listed in `sound_id`.

        Yields:
            str: the matched alert label, or "" when no alert class is in
            the top-k for that block.
        """
        # Unique archive filename per session.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"recording_{timestamp}.wav"

        try:
            with wave.open(filename, 'wb') as wav_file:
                wav_file.setnchannels(1)              # mono
                wav_file.setsampwidth(2)              # 16-bit samples
                wav_file.setframerate(self.sample_rate)

                with sd.RawInputStream(samplerate=self.sample_rate, blocksize=16000,
                                       dtype='int16', channels=1,
                                       callback=self.callback):
                    while True:
                        data = self.audio_queue.get()
                        wav_file.writeframes(data)

                        # Decode to a (1, N) float32 waveform; the sample rate
                        # round-trips unchanged.
                        waveforms, _ = load_from_int16_data(data, self.sample_rate)
                        waveform_for_torch = torch.tensor(waveforms)
                        # Model front-end expects 16 kHz input.
                        patches, _ = TorchTransform().wavform_to_log_mel(
                            waveform_for_torch, 16000)
                        patches = patches.numpy()
                        print(f"[INFO]patches.shape={patches.shape}")

                        pred_scores = self.om_sess.infer([patches], custom_sizes=1000)[0]

                        # Indices of the top-k classes, highest score first.
                        top_k_indice = np.argsort(pred_scores[0])[::-1][:self.topk]
                        top_k_names = [self.sound_names[k] for k in top_k_indice]
                        print(f"[INFO]top_k_names={top_k_names}")
                        print(f"[INFO]top_k_indice={top_k_indice}")

                        # First top-k class that is on the alert whitelist wins.
                        result = ""
                        for k in top_k_indice:
                            if k in sound_id:
                                result = self.sound_names[k]
                                break
                        print(f"[INFO]result=========={result}")

                        yield result

        except KeyboardInterrupt:
            print("\n录音结束")
        except Exception as e:
            # Best-effort reporting; the generator simply ends on error.
            print(f"发生错误: {str(e)}")


if __name__ == '__main__':
    # Run the recognizer forever, printing each block's alert label
    # ("" when nothing on the whitelist was detected).
    recognizer = SoundRecog()
    for label in recognizer.transcribe_audio():
        print(label)
