import sys
import os
import time
import numpy as np
import librosa
import librosa.display
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtWidgets import QLabel, QPushButton, QFileDialog, QVBoxLayout, QHBoxLayout, QWidget, QGridLayout
import matplotlib.pyplot as plt
import pyaudio
import wave
import torch

from preprocess import preprocess_audio, extract_features
from model import build_model, build_model2, build_resnet_8_model, build_resnet_10_model, build_resnet_18_model

# Model checkpoints use the .pth suffix (models must already be saved in
# PyTorch format; index order matches load_model_by_index).
MODEL_PATHS = [
    "../models/best2/best_model.pth",
    "../models/best2/best_model2.pth",
    "../models/best2/best_model_resnet_8.pth",
    "../models/best2/best_model_resnet_10.pth",
    "../models/best2/best_model_resnet_18.pth"
]

# Module-level constants
TARGET_SAMPLE_RATE = 22050  # Hz; used for recording, WAV export and feature extraction
LABELS = {0: '非病理性', 1: '哮喘', 2: '咽炎', 3: '支气管炎', 4: '百日咳'}  # class index -> display label
AUDIO_CHUNK = 1024  # frames per recording buffer

def load_model_by_index(index, input_shape):
    """Build the selected network, load its weights, and return it in eval mode.

    Args:
        index: model index (0: SimpleCNN, 1: SimpleCNN2, 2: ResNet-8,
               3: ResNet-10, 4: ResNet-18); also indexes MODEL_PATHS.
        input_shape: (channels, height, width) tuple passed to the builder.

    Returns:
        The model with weights loaded onto the CPU and eval() applied.

    Raises:
        ValueError: if `index` does not select a known model.
    """
    # Dispatch table replaces the original five-way if/elif chain; order
    # mirrors MODEL_PATHS.
    builders = (
        build_model,
        build_model2,
        build_resnet_8_model,
        build_resnet_10_model,
        build_resnet_18_model,
    )
    if not 0 <= index < len(builders):
        raise ValueError("Invalid model index")
    model = builders[index](input_shape)
    # map_location='cpu' lets checkpoints saved on GPU load on CPU-only hosts.
    model.load_state_dict(torch.load(MODEL_PATHS[index], map_location='cpu'))
    model.eval()
    return model

class CoughRecognitionApp(QWidget):
    """Main window: load or record audio, display features, classify cough type."""

    def __init__(self):
        super().__init__()
        self.initUI()
        # Default input shape matches the training data: (channels, height, width).
        default_input_shape = (1, 141, 173)
        self.model = load_model_by_index(0, default_input_shape)  # start with model 0
        self.audio_data = []       # raw recorded frames (bytes) for WAV export
        self.is_recording = False  # recording-state flag toggled by the record button
        self.audio_buffer = []     # int16 chunks buffered for the live waveform plot

    def initUI(self):
        """Build the fixed-size 3x2 grid UI and start the waveform refresh timer."""
        self.setWindowTitle('咳嗽类型识别')
        self.setGeometry(100, 100, 1200, 700)  # initial window position/size
        self.setFixedSize(1200, 700)  # lock the window size

        # Six panels arranged in a 3x2 grid
        grid = QGridLayout()

        # Row 1, col 1 - load-file and record buttons
        self.file_button = QPushButton('本地读取', self)
        self.file_button.setFixedSize(150, 50)
        self.file_button.clicked.connect(self.load_audio_file)

        self.record_button = QPushButton('开始录音', self)
        self.record_button.setFixedSize(150, 50)
        self.record_button.clicked.connect(self.toggle_recording)

        vbox1 = QVBoxLayout()
        vbox1.addWidget(self.file_button)
        vbox1.addWidget(self.record_button)
        grid.addLayout(vbox1, 0, 0)

        # Row 1, col 2 - audio waveform display
        self.waveform_label = QLabel(self)
        self.waveform_label.setFixedSize(770, 210)
        grid.addWidget(self.waveform_label, 0, 1)

        # Row 2, col 1 - model selection buttons
        self.model_buttons = []
        model_names = ["Model 1", "Model 2", "ResNet-8", "ResNet-10", "ResNet-18"]
        vbox2 = QVBoxLayout()
        for i, name in enumerate(model_names):
            btn = QPushButton(name, self)
            btn.setFixedSize(150, 50)
            # Bind the loop index as a default argument so each button
            # loads its own model (avoids the late-binding closure pitfall).
            btn.clicked.connect(lambda _, x=i: self.change_model(x))
            vbox2.addWidget(btn)
            self.model_buttons.append(btn)
        grid.addLayout(vbox2, 1, 0)

        # Row 2, col 2 - feature images (Mel spectrogram + MFCC)
        hbox_feature = QHBoxLayout()
        self.feature_label1 = QLabel(self)
        self.feature_label1.setFixedSize(360, 270)  # Mel spectrogram image
        self.feature_label2 = QLabel(self)
        self.feature_label2.setFixedSize(360, 270)  # MFCC image
        hbox_feature.addWidget(self.feature_label1)
        hbox_feature.addWidget(self.feature_label2)
        grid.addLayout(hbox_feature, 1, 1)

        # Row 3, col 1 - recognize button
        self.recognize_button = QPushButton('开始识别', self)
        self.recognize_button.setFixedSize(150, 50)
        self.recognize_button.clicked.connect(self.recognize_cough_type)
        grid.addWidget(self.recognize_button, 2, 0)

        # Row 3, col 2 - recognition result label
        self.result_label = QLabel('识别结果: ', self)
        self.result_label.setFixedSize(400, 50)
        grid.addWidget(self.result_label, 2, 1)

        self.setLayout(grid)

        # Refresh the live waveform every 100 ms while chunks are buffered.
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.update_waveform)
        self.timer.start(100)

    def load_audio_file(self):
        """Open a file dialog, then process and plot the chosen WAV file."""
        options = QFileDialog.Options()
        file_path, _ = QFileDialog.getOpenFileName(self, "加载音频文件", "", "WAV文件 (*.wav);;所有文件 (*)",
                                                   options=options)
        if file_path:
            self.process_audio(file_path)
            self.display_waveform(file_path)

    def toggle_recording(self):
        """Start recording on first press; stop, save and process on second."""
        if not self.is_recording:
            self.is_recording = True
            self.record_button.setText('停止录音')
            self.audio_data = []
            self.audio_buffer = []  # clear the live-waveform buffer
            self.record_audio()
        else:
            self.is_recording = False
            self.record_button.setText('开始录音')
            self.save_and_process_recording()

    def record_audio(self):
        """Open an asynchronous input stream; chunks arrive via the callback."""
        def callback(in_data, frame_count, time_info, status):
            self.audio_data.append(in_data)  # keep raw bytes for the WAV file
            self.audio_buffer.append(np.frombuffer(in_data, dtype=np.int16))
            return (in_data, pyaudio.paContinue)

        # Keep a reference to the PyAudio instance so it can be terminated
        # later (the original anonymous instance leaked PortAudio resources).
        self.pa = pyaudio.PyAudio()
        self.audio_stream = self.pa.open(format=pyaudio.paInt16, channels=1, rate=TARGET_SAMPLE_RATE,
                                         input=True, frames_per_buffer=AUDIO_CHUNK, stream_callback=callback)
        self.audio_stream.start_stream()

    def save_and_process_recording(self):
        """Stop the stream, write the recording to temp/, then process it."""
        self.audio_stream.stop_stream()
        self.audio_stream.close()
        os.makedirs("temp", exist_ok=True)  # output dir may not exist yet
        start_time = time.strftime('%Y年%m月%d日_%H-%M-%S')
        wav_output_filename = f"temp/record_{start_time}.wav"
        wf = wave.open(wav_output_filename, 'wb')
        try:
            wf.setnchannels(1)
            wf.setsampwidth(self.pa.get_sample_size(pyaudio.paInt16))
            wf.setframerate(TARGET_SAMPLE_RATE)
            wf.writeframes(b''.join(self.audio_data))
        finally:
            wf.close()
        self.pa.terminate()  # release PortAudio (original never terminated it)
        self.process_audio(wav_output_filename)
        self.display_waveform(wav_output_filename)  # show the recorded waveform

    def process_audio(self, file_path):
        """Preprocess the audio, extract features, and display the feature images."""
        preprocessed_audio = preprocess_audio(file_path)
        self.feature = extract_features(preprocessed_audio, TARGET_SAMPLE_RATE)
        self.display_features(self.feature)

    def recognize_cough_type(self):
        """Classify the current feature and show the result; no-op if no audio yet."""
        if hasattr(self, 'feature'):
            cough_type = self.predict_cough_type(self.feature)
            self.result_label.setText(f'识别结果: {cough_type}')

    def predict_cough_type(self, feature):
        """Run the model on `feature` and return the predicted label string.

        The feature array is reshaped to (1, C, H, W) before inference.
        """
        if feature.ndim == 2:
            # (H, W) -> (1, 1, H, W)
            feature = feature[np.newaxis, np.newaxis, :, :]
        elif feature.ndim == 3:
            # (H, W, C) -> (1, C, H, W); works for any C, including C == 1
            # (the original had two byte-identical branches here).
            feature = np.transpose(feature, (2, 0, 1))[np.newaxis, ...]
        tensor_feature = torch.tensor(feature, dtype=torch.float32)
        with torch.no_grad():
            outputs = self.model(tensor_feature)
            _, predicted = torch.max(outputs, 1)
        return LABELS[predicted.item()]

    def display_features(self, feature):
        """Render the Mel-spectrogram and MFCC halves of `feature` as images."""
        try:
            os.makedirs("temp", exist_ok=True)  # image output dir may not exist yet

            # Mel spectrogram: first 128 columns of the feature matrix
            mel_feature = feature[:, :128]
            plt.figure(figsize=(5, 4))
            plt.imshow(mel_feature.T, aspect='auto', origin='lower')
            plt.title('Mel Spectrogram Feature')
            plt.xlabel('Time')
            plt.ylabel('Frequency')
            plt.colorbar(format='%+2.0f dB')

            mel_feature_img_path = "temp/mel_feature.png"
            plt.savefig(mel_feature_img_path, bbox_inches='tight')
            self.feature_label1.setPixmap(
                QtGui.QPixmap(mel_feature_img_path).scaled(self.feature_label1.size(), QtCore.Qt.KeepAspectRatio))

            # MFCC: columns after the first 128
            mfcc_feature = feature[:, 128:]
            plt.figure(figsize=(5, 4))
            plt.imshow(mfcc_feature.T, aspect='auto', origin='lower')
            plt.title('MFCC Feature')
            plt.xlabel('Time')
            plt.ylabel('MFCC Coefficients')
            plt.colorbar()

            mfcc_feature_img_path = "temp/mfcc_feature.png"
            plt.savefig(mfcc_feature_img_path, bbox_inches='tight')
            self.feature_label2.setPixmap(
                QtGui.QPixmap(mfcc_feature_img_path).scaled(self.feature_label2.size(), QtCore.Qt.KeepAspectRatio))

            plt.close('all')
        except Exception as e:
            print(f"Error displaying features: {e}")

    def update_waveform(self):
        """Timer slot: plot and display any buffered audio chunks (live waveform)."""
        if len(self.audio_buffer) > 0:
            audio_chunk = np.concatenate(self.audio_buffer)
            self.audio_buffer = []  # consume the buffer

            try:
                os.makedirs("temp", exist_ok=True)  # image output dir may not exist yet
                plt.figure(figsize=(12, 2))
                plt.plot(audio_chunk)
                plt.title('Waveform')
                plt.xlabel('Time')
                plt.ylabel('Amplitude')
                plt.tight_layout()

                waveform_img_path = "temp/waveform.png"
                plt.savefig(waveform_img_path, bbox_inches='tight')
                self.waveform_label.setPixmap(
                    QtGui.QPixmap(waveform_img_path).scaled(self.waveform_label.size(), QtCore.Qt.KeepAspectRatio))

                plt.close()
            except Exception as e:
                print(f"Error updating waveform: {e}")

    def display_waveform(self, file_path):
        """Plot the full waveform of an audio file and show it in the UI."""
        try:
            y, sr = librosa.load(file_path, sr=None)  # sr=None keeps native rate
            os.makedirs("temp", exist_ok=True)  # image output dir may not exist yet
            plt.figure(figsize=(12, 2))
            plt.plot(np.linspace(0, len(y) / sr, num=len(y)), y)
            plt.title('Waveform')
            plt.xlabel('Time')
            plt.ylabel('Amplitude')
            plt.tight_layout()

            waveform_img_path = "temp/waveform.png"
            plt.savefig(waveform_img_path, bbox_inches='tight')
            self.waveform_label.setPixmap(
                QtGui.QPixmap(waveform_img_path).scaled(self.waveform_label.size(), QtCore.Qt.KeepAspectRatio))
            plt.close()
        except Exception as e:
            print(f"Error displaying waveform: {e}")

    def change_model(self, index):
        """Load the model selected by `index`.

        The input shape is derived from the current feature if one exists,
        otherwise the default (1, 141, 173) is used.
        """
        input_shape = (1, 141, 173)  # default; also the fallback for odd ndim
        if hasattr(self, 'feature'):
            feature = self.feature
            if feature.ndim == 2:
                input_shape = (1, feature.shape[0], feature.shape[1])
            elif feature.ndim == 3:
                # (H, W, C) -> (C, H, W); the original's two branches were
                # equivalent since shape[-1] == 1 gives channels == 1.
                # (Fixes UnboundLocalError for any other ndim.)
                input_shape = (feature.shape[-1], feature.shape[0], feature.shape[1])
        self.model = load_model_by_index(index, input_shape)
        print(f"Model {index + 1} loaded: {MODEL_PATHS[index]}")

def main():
    """Create the Qt application, show the main window, and run the event loop."""
    app = QtWidgets.QApplication(sys.argv)
    window = CoughRecognitionApp()
    window.show()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
