import sys
from PyQt5 import QtGui, QtWidgets, QtCore
from Qt_ui import Qt_ui
import samplerate
import base64
import tensorflow as tf
import soundfile as sf
from scipy import signal
import matplotlib.pyplot as plt
import cv2
import numpy as np
from PIL import Image
import os
import random
import time
import paho.mqtt.client as mqtt
import struct
import json
from polycoherence import _polycoherence_2d

# --- Aliyun IoT MQTT connection settings ---
# NOTE(review): client_id embeds a fixed timestamp (1719976922248) that is part
# of the hmacsha1 sign-in scheme; the `timestamp` variable below is computed
# but never used — confirm whether the credentials should be re-signed per run.
client_id = f"picture|securemode=2,signmethod=hmacsha1,timestamp=1719976922248|"
timestamp = str(int(time.time()))
username = f"picture&k17qfYOFMoU"
password = f"AF550D71D6D827463B6C1E9C06A52BA1C2715477"
broker = f"k17qfYOFMoU.iot-as-mqtt.cn-shanghai.aliyuncs.com"
port = 1883
# topic = f"/sys/k17qfYOFMoU/picture/thing/event/property/post"
topic = f"/k17qfYOFMoU/picture/user/update"
image_path = "./hs_img.jpg"

# --- UI background screens, pre-loaded once at import time as BGR arrays.
# NOTE(review): cv2.imread returns None (no exception) when a path is missing;
# setImage would then fail later — confirm ../bg_image/* exist at runtime.
im_init = cv2.imread('../bg_image/Init.png')
im_AS = cv2.imread('../bg_image/AS.png')
im_MR = cv2.imread('../bg_image/MR.png')
im_MS = cv2.imread('../bg_image/MS.png')
im_MVP = cv2.imread('../bg_image/MVP.png')
im_wait = cv2.imread('../bg_image/GetWait.png')
im_taking = cv2.imread('../bg_image/GetData.png')
im_normal = cv2.imread('../bg_image/Norml.png')
im_welcome = cv2.imread('../bg_image/Welcome.png')
im_testing = cv2.imread('../bg_image/Wait.png')
im_none = cv2.imread('../bg_image/NoData.png')

def on_connect(client, userdata, flags, rc):
    """MQTT connect callback: log the result code and subscribe to the
    device property-post topic."""
    print(f"Connected with result code {rc}")
    client.subscribe("/sys/k17qfYOFMoU/picture/thing/event/property/post")

def on_message(client, userdata, msg):
    """MQTT message callback: log the topic and the raw payload."""
    print(f"{msg.topic} {msg.payload}")

def band_pass_filter(original_signal, order, fc1, fc2, fs):
    """Apply a Butterworth band-pass filter to a signal.

    Args:
        original_signal: 1-D sequence of samples.
        order: filter order N.
        fc1: low cut-off frequency in Hz.
        fc2: high cut-off frequency in Hz.
        fs: sampling rate in Hz.

    Returns:
        The filtered signal, same length as the input.
    """
    # Cut-offs normalized to the Nyquist frequency (2*fc/fs == fc/(fs/2)).
    normalized_band = [2 * fc1 / fs, 2 * fc2 / fs]
    numerator, denominator = signal.butter(N=order, Wn=normalized_band, btype='bandpass')
    return signal.lfilter(numerator, denominator, original_signal)

def plot_signal(audio_data, title=None, save_path='../audio_img/audio_img.jpg'):
    """Plot an audio waveform and save it as an image file.

    Args:
        audio_data: 1-D sequence of audio samples.
        title: optional plot title. The original accepted this parameter but
            silently ignored it, even though the caller passes one — it is now
            actually drawn.
        save_path: output image path (defaults to the path the rest of the
            app reads back).
    """
    fig = plt.figure(figsize=(6, 3), dpi=200)
    plt.plot(audio_data, linewidth=1)
    if title:
        plt.title(title)
    plt.grid()
    plt.savefig(save_path)
    # Close the figure explicitly: the original leaked one figure per call,
    # which pyplot keeps alive indefinitely.
    plt.close(fig)

class QtMain(Qt_ui):
    """Main window: loads heart-sound audio, runs TFLite classifiers on its
    bispectrum, and publishes the diagnosis plus the waveform image via MQTT.
    """

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        # Connect to the Aliyun IoT MQTT broker and run the network loop in a
        # background thread.
        self.client = mqtt.Client(client_id=client_id)
        self.client.username_pw_set(username, password)
        self.client.on_connect = on_connect
        self.client.on_message = on_message
        self.client.connect(broker, port, 60)
        self.client.loop_start()
        self.flag = 0
        # -1 means "no heart-sound data captured yet"; otherwise holds the
        # argmax class index of the last classifier run.
        self.TestFlag = -1

        self.Show_Main()

    def setImage(self, img):
        """Display a BGR OpenCV image on the main QLabel.

        Args:
            img: BGR image array as returned by cv2.imread.
        """
        # Resize to the label size, then convert OpenCV BGR to Qt RGB.
        res = cv2.resize(img, (800, 421), interpolation=cv2.INTER_CUBIC)
        img2 = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
        _image = QtGui.QImage(img2[:], img2.shape[1], img2.shape[0], img2.shape[1] * 3,
                              QtGui.QImage.Format_RGB888)
        jpg_out = QtGui.QPixmap(_image)
        self.label.setPixmap(jpg_out)

    def Show_Main(self):
        """Reset the diagnosis state and show the welcome screen."""
        self.TestFlag = -1
        self.setImage(im_welcome)

    def mqtt_publish(self, diagnosis, image_block=None, rate=None):
        """Publish one diagnosis message to the MQTT topic.

        Args:
            diagnosis: human-readable diagnosis text.
            image_block: one base64 chunk of the waveform image, or None.
            rate: heart-rate value, or None.
        """
        random_number = random.randint(10, 200)
        payload = {
            "method": "thing.event.property.post",
            "params": {
                "number": str(random_number),
                "diagnosis": str(diagnosis),
                "image": image_block,
                "rate": rate
            }
        }
        # Serialize once instead of three separate json.dumps calls.
        body = json.dumps(payload)
        result = self.client.publish(topic, body)
        print(f"Publishing number {random_number} {len(body)}: {body} with result: {result}")
        print(str(diagnosis))

    def get_image_base64_block(self, image_path="../audio_img/audio_img.jpg", block_size=102400):
        """Yield the base64 encoding of *image_path* in chunks.

        Args:
            image_path: image file to encode.
            block_size: maximum characters per yielded chunk.

        Yields:
            Consecutive base64 string chunks of at most block_size characters.
        """
        with open(image_path, "rb") as image_file:  # binary read
            image_data = base64.b64encode(image_file.read()).decode('utf-8')
        for i in range(0, len(image_data), block_size):
            yield image_data[i:i + block_size]

    def Get_Audio_Data(self):
        """Load the recorded audio, preprocess it, plot it, and build the
        bispectrum feature tensor (global ``dataset``) for the classifiers."""
        self.setImage(im_taking)
        cv2.waitKey(1)

        # Recording is done externally, e.g.:
        # arecord -D "plughw:2,0" -f dat -c 1 -r 2000 -d 10 /home/pi/Desktop/Audio/test.wav
        audio_path = '../Get_audio.wav'
        self.setImage(im_wait)
        audio_data, fs = sf.read(audio_path)
        print("原始音频数据：", audio_data.shape, "采样率：", fs)
        # Band-pass 25-400 Hz, resample to 1 kHz, then normalize to [-1, 1].
        audio_data = band_pass_filter(audio_data, 2, 25, 400, fs)
        down_sample_audio_data = samplerate.resample(
            audio_data.T, 1000 / fs, converter_type='sinc_best').T
        down_sample_audio_data = down_sample_audio_data / np.max(np.abs(down_sample_audio_data))
        print("预处理音频数据：", down_sample_audio_data.shape, "采样率：", fs)
        plot_signal(down_sample_audio_data, title=audio_path)
        im = cv2.imread('../audio_img/audio_img.jpg')
        self.setImage(im)

        # Bispectrum of the last 2.5 s -> 256x256 magnitude map scaled to [0, 255].
        freq1, freq2, bi_spectrum = _polycoherence_2d(
            down_sample_audio_data[-2500:], nfft=1024, fs=1000, nperseg=256)
        print("Size1: ", np.array(bi_spectrum).shape)
        bi_spectrum = np.abs(np.array(bi_spectrum)).reshape((256, 256, 1))
        # NOTE(review): a perfectly flat spectrum would divide by zero here —
        # assumed impossible for real recordings; confirm.
        bi_spectrum = 255 * (bi_spectrum - np.min(bi_spectrum)) / \
                      (np.max(bi_spectrum) - np.min(bi_spectrum))

        print("Size2: ", np.array(bi_spectrum).shape)

        # Single-sample batch (1, 256, 256, 1) for the TFLite interpreters.
        # Replaces the original ones/vstack/delete dance with its equivalent.
        global dataset
        dataset = np.array([bi_spectrum], dtype='float32')
        self.TestFlag = 1
        print("OK")

    def _run_tflite(self, model_path):
        """Run one TFLite model on the global ``dataset`` tensor.

        Args:
            model_path: path to the .tflite model file.

        Returns:
            The output vector for the first (only) batch element.
        """
        interpreter = tf.lite.Interpreter(model_path=model_path)
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()
        interpreter.set_tensor(input_details[0]['index'], dataset)
        interpreter.invoke()
        return interpreter.get_tensor(output_details[0]['index'])[0]

    def Test_Two(self):
        """Binary normal/abnormal classification; stores argmax in TestFlag."""
        self.setImage(im_testing)
        output_data = self._run_tflite('../hs_file/normal_abnormal.tflite')
        print(output_data)
        print(np.argmax(output_data))
        print()
        if np.argmax(output_data) == 0:  # class 0 == normal
            self.setImage(im_normal)
        self.TestFlag = np.argmax(output_data)

    def Test_Four(self):
        """Four-way disease classification; stores argmax in TestFlag."""
        self.setImage(im_testing)
        output_data = self._run_tflite('../hs_file/four_categories.tflite')
        print(output_data)
        print(np.argmax(output_data))
        print()
        self.TestFlag = np.argmax(output_data)

    def Diagnosis_Result(self):
        """Run the classifiers, show the result screen, and publish the
        diagnosis together with the base64-chunked waveform image."""
        if self.TestFlag == -1:
            # No audio has been captured yet.
            print("还没有心音数据")
            self.setImage(im_none)
            self.mqtt_publish("还没有心音数据")
            return
        self.Test_Two()
        diagnosis = ""
        if self.TestFlag == 0:
            self.setImage(im_normal)
            diagnosis = "正常"
        else:
            # Abnormal: refine with the four-class model.
            self.Test_Four()
            four_class = {
                0: (im_AS, "主动脉瓣狭窄"),
                1: (im_MS, "二尖瓣狭窄"),
                2: (im_MR, "二尖瓣反流"),
                3: (im_MVP, "二尖瓣脱落"),
            }
            if self.TestFlag in four_class:
                image, diagnosis = four_class[self.TestFlag]
                self.setImage(image)
        print(diagnosis)
        # The image exceeds one MQTT payload, so publish it in chunks,
        # repeating the diagnosis with each chunk.
        for image_block in self.get_image_base64_block():
            print(image_block)
            self.mqtt_publish(diagnosis, image_block)
        self.TestFlag = -1

    def Exit_APP(self):
        """Stop MQTT networking and quit the Qt application."""
        print("Exit")
        self.client.loop_stop()
        # BUG FIX: the original called sys.exit(app.exec_()), which tries to
        # re-enter the already-running event loop (and depends on the global
        # `app`). Quitting the running application lets the sys.exit() in the
        # __main__ block unwind normally.
        QtWidgets.QApplication.quit()

if __name__ == "__main__":
    # QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
    # Create the Qt application, show the main window, and block in the event
    # loop until the application quits. NOTE: `app` is also referenced by
    # QtMain.Exit_APP, so the name must stay module-global.
    app = QtWidgets.QApplication(sys.argv)
    window = QtMain()
    window.show()
    # window.showFullScreen()
    sys.exit(app.exec_())
