import sys
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtWidgets import QFrame
from Qt_Audio import Ui_Form
import samplerate
import tensorflow as tf
import soundfile as sf
from scipy import signal
import matplotlib.pyplot as plt
from polycoherence import _polycoherence_2d
import cv2
import numpy as np
from PIL import Image
import os
import time
import paho.mqtt.client as mqtt
import struct
import json
import threading

DEV_ID = "936758781" # OneNet device ID (used as the MQTT client id)
PRO_ID = "512539" # OneNet product ID (used as the MQTT username)
AUTH_INFO = "XlrDquIoIw80UavdlWJYq1=W=xM="  # device API key (MQTT password)
# NOTE(review): credentials are hardcoded in source — consider loading them
# from the environment or a config file instead of committing them.
# Master-APIkey  j5RjN96NceJsiQ=EZP72zbeIgQE=
# access_key     fcqQG2CTENi0IYzaF2E3RwSbGZ4uFAK0my1uuMBC4XM=

# OneNet MQTT "$dp" payload type bytes (first byte of the packet).
TYPE_JSON = 0x01   # JSON datapoints payload
TYPE_FLOAT = 0x17  # float payload (unused in this file)

# Pre-load every UI status image once at startup. Paths are relative to the
# working directory; cv2.imread returns None for a missing file, which would
# only surface later inside setImage() — TODO confirm Img/ exists at runtime.
im_init = cv2.imread('Img/Init.bmp')
im_AS = cv2.imread('Img/AS.bmp')
im_MR = cv2.imread('Img/MR.bmp')
im_MS = cv2.imread('Img/MS.bmp')
im_MVP = cv2.imread('Img/MVP.bmp')
im_wait = cv2.imread('Img/GetWait.bmp')
im_taking = cv2.imread('Img/GetData.bmp')
im_normal = cv2.imread('Img/Norml.bmp')
im_welcome = cv2.imread('Img/Welcome.bmp')
im_testing = cv2.imread('Img/Wait.bmp')
im_none = cv2.imread('Img/NoData.bmp')

def build_payload(msg_type, payload):
    """Build a OneNet MQTT "$dp" packet: 1-byte type, 2-byte big-endian
    length, then the payload bytes.

    Args:
        msg_type: payload type byte (e.g. TYPE_JSON). Renamed from ``type``,
            which shadowed the builtin.
        payload: the data as ``str`` (UTF-8 encoded here) or raw
            ``bytes``/``bytearray``. The original silently emitted a
            length-less packet for non-str payloads.

    Returns:
        bytearray with the framed packet.
    """
    packet = bytearray()
    packet.extend(struct.pack("!B", msg_type))
    if isinstance(payload, str):
        payload = payload.encode('utf-8')
    if isinstance(payload, (bytes, bytearray)):
        # !H = unsigned 16-bit big-endian length prefix, then the raw bytes.
        packet.extend(struct.pack("!H", len(payload)))
        packet.extend(payload)
    return packet

# Called when the client receives the broker's CONNACK (i.e. the connect
# attempt succeeded or failed): publish an "OK" datapoint as a ready marker.
def on_connect(client, userdata, flags, rc):
    """paho-mqtt on_connect callback.

    Fixed: paho-mqtt (callback API v1) invokes this as
    ``on_connect(client, userdata, flags, rc)``; the original declared
    ``(client, flags, rc, userdata)``, so ``flags``/``rc`` were mis-bound.
    """
    body = {
        "datastreams": [
            {
                "id": "Raspberry",  # OneNet datastream name
                "datapoints": [
                    {
                        "value": "OK"  # datapoint value
                    }
                ]
            }
        ]
    }
    json_body = json.dumps(body)
    packet = build_payload(TYPE_JSON, json_body)
    client.publish("$dp", packet, qos=1)  # qos = quality-of-service level

# Called whenever a publish arrives from the broker: log the decoded payload.
def on_message(client, userdata, msg):
    """paho-mqtt on_message callback — print the UTF-8 decoded payload."""
    text = msg.payload.decode('utf-8')
    print("接收成功:" + text)

# Triggered once a message has been handed off to the broker (PUBACK for QoS 1).
def on_publish(client, userdata, mid):
    """paho-mqtt on_publish callback — log a simple send confirmation."""
    print("Send OK")

def band_pass_filter(original_signal, order, fc1, fc2, fs):
    """Apply a Butterworth band-pass filter to a signal.

    Args:
        original_signal: input sample sequence.
        order: filter order N.
        fc1, fc2: lower/upper cutoff frequencies in Hz.
        fs: sampling rate in Hz.

    Returns:
        The filtered signal (same length as the input).
    """
    # scipy expects cutoffs normalized to the Nyquist frequency (fs / 2).
    nyquist = fs / 2
    numerator, denominator = signal.butter(
        N=order, Wn=[fc1 / nyquist, fc2 / nyquist], btype='bandpass')
    return signal.lfilter(numerator, denominator, original_signal)

def plot_signal(audio_data, title=None):
    """Plot *audio_data* and save the figure to hs_data/hs_img.jpg.

    Args:
        audio_data: 1-D sequence of samples to plot.
        title: optional figure title. Fixed: the original accepted this
            parameter but never used it.
    """
    fig = plt.figure(figsize=(6, 4), dpi=200)
    plt.plot(audio_data, linewidth=1)
    plt.grid()
    if title is not None:
        plt.title(title)
    plt.savefig('hs_data/hs_img.jpg')
    # Close the figure so repeated recordings don't accumulate open figures
    # (matplotlib keeps every figure alive until explicitly closed).
    plt.close(fig)

class PyQtMainEntry(QFrame, Ui_Form):
    """Main window: records a heart sound, computes its bispectrum image and
    runs two TFLite classifiers, publishing the diagnosis to OneNet via MQTT.
    """

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.setImage(im_init)
        # -1 means "no heart-sound data captured yet".
        self.TestFlag = -1
        # OneNet MQTT: device id as client id, product id / API key as credentials.
        self.client = mqtt.Client(client_id=DEV_ID, protocol=mqtt.MQTTv311)
        self.client.on_connect = on_connect
        self.client.on_publish = on_publish
        self.client.on_message = on_message
        self.client.username_pw_set(username=PRO_ID, password=AUTH_INFO)
        self.client.connect('183.230.40.39', port=6002, keepalive=120)
        # Run the MQTT network loop on a daemon thread so the GUI stays responsive
        # and the thread dies with the process.
        self.Mqtt_Loop = threading.Thread(target=self.client.loop_forever, daemon=True)
        self.Mqtt_Loop.start()

        self.MainShow()

    def setImage(self, img):
        """Resize a BGR OpenCV image to the label size and display it."""
        res = cv2.resize(img, (771, 421), interpolation=cv2.INTER_CUBIC)
        # OpenCV loads BGR; Qt expects RGB.
        img2 = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
        # bytesPerLine = width * 3 channels (cvtColor output is contiguous).
        _image = QtGui.QImage(img2[:], img2.shape[1], img2.shape[0], img2.shape[1] * 3,
                              QtGui.QImage.Format_RGB888)
        jpg_out = QtGui.QPixmap(_image)  # QPixmap copies the pixel data
        self.label.setPixmap(jpg_out)

    def MainShow(self):
        """Reset to the welcome screen and clear any captured data flag."""
        self.TestFlag = -1
        self.setImage(im_welcome)

    def Get_Data(self):
        """Record 10 s of heart sound, preprocess it, and build the model input.

        Leaves the (1, 256, 256, 1) float32 batch in the module-global
        ``dataset`` and sets ``self.TestFlag = 1``.
        """
        self.setImage(im_taking)
        cv2.waitKey(1)
        # Record 10 seconds of mono 16 kHz audio from the USB sound card.
        os.system('arecord -D "plughw:2,0" -f dat -c 1 -r 16000 -d 10 /home/pi/Desktop/Audio/test.wav')
        # NOTE(review): recording goes to an absolute path but is read back via
        # a relative one — works only if cwd is /home/pi/Desktop/Audio; confirm.
        audio_path = 'test.wav'
        self.setImage(im_wait)
        audio_data, fs = sf.read(audio_path)
        print("原始音频数据点数：", audio_data.shape, "采样率：", fs)
        # Keep the 25–400 Hz band (where heart sounds live), resample to 1 kHz,
        # then normalize to [-1, 1].
        audio_data = band_pass_filter(audio_data, 2, 25, 400, fs)
        down_sample_audio_data = samplerate.resample(
            audio_data.T, 1000 / fs, converter_type='sinc_best').T
        down_sample_audio_data = down_sample_audio_data / np.max(np.abs(down_sample_audio_data))
        print("现音频数据点数：", down_sample_audio_data.shape, "采样率：", 1000)
        plot_signal(down_sample_audio_data, title=audio_path)
        im = cv2.imread('hs_data/hs_img.jpg')
        self.setImage(im)

        # Bispectrum of the last 2.5 s; magnitude, min-max scaled to [0, 255].
        freq1, fre2, bi_spectrum = _polycoherence_2d(
            down_sample_audio_data[-2500:], nfft=1024, fs=1000, nperseg=256)
        print("Size1: ", np.array(bi_spectrum).shape)
        bi_spectrum = np.array(abs(bi_spectrum))
        bi_spectrum = bi_spectrum.reshape((256, 256, 1))
        bi_spectrum = 255 * (bi_spectrum - np.min(bi_spectrum)) / \
                      (np.max(bi_spectrum) - np.min(bi_spectrum))

        print("Size2: ", np.array(bi_spectrum).shape)

        # Single-sample batch for the TFLite models. (Replaces the original
        # ones/vstack/delete sequence, which produced the same array.)
        global dataset
        dataset = np.array([bi_spectrum], dtype='float32')
        self.TestFlag = 1
        print("OK")

    def Test_Two(self):
        """Run the normal-vs-abnormal TFLite model on ``dataset``.

        Stores the predicted class index (0 = normal) in ``self.TestFlag``.
        """
        self.setImage(im_testing)
        interpreter2 = tf.lite.Interpreter(model_path='model/normal_abnormal.tflite')
        interpreter2.allocate_tensors()
        input_details = interpreter2.get_input_details()
        output_details = interpreter2.get_output_details()
        interpreter2.set_tensor(input_details[0]['index'], dataset)
        interpreter2.invoke()
        output_data = interpreter2.get_tensor(output_details[0]['index'])[0]
        print(output_data)
        print(np.argmax(output_data))
        print()
        if np.argmax(output_data) == 0:
            self.setImage(im_normal)
        self.TestFlag = np.argmax(output_data)

    def Test_Four(self):
        """Run the four-disease TFLite model on ``dataset``.

        Stores the predicted class index (0=AS, 1=MS, 2=MR, 3=MVP) in
        ``self.TestFlag``.
        """
        self.setImage(im_testing)
        interpreter4 = tf.lite.Interpreter(model_path='model/four_categories.tflite')
        interpreter4.allocate_tensors()
        input_details4 = interpreter4.get_input_details()
        output_details4 = interpreter4.get_output_details()
        interpreter4.set_tensor(input_details4[0]['index'], dataset)
        interpreter4.invoke()
        output_data = interpreter4.get_tensor(output_details4[0]['index'])[0]
        print(output_data)
        print(np.argmax(output_data))
        print()
        self.TestFlag = np.argmax(output_data)

    def Diagnosis(self):
        """Two-stage diagnosis: normal/abnormal first, then the four-disease
        model if abnormal. Shows the result image and publishes the Chinese
        diagnosis text to the OneNet "Data" datastream.
        """
        if self.TestFlag == -1:
            print("还没有心音数据")
            self.setImage(im_none)
            return
        self.Test_Two()
        # Renamed from `str`, which shadowed the builtin.
        result_text = ""
        if self.TestFlag == 0:
            self.setImage(im_normal)
            result_text = "正常"
        else:
            self.Test_Four()
            if self.TestFlag == 0:
                self.setImage(im_AS)
                result_text = "主动脉瓣狭窄"
            elif self.TestFlag == 1:
                self.setImage(im_MS)
                result_text = "二尖瓣狭窄"
            elif self.TestFlag == 2:
                self.setImage(im_MR)
                result_text = "二尖瓣反流"
            elif self.TestFlag == 3:
                self.setImage(im_MVP)
                result_text = "二尖瓣脱落"
        print(result_text)
        body = {
            "datastreams": [
                {
                    "id": "Data",  # OneNet datastream name
                    "datapoints": [
                        {
                            "value": result_text  # datapoint value
                        }
                    ]
                }
            ]
        }
        json_body = json.dumps(body)
        packet = build_payload(TYPE_JSON, json_body)
        self.client.publish("$dp", packet, qos=1)  # qos = quality of service
        self.TestFlag = -1

    def Exit(self):
        """Quit the application by stopping the Qt event loop.

        Fixed: the original called ``sys.exit(app.exec_())`` here, which
        re-enters an already-running event loop instead of stopping it.
        quit() makes ``app.exec_()`` in ``__main__`` return, and the
        ``sys.exit`` there performs the actual exit.
        """
        print("Exit")
        QtWidgets.QApplication.quit()
            
if __name__ == "__main__":
    # High-DPI scaling must be enabled before the QApplication is constructed.
    QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
    # NOTE: `app` stays module-global — PyQtMainEntry.Exit references it.
    app = QtWidgets.QApplication(sys.argv)
    window = PyQtMainEntry()
    window.show()
    # window.showFullScreen()
    # Run the Qt event loop and exit the process with its return code.
    sys.exit(app.exec_())
