from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.Qt import QApplication, QMainWindow, QWidget, QProgressBar, QPushButton, QGridLayout, QBasicTimer, \
    pyqtSignal, QThread, QImage, QPixmap
from PyQt5 import QtGui
from UI_ import Ui_MainWindow
from yuying import get
import qdarkstyle
import sys
import socket
import time
import cv2
import numpy as np
import joblib
import math
import multiprocessing
import threading
from yuying import get


from pose.estimator import TfPoseEstimator
from pose.networks import get_graph_path, model_wh
from classier import get_joint_theta, joint_linear_k

# Allocate GPU memory on demand (instead of grabbing it all up front)
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession


# TF1-style session configured so GPU memory grows as needed rather than
# being fully reserved at startup.
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)


# Four-character codec used when recording the annotated video to AVI.
fou = cv2.VideoWriter_fourcc(*'XVID')


# UDP endpoints. `port` (8880) is not referenced anywhere in this file —
# presumably reserved for an image stream; verify against the client side.
# `port1` (8881) is bound by Image_thread_1 for the speech-text channel.
port = ('0.0.0.0', 8880)
port1 = ('0.0.0.0', 8881)

# Module-level shared state exchanged between worker threads and the GUI.
Image_thread1 = None   # placeholder for an image-receiver thread instance
yuying_thread2 = None  # placeholder for a speech-receiver thread instance
img_data = None        # latest pose-classification result (written by test.fun)
yuyi = None            # latest recognized speech text (written by Image_thread_1.run / test.write)

# Shortcut to the Qt event pump; called inside the capture loop so the UI
# stays responsive while the blocking while-loop runs.
gui = QtGui.QGuiApplication.processEvents

class Image_thread_1(QThread):
    """Background thread receiving speech-recognition text over UDP.

    Waits for a client to announce itself on ``port1``, acknowledges the
    handshake, then keeps reading text datagrams and publishing the decoded
    string via the module-level ``yuyi`` variable.

    Signals:
        trigger(str): '1' once a client has connected, '0' when waiting for
            the handshake times out.
    """

    trigger = pyqtSignal(str)

    def __init__(self):
        super(Image_thread_1, self).__init__()
        # UDP socket bound to the speech channel; a short timeout lets the
        # loops in run() notice a stop() request instead of blocking forever.
        self.sock_c = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock_c.bind(port1)
        self.sock_c.settimeout(4)
        self.c_addr = None  # peer address; known only after the first packet
        self.go = True      # run-loop flag; cleared by stop()

    def stop(self):
        """Ask the receive loop to exit and notify the peer, if any.

        Fix: the original unconditionally used ``self.c_addr``, raising
        AttributeError when stop() was called before any client connected.
        """
        self.go = False
        if self.c_addr is not None:
            self.sock_c.sendto('stop'.encode('utf-8'), self.c_addr)

    def run(self):
        global yuyi
        while self.go:
            try:
                # Handshake: the first datagram reveals the client address.
                a, self.c_addr = self.sock_c.recvfrom(1024)
                self.sock_c.sendto('start1'.encode('utf-8'), self.c_addr)
                self.trigger.emit('1')
                while self.go:
                    try:
                        self.data = self.sock_c.recv(1024)
                        yuyi = self.data.decode('utf-8')
                        self.sock_c.sendto('start'.encode('utf-8'), self.c_addr)
                    except (socket.timeout, OSError, UnicodeDecodeError):
                        # Best-effort: a missed/garbled packet just means we
                        # wait for the next one (narrowed from a bare except).
                        pass
            except (socket.timeout, OSError):
                # No client yet (or socket error): report and keep listening.
                self.trigger.emit('0')


class test(QMainWindow, Ui_MainWindow):
    """Main window: grabs webcam frames, runs pose estimation on each one,
    classifies the pose, and tallies per-pose counts and durations while
    also polling speech-recognition text.
    """

    def closeEvent(self, event):
        # Fix: the original called sys.exit(app.exec_()), which started a
        # second Qt event loop instead of shutting down. Stop the worker
        # loops and let Qt close the window normally.
        self.start_ = False
        event.accept()

    # Process pose data
    def write_infor(self):
        """Poll the latest pose label every 5 s and update the counters.

        When the label changes, the *previous* pose's LCD counter is
        incremented; when it stays the same, that pose's accumulated time
        display gains 0.1 s. Runs in a background thread until ``start_``
        is cleared.
        """
        lcds = {'[1]': self.lcdNumber, '[2]': self.lcdNumber_2,
                '[3]': self.lcdNumber_3, '[4]': self.lcdNumber_4,
                '[5]': self.lcdNumber_5, '[6]': self.lcdNumber_6}
        edits = {'[1]': self.timeEdit, '[2]': self.timeEdit_2,
                 '[3]': self.timeEdit_3, '[4]': self.timeEdit_4,
                 '[5]': self.timeEdit_5, '[6]': self.timeEdit_6}
        while self.start_:
            time.sleep(5)
            global img_data
            self.info = img_data
            if self.info_pre != self.info:
                # Pose changed: count one occurrence of the previous pose.
                lcd = lcds.get(self.info_pre)
                if lcd is not None:
                    lcd.display(lcd.intValue() + 1)
            else:
                # Same pose held: displayed value is seconds with one decimal
                # place, so strip the dot, add a tenth, and re-render.
                edit = edits.get(self.info_pre)
                if edit is not None:
                    a = (int(edit.text().replace('.', '')) + 1) / 10
                    edit.setText(str(a))
            self.info_pre = self.info

    # Process speech data
    def write(self):
        """Poll speech recognition every 5 s and show the text on label_3."""
        while self.start_:
            time.sleep(5)
            global yuyi
            yuyi = get()
            if yuyi:  # skip empty string and None
                self.label_3.setText(yuyi)

    # Per-frame skeleton processing
    def fun(self):
        """Run pose estimation + classification on the current frame.

        Draws the skeleton and an FPS overlay onto ``self.show`` and stores
        the classifier's label (or an error string) in the global
        ``img_data``.
        """
        global img_data
        start = time.time()
        humans = self.e.inference(self.show)
        npimg, joints, bboxes, xcenter = TfPoseEstimator.get_skeleton_1(self.show, humans, imgcopy=False)
        # NOTE(review): the original also called draw_humans here into an
        # unused variable; with imgcopy=False it only re-drew the same
        # skeleton, so the redundant call was removed.
        joints = np.array(joints)
        out = 'no'
        if joints.shape != (1, 18, 2):
            # Exactly one fully detected person (18 joints) is required.
            out = 'wei shi bei dao'
        else:
            joints = joints.reshape(1, 36)
            # Fix: the original reloaded the GBDT model from disk on every
            # frame; load it once and cache it on the instance.
            if not hasattr(self, '_gbdt'):
                self._gbdt = joblib.load('GBDT.model')
            x_data = np.array(joint_linear_k(joints))
            a = np.array(get_joint_theta(x_data, joints))
            # Undefined angles (NaN) are mapped to 2*pi so the classifier
            # always receives finite features.
            a[np.isnan(a)] = 2 * math.pi
            result = self._gbdt.predict(a)
            out = str(result)
            print(out)
        img_data = out

        self.show = TfPoseEstimator.draw_humans(self.show, humans, imgcopy=False)
        end = time.time()
        fps = 1 / (end - start)
        cv2.putText(self.show, 'FPS: %.2f' % fps, (30, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

    def start_all(self):
        """Start the background pollers and run the capture/annotate loop."""
        self.start_ = True

        # Fix: the original passed target=self.write() — calling the method
        # immediately, which blocked here forever — and used multiprocessing,
        # whose child processes cannot touch the parent's Qt widgets. Use
        # daemon threads with the bound methods as targets instead.
        threading.Thread(target=self.write, daemon=True).start()
        threading.Thread(target=self.write_infor, daemon=True).start()

        self.cap = cv2.VideoCapture(0)
        size = (int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        out = cv2.VideoWriter('C:/Users/Administrator/Desktop/out.avi', fou, 10.0, size)
        try:
            while self.start_:
                success, self.frame = self.cap.read()
                if success:
                    gui()  # pump Qt events so the UI stays responsive
                    self.show = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)

                    # Pose estimation + on-screen annotation.
                    self.fun()

                    if self.record_m:
                        out.write(self.show)
                    showImage = QImage(self.show.data, self.show.shape[1],
                                       self.show.shape[0], QImage.Format_RGB888)
                    self.label.setPixmap(QPixmap.fromImage(showImage))
        finally:
            # Fix: the recorder was never released, which could leave the
            # AVI file truncated/unplayable.
            out.release()

    # Disconnect and shut down devices
    def end_all(self):
        """Stop the capture loop and release the camera."""
        self.start_ = False
        # Fix: guard against end_all being clicked before start_all ever ran
        # (self.cap did not exist and raised AttributeError).
        cap = getattr(self, 'cap', None)
        if cap is not None:
            cap.release()
        self.label.setText('No Movie')

    # Recording toggle
    def record(self):
        """Toggle AVI recording and reflect the state on the button label."""
        if self.pushButton_2.text()[-2:] == 'ON':
            self.record_m = False
            self.pushButton_2.setText('Record=OFF')
        else:
            self.record_m = True
            self.pushButton_2.setText('Record=ON')

    def __init__(self, qq):
        """Apply the generated UI to *qq* and load the pose estimator.

        qq: the QMainWindow instance the Ui_MainWindow layout is built onto.
        """
        super().__init__()
        super().setupUi(qq)
        # Pose-estimation backend; fall back to the default input size when
        # the requested resolution string is invalid.
        w, h = model_wh('432x368')
        if w > 0 and h > 0:
            self.e = TfPoseEstimator(get_graph_path('mobilenet_thin'), target_size=(w, h))
        else:
            self.e = TfPoseEstimator(get_graph_path('mobilenet_thin'), target_size=(432, 368))
        # Wire the buttons to their handlers.
        self.pushButton.clicked.connect(self.start_all)
        self.pushButton_2.clicked.connect(self.record)
        self.pushButton_3.clicked.connect(self.end_all)

        self.info = None       # current pose label
        self.info_pre = None   # previous pose label (for change detection)
        self.start_ = False    # main-loop run flag
        self.record_m = False  # whether frames are written to the AVI

if __name__ == "__main__":
    # `sys` is already imported at the top of the file; the duplicate local
    # import from the original was dropped.
    app = QApplication(sys.argv)

    qw = QMainWindow()
    form = test(qw)
    # One application-wide stylesheet covers every widget; the extra
    # per-window setStyleSheet call in the original was redundant.
    app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())

    qw.show()
    # Fix: propagate Qt's exit status to the process (it was discarded).
    sys.exit(app.exec_())