import os
import shutil
import sys
import threading
from time import time

import cv2
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QDialog, QApplication
from PyQt5.uic.properties import QtGui
import mmaction2.tools.predict as predict

from ActionManager.Knn_for_single import Knn_for_mmaction2, Knn_for_single
from AnnotationUtils import gen_poseC3d_annotation
from DatabaseInteraction import DatabaseInteraction
from ImageUtils import real_result_to_list_result, cvimg_to_qpixmap

sys.path.append('my_alphapose')
from my_alphapose.AlphaPoseService import alphapose_return_image, prepare_model
from ui.OnlineRecognitionUi import OnlineRecognitionUi


class OnlineRecognition(QDialog):
    """Dialog that streams webcam frames, runs AlphaPose skeleton estimation
    on every other frame, and — once a sliding window of FRAME_CAPACITY
    frames is buffered — classifies the window with PoseC3D + KNN and
    overlays the top-3 action labels on the live preview.
    """

    def __init__(self):
        super(OnlineRecognition, self).__init__()
        self.ui = OnlineRecognitionUi()
        self.ui.setupUi(self)

        self.init_ui_set()
        self.init_ui_connect()

        self.database_interaction = DatabaseInteraction()
        # -1 lets OpenCV auto-select the first available camera device.
        self.cap = cv2.VideoCapture(-1)
        # prepare_model() resolves checkpoint paths relative to the
        # alphapose directory, so chdir around the call.
        os.chdir('my_alphapose')
        self.det_model, self.pose_model = prepare_model()
        os.chdir('..')
        self.frame_list = []          # sliding window of per-frame pose results
        self.FRAME_CAPACITY = 64      # window length fed to PoseC3D
        self.thread = None            # worker thread created by play_video()
        self.close_flag = 0           # set to 1 by exit_button() to stop the loop

        self.selected_class = self.database_interaction.get_selected_class()
        self.play_video()

    def play_video(self):
        """Start the capture/recognition loop on a background thread."""
        # daemon=True so the interpreter can exit even if the loop never
        # observes close_flag (e.g. the window is killed externally).
        self.thread = threading.Thread(target=self.display, daemon=True)
        self.thread.start()

    def init_ui_set(self):
        """Placeholder for additional static UI configuration."""
        pass

    def init_ui_connect(self):
        """Wire UI signals to their handlers."""
        self.ui.exitButton.clicked.connect(self.exit_button)

    def display(self):
        """Worker loop: grab frames, estimate pose, classify the buffered
        window, and draw the annotated preview into onlineLabel.

        NOTE(review): this runs on a worker thread yet calls widget methods
        (setText/setPixmap) directly; Qt only guarantees safe widget access
        from the GUI thread — consider moving updates behind signals/slots.
        """
        model = predict.PoseC3D()
        model.prepare()

        t1 = time()
        online_frame_index = 0  # frames processed in the current second (FPS log)
        flag = 0
        while self.cap.isOpened() and self.close_flag != 1:
            ret, frame = self.cap.read()
            # Process only every other frame to keep up with the camera.
            if flag == 0:
                flag += 1
                continue
            flag = 0
            if not ret:
                break

            online_frame_index += 1
            # AlphaPose expects to run from inside its own directory.
            os.chdir('my_alphapose')
            pose_image, pose_predict_result = alphapose_return_image(
                self.det_model, self.pose_model, frame)
            os.chdir('..')

            self.frame_list.append(pose_predict_result)
            if len(self.frame_list) <= self.FRAME_CAPACITY:
                # Still filling the sliding window; no prediction yet.
                self.ui.onlineLabel.setText('Prepare model now')
                continue

            del self.frame_list[0]  # slide the window forward by one frame
            gen_poseC3d_annotation(pose_predict_result)
            action_predict_result = model.predict()
            action_predict_result = Knn_for_mmaction2().knn(action_predict_result[0])

            # BUGFIX: guard against a None KNN result BEFORE indexing it.
            # Previously the top-3 labels were built first, so the None
            # check below could never be reached on a None result.
            if action_predict_result is None:
                pose_image = frame
            else:
                # Overlay the top-3 (label, distance) pairs at y = 25/55/85.
                for rank, (label, distance) in enumerate(
                        action_predict_result[:3], start=1):
                    text = f'top{rank}:{label}, distance:{distance:.4f}'
                    pose_image = cv2.putText(
                        pose_image, text, (25, 25 + 30 * (rank - 1)),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

            pose_pixmap = cvimg_to_qpixmap(pose_image)
            self.ui.onlineLabel.setPixmap(pose_pixmap)
            self.ui.onlineLabel.setScaledContents(True)

            # Log processed-frame count once per second as a crude FPS meter.
            t2 = time()
            if t2 - t1 > 1:
                print(online_frame_index)
                online_frame_index = 0
                t1 = t2

        self.cap.release()
        cv2.destroyAllWindows()

    def exit_button(self):
        """Signal the worker loop to stop, then close the dialog."""
        self.close_flag = 1
        self.close()


if __name__ == '__main__':
    # Launch the online-recognition dialog as a standalone application.
    app = QApplication(sys.argv)
    dialog = OnlineRecognition()
    dialog.show()
    app.exit(dialog.exec_())
