from AnnotationUtils import gen_poseC3d_annotation
import json
import sys

from ImageUtils import cvimg_to_qpixmap, real_result_to_list_result
from ActionManager.Knn_for_single import Knn_for_single

sys.path.append('my_alphapose')
import os
import threading
import time
import shutil

from PyQt5 import QtGui
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import *

from my_alphapose import AlphaPoseService
from DatabaseInteraction import DatabaseInteraction
from my_alphapose.AlphaPoseService import prepare_model, alphapose_return_image
from my_alphapose import SPPE
from ui.fromcameradialog import Ui_FromCameraDialog
import cv2


# os.environ['QT_QPA_PLATFORM'] = 'wayland' # for wayland in linux
# os.environ['QT_QPA_PLATFORM'] = 'windows'
# os.environ['QT_DEBUG_PLUGINS'] = '1'


class FromCameraDialog(QDialog):
    """Dialog that previews the default camera, runs AlphaPose pose
    estimation on the live frames, and records an annotated action clip
    (AVI video + per-frame keypoint JSON) registered in the database.

    Lifecycle: ``__init__`` loads the detection/pose models and spawns a
    background thread (``display``) that reads frames until ``close_flag``
    is set; Start/End toggle recording; Ensure commits the clip to the
    database and writes the JSON annotation file.
    """

    def __init__(self):
        super(FromCameraDialog, self).__init__()
        # Alternative RTSP source kept for reference:
        # self.cap = cv2.VideoCapture('''rtsp://admin:haikang123456@10.21.7.168:554/h264/chi/main/av_stream''')
        self.database_interaction = DatabaseInteraction()
        self.all_action_dir = '/home/primer/videos/action_videos'
        # -1 asks OpenCV to open the first available capture device.
        self.cap = cv2.VideoCapture(-1)
        self.action_out_dir = os.path.join(self.all_action_dir, self.database_interaction.get_insert_id())
        self.video_address = self.action_out_dir + '/alphapose_video0.avi'
        self.fps = 15

        self.frame_list = []
        self.FRAME_CAPACITY = 100
        # Maps 'frameNNNN' -> keypoint list; dumped to JSON on accept.
        self.save_data = {}

        # prepare_model() resolves its weight paths relative to the
        # my_alphapose package directory, hence the temporary chdir.
        os.chdir('my_alphapose')
        self.det_model, self.pose_model = prepare_model()
        os.chdir('..')
        self.ui = Ui_FromCameraDialog()

        self.ui.setupUi(self)
        self.init_ui_set()
        self.init_ui_connect()

        self.thread = None

        self.frame_index = 0
        self.start_flag = 0   # 1 while a clip is being recorded
        self.close_flag = 0   # 1 tells the display thread to stop
        self.class_ = 'drink water'  # default action class

        self.play_video()
        # self.model = predict.PoseC3D()
        # self.model.prepare()

    def init_ui_set(self):
        """Initialise static widget state: scaling, id label, class box."""
        self.ui.image_label.setScaledContents(True)
        id_str = self.database_interaction.get_insert_id()
        self.ui.IDLabel.setText(f'id: {id_str}')
        self.init_class_box()

    def init_ui_connect(self):
        """Wire button clicks and combo-box changes to their handlers."""
        self.ui.ensureButton.clicked.connect(self.accept_button)
        self.ui.exitButton.clicked.connect(self.reject_button)
        self.ui.startButton.clicked.connect(self.start_button)
        self.ui.endButton.clicked.connect(self.end_button)
        self.ui.classBox.currentIndexChanged.connect(self.class_changed)

    def init_class_box(self):
        """Populate the class combo box with every known action class."""
        class_list = self.database_interaction.get_all_class()
        for class_name in class_list:
            self.ui.classBox.addItem(class_name)

    def play_video(self):
        """Start the background capture/inference loop in its own thread."""
        self.thread = threading.Thread(target=self.display)
        self.thread.start()

    def display(self):
        """Capture loop: read frames, run pose estimation, update the UI,
        and append to the output video while recording is active.

        Runs until the camera fails or ``close_flag`` is set, then releases
        the capture device and the video writer.

        NOTE(review): this thread calls setPixmap on Qt widgets directly;
        Qt widgets are not thread-safe and this should eventually be routed
        through signals/slots — confirm before relying on it elsewhere.
        """
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        size = (int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))

        # Always start from a fresh, empty output directory.
        # makedirs (vs mkdir) also creates missing parent directories.
        if os.path.exists(self.action_out_dir):
            shutil.rmtree(self.action_out_dir)
        os.makedirs(self.action_out_dir)

        self.video_out = cv2.VideoWriter(os.path.join(self.video_address), fourcc, self.fps, size)
        online_frame_index = 0

        t1 = time.time()
        process_this = False

        while self.cap.isOpened() and self.close_flag == 0:
            ret, frame = self.cap.read()
            # Check the read result on EVERY frame (the original only
            # checked it on processed frames, so a camera failure during a
            # skipped frame would spin the loop forever).
            if not ret:
                break
            # Drop every second frame so pose inference keeps pace with
            # capture; the first frame read is processed.
            process_this = not process_this
            if not process_this:
                continue

            # Pose estimation (with built-in target tracking).
            pose_image, pose_predict_result = alphapose_return_image(self.det_model, self.pose_model, frame)
            print(pose_predict_result)
            # Conversion of AlphaPose annotations to PoseC3D format and
            # action recognition, kept for future use:
            # gen_poseC3d_annotation(pose_predict_result)
            # action = self.model.predict()

            # vis_frame = cv2.putText(vis_frame, action, (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

            dict_result = real_result_to_list_result(pose_predict_result)
            self.save_data['frame' + str(self.frame_index).zfill(4)] = dict_result

            pose_pixmap = cvimg_to_qpixmap(pose_image)
            # OpenCV delivers BGR; QImage expects RGB.
            rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            convert_to_qt_format = QtGui.QImage(rgb_image.data, rgb_image.shape[1], rgb_image.shape[0],
                                                QImage.Format_RGB888)

            if self.start_flag == 1:
                # Only the annotated frames are written to the clip.
                self.video_out.write(pose_image)
            self.ui.image_label.setPixmap(QPixmap.fromImage(convert_to_qt_format))
            self.ui.ensureView.setPixmap(pose_pixmap)
            self.ui.ensureView.setScaledContents(True)
            self.frame_index += 1

            # Print the processed-frame rate roughly once per second.
            online_frame_index += 1
            t2 = time.time()
            if t2 - t1 > 1:
                print(online_frame_index)
                online_frame_index = 0
                t1 = t2

        self.cap.release()
        self.video_out.release()
        cv2.destroyAllWindows()

    def start_button(self):
        """Ask for confirmation, then begin recording from frame 0."""
        # Bug fix: the buttons argument must be the OR of the buttons to
        # show. The original passed QMessageBox.No alone as `buttons` (and
        # Yes as defaultButton), so no "Yes" button was ever displayed.
        result = QMessageBox.question(self, '确认开始框', '确认要开始吗？',
                                      QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
        if result == QMessageBox.Yes:
            self.start_flag = 1
            self.frame_index = 0

    def end_button(self):
        """Stop recording if it is running; otherwise inform the user."""
        if self.start_flag == 1:
            self.start_flag = 0
            QMessageBox.information(self, '提示', '完成录制', QMessageBox.Yes)
            self.frame_index = 0
        else:
            QMessageBox.information(self, '提示', '你还没有开始', QMessageBox.Yes)

    def accept_button(self):
        """Commit the clip: stop the capture thread, register the record in
        the database, write the keypoint JSON, and close the dialog."""
        self.close_flag = 1
        json_address = os.path.join(self.action_out_dir, 'action_result.json')
        self.database_interaction.insert_one_data(
            self.database_interaction.get_insert_id(),
            self.video_address,
            self.class_,
            json_address)

        with open(json_address, 'w') as f:
            # Persist the per-frame keypoint results.
            json.dump(self.save_data, f)
        self.close()

    def reject_button(self):
        """Discard: stop the capture thread and close without saving."""
        self.close_flag = 1
        self.close()

    def closeEvent(self, QCloseEvent):
        """Ensure the capture thread stops when the window is closed."""
        self.close_flag = 1

    def class_changed(self):
        """Track the currently selected action class."""
        self.class_ = self.ui.classBox.currentText()

    def clear(self):
        """Reset the recording state for a new capture session."""
        self.frame_index = 0
        self.save_data = {}


if __name__ == '__main__':
    # Stand-alone entry point: host the capture dialog in its own Qt app.
    app = QApplication(sys.argv)
    dialog = FromCameraDialog()
    dialog.show()
    sys.exit(app.exec_())
