import json
import os
import time

from TRT.rtmpose.rtmpose_inference import halpe26, draw_skeleton
from TRT.common import YOLOv10RTInference, PoseTracker

# TensorRT engine instances created at import time:
# a YOLOv10 person detector and an RTMPose-style pose tracker.
TRTPATH = r"TRT/weights/yolov10s.engine"
yolov10_trt_ins = YOLOv10RTInference(TRTPATH)
TRTPATH = "TRT/weights/end2end.engine"
PoseTracker_trt_ins = PoseTracker(TRTPATH)
from utils.get_img import DepthReceiver 
import cv2
import numpy as np
import traceback
import tkinter as tk
from tkinter import messagebox
from PIL import Image, ImageTk
import threading
from datetime import datetime

# Camera intrinsics (focal lengths and principal point, pixel units).
fx, fy = 238.494, 233.194
cx, cy = 229.981, 151.422
# Near-identity extrinsic rotation applied to back-projected 3D points.
rotation_matrix = np.array([
    [1, 0.000752356, -0.000484363],
    [-0.000754182, 0.999993, -0.00378062],
    [0.000481515, 0.00378098, 0.999993]
])
# Color/depth frame dimensions (width, height) in pixels.
w, h  = 640, 352

class Application:
    """Tkinter GUI that streams color/depth frames, runs person detection
    and pose estimation, projects 2D keypoints into 3D via the depth map,
    and optionally records the per-frame 3D keypoints to a JSON file.
    """

    def __init__(self, root):
        """Build the UI, start the capture/inference thread.

        Args:
            root (tk.Tk): the Tk root window to attach widgets to.
        """
        self.root = root
        self.root.title("Real-time Display")
        self.depth_receiver = DepthReceiver()

        # Recording state: while True, collect_data appends each frame's
        # 3D keypoints to keypoints_3d_list.
        self.is_running = False
        self.keypoints_3d_list = []

        # Side-by-side labels for the color and depth views.
        self.image_frame = tk.Frame(root)
        self.image_frame.pack()

        self.color_label = tk.Label(self.image_frame, text="Color Image")
        self.color_label.pack(side="left", padx=5, pady=5)

        self.depth_label = tk.Label(self.image_frame, text="Depth Image")
        self.depth_label.pack(side="left", padx=5, pady=5)

        self.start_button = tk.Button(root, text="开始", command=self.start_collecting, bg="green", fg="white")
        self.start_button.pack(pady=10)

        self.stop_button = tk.Button(root, text="结束", command=self.stop_collecting, bg="red", fg="white")
        self.stop_button.pack(pady=10)

        # Background thread for capture / inference / display.
        # daemon=True so the process can exit when the Tk window closes
        # (the original non-daemon thread kept the process alive forever).
        self.display_thread = threading.Thread(target=self.collect_data, daemon=True)
        self.display_thread.start()

        # Per-keypoint fallback distances used by min_depth
        # (presumably millimeters, matching the depth map — TODO confirm).
        self.init_distance = np.full(27, 3000.0)

        # Last non-zero depth value seen per keypoint; used to fill in
        # zero (missing) depth readings in project_to_3d.
        self.z = np.zeros(27)

    def project_to_3d(self, keypoints_2d, depth_map, fx=fx, fy=fy, cx=cx, cy=cy, rotation_matrix=None):
        """Project 2D keypoints into 3D space.

        A 27th point — the midpoint of keypoints 5 and 6 (the shoulders)
        — is appended before projection.

        Args:
            keypoints_2d (numpy.ndarray): (1, N, 2) pixel-space keypoints.
            depth_map (numpy.ndarray): depth image aligned with the color frame.
            fx (float): camera intrinsic fx.
            fy (float): camera intrinsic fy.
            cx (float): camera intrinsic cx.
            cy (float): camera intrinsic cy.
            rotation_matrix (numpy.ndarray): 3x3 rotation applied to each
                back-projected point; identity when None.

        Returns:
            numpy.ndarray: (1, N+1, 3) 3D keypoints, rounded to integers.
        """
        if rotation_matrix is None:
            rotation_matrix = np.eye(3)

        keypoints_2d = np.array(keypoints_2d)

        # Append the shoulder midpoint as an extra (index 26) keypoint.
        mid_shoulder = (keypoints_2d[0][5] + keypoints_2d[0][6]) / 2
        keypoints_2d = np.concatenate((keypoints_2d[0], mid_shoulder[np.newaxis, :]), axis=0)
        keypoints_2d = keypoints_2d[np.newaxis, :]

        num_points = keypoints_2d.shape[1]
        keypoints_3d = np.zeros((1, num_points, 3))
        for i in range(num_points):
            x_2d, y_2d = keypoints_2d[0, i]
            # Clamp pixel coordinates to the frame bounds.
            x_2d = min(max(int(x_2d), 0), w - 1)
            y_2d = min(max(int(y_2d), 0), h - 1)

            # Depth at the keypoint. Zero means "no reading": keep the
            # last valid depth for this keypoint index instead.
            z = depth_map[y_2d, x_2d]
            if z != 0:
                self.z[i] = z
            else:
                z = self.z[i]

            # Back-project from pixel to camera coordinates.
            x_cam = (x_2d - cx) * z / fx
            y_cam = (y_2d - cy) * z / fy

            # Apply the extrinsic rotation, round to integer units.
            point_world = rotation_matrix @ np.array([x_cam, y_cam, z])
            keypoints_3d[0, i] = np.round(point_world).astype(np.int32)
        return keypoints_3d

    def save_keypoints_to_json(self, keypoints_list, output_path):
        """Save per-frame 3D keypoints into a timestamped JSON file.

        Args:
            keypoints_list (list | numpy.ndarray): sequence of per-frame
                (27, 3) keypoint arrays, as produced by project_to_3d.
            output_path (str): directory the JSON file is written into.
        """
        current_time = datetime.now().strftime('%Y%m%d_%H%M%S')
        file_name = f'keypoints_{current_time}.json'
        # Robustness fix: create the output directory if it is missing
        # (the original open() failed when './output' did not exist).
        os.makedirs(output_path, exist_ok=True)
        output_path = f'{output_path}/{file_name}'

        if not isinstance(keypoints_list, list):
            keypoints_list = keypoints_list.tolist()
        all_frames_keypoints = []
        for keypoints_3d in keypoints_list:
            single_frame_data = []
            for idx, keypoint in enumerate(keypoints_3d):
                # Index 26 is the synthetic shoulder midpoint; it has no
                # entry in the halpe26 keypoint table, so skip it.
                if idx == 26:
                    continue
                keypoint_name = halpe26['keypoint_info'][idx]['name']
                # np.asarray(...).tolist() handles both ndarray rows and
                # plain lists (the original .tolist() crashed on lists).
                single_frame_data.append({f"{keypoint_name}": np.asarray(keypoint).tolist()})
            all_frames_keypoints.append(single_frame_data)
        with open(output_path, 'w') as json_file:
            json.dump(all_frames_keypoints, json_file)
        print(f"3D keypoints saved to {output_path}")

    def min_depth(self, depth: np.ndarray, index: int, x_point: int, y_point: int, val_range=5):
        """Return a robust depth for a keypoint: the minimum non-zero depth
        in a window around (x_point, y_point), with outlier rejection
        against the per-keypoint running distance.

        NOTE(review): currently unused — collect_data samples the depth map
        directly; kept for experimentation.
        """
        # Window clamped to the real frame size (the original hard-coded
        # 640x480 although frames are w x h = 640x352).
        # NOTE(review): the -1 makes the exclusive slice end drop the last
        # row/column at the frame edge — confirm whether that is intended.
        x_max = min(x_point + val_range, w - 1)
        x_min = max(x_point - val_range, 0)
        y_max = min(y_point + val_range, h - 1)
        y_min = max(y_point - val_range, 0)
        # Copy so replacing zeros below does not mutate the caller's
        # depth map (the original wrote into the shared array).
        dep_img = depth[y_min:y_max, x_min:x_max].copy()
        max_val = np.max(dep_img)
        dep_img[dep_img == 0] = max_val
        result = int(np.min(dep_img))
        # Reject jumps larger than 1500 units against the last accepted value.
        if abs(result - self.init_distance[index]) > 1500:
            result = self.init_distance[index]
        if result == 0:
            result = self.init_distance[index]
            self.init_distance[index] = result + 1
        else:
            self.init_distance[index] = result
        return result

    def start_collecting(self):
        """Begin recording 3D keypoints (button handler)."""
        if self.is_running:
            messagebox.showinfo("提示", "数据采集已在运行！")
            return

        self.is_running = True
        self.keypoints_3d_list = []
        messagebox.showinfo("提示", "开始采集数据！")

    def stop_collecting(self):
        """Stop recording and save the collected keypoints (button handler)."""
        if not self.is_running:
            messagebox.showinfo("提示", "数据采集尚未开始！")
            return

        self.is_running = False
        self.save_keypoints_to_json(self.keypoints_3d_list, './output')
        messagebox.showinfo("提示", "数据已保存！")

    def update_show_image(self, color_image, depth_image):
        """Push the latest color/depth frames into the Tk labels."""
        color_image_tk = self.convert_to_tk_image(color_image)
        depth_image_tk = self.convert_to_tk_image(depth_image, is_depth=True)

        # Keep a reference on each label: Tk does not own PhotoImage
        # objects, so without it the image is garbage-collected.
        self.color_label.configure(image=color_image_tk)
        self.color_label.image = color_image_tk

        self.depth_label.configure(image=depth_image_tk)
        self.depth_label.image = depth_image_tk

    def collect_data(self):
        """Capture loop run on the display thread.

        Receives frames, runs detection every det_frequency frames and
        pose tracking every frame, projects keypoints to 3D, draws
        overlays, and refreshes the UI at a capped frame rate.
        """
        try:
            frame_cnt = 0
            det_frequency = 5          # run the YOLO detector every 5th frame
            target_fps = 30            # display frame-rate cap
            frame_interval = 1.0 / target_fps
            box = None
            while True:
                start_time = time.time()

                color_image, depth_image = self.depth_receiver.receive_frame()
                if color_image is None or depth_image is None:
                    continue

                if frame_cnt % det_frequency == 0:
                    box = yolov10_trt_ins.get_bbox(color_image)
                    if box is None:
                        # No person found: show raw frames and retry
                        # detection on the next frame.
                        frame_cnt = 0
                        self.update_show_image(color_image, depth_image)
                        continue
                    keypoints, scores = PoseTracker_trt_ins(color_image, box)
                else:
                    # Between detections the pose tracker runs alone.
                    keypoints, scores = PoseTracker_trt_ins(color_image)
                # BUG FIX: the original incremented frame_cnt only on the
                # non-detection branch, so it stayed at 0 and the detector
                # ran every frame; count every processed frame instead.
                frame_cnt += 1

                color_image = draw_skeleton(color_image,
                                            keypoints,
                                            scores,
                                            openpose_skeleton=False,
                                            kpt_thr=0.3,
                                            line_width=3)
                keypoints_3d = self.project_to_3d(keypoints, depth_image)[0]
                # Overlay distances (depth / 100) for the shoulder midpoint
                # (index 26) and halpe26 keypoint 19 — presumably the hip;
                # TODO confirm against the halpe26 table.
                distance = keypoints_3d[26][2] / 100
                cv2.putText(color_image, str(int(distance)), (50, 50), 1, 2, (255, 0, 255), 2, cv2.LINE_AA)
                distance = keypoints_3d[19][2] / 100
                cv2.putText(color_image, str(int(distance)), (100, 50), 1, 2, (255, 0, 255), 2, cv2.LINE_AA)
                cv2.rectangle(color_image, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 0), 2)

                if self.is_running:
                    self.keypoints_3d_list.append(keypoints_3d)
                self.update_show_image(color_image, depth_image)

                # Sleep off whatever is left of this frame's time budget.
                elapsed_time = time.time() - start_time
                time.sleep(max(0, frame_interval - elapsed_time))
        except Exception:
            # Exception, not BaseException: do not swallow
            # KeyboardInterrupt/SystemExit during shutdown.
            traceback.print_exc()

    def convert_to_tk_image(self, image, is_depth=False):
        """Convert an OpenCV image to a Tkinter-displayable PhotoImage.

        Depth images are min-max normalized and colorized with the JET
        colormap before conversion.
        """
        if is_depth:
            normalized_depth = cv2.normalize(image, None, 0, 255, cv2.NORM_MINMAX)
            image = cv2.applyColorMap(normalized_depth.astype(np.uint8), cv2.COLORMAP_JET)

        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return ImageTk.PhotoImage(Image.fromarray(image_rgb))
if __name__ == "__main__":
    # Script entry point: build the main window, attach the application
    # (which wires up the widgets and starts the capture thread), then
    # block on the Tk event loop until the window is closed.
    main_window = tk.Tk()
    application = Application(main_window)
    main_window.mainloop()
