from camera import Camera, ESC_KEY
from checkerboard_pose_estimation import CheckerboardPoseEstimation
from robot_arm import ETController, ROBOT_IP
from hand_cam_calibration import CalibrationSolver
import cv2
import numpy as np
from cam_calibration import (
    CamCalibration,
    COLS,
    ROWS,
    SQUARE_SIZE,
    JSON_FILE,
    IMG_PATH,
    JSON_PATH,
)
import os
import json
import time

if __name__ == "__main__":
    # Interactive hand-eye calibration data collector.
    #
    # Workflow:
    #   1. Connect to the robot and start the camera.
    #   2. Continuously estimate the checkerboard pose from the camera feed.
    #   3. Key bindings in the viewer window:
    #        'w'  - record the current robot TCP pose + checkerboard pose pair
    #        'q'  - solve the hand-eye calibration from all recorded pairs,
    #               save the result to a timestamped JSON file, then quit
    #        ESC  - quit without solving (recorded pairs are discarded)
    controller = ETController(ROBOT_IP)

    # Set up the camera pipeline; abort early if either step fails.
    camera = Camera()
    if not camera.configure_pipeline():
        exit()
    if not camera.start_pipeline():
        exit()

    # Obtain camera intrinsics: either load previously saved parameters from
    # JSON, or run a fresh calibration over the stored checkerboard images.
    if JSON_FILE:
        json_file_path = os.path.join(
            JSON_PATH, "intrinsic_params.json"
        )  # path of the intrinsic-parameters JSON file
        if os.path.exists(json_file_path):
            with open(json_file_path, "r") as f:
                intrinsic = json.load(f)
        else:
            print(f"JSON file {json_file_path} not found!")
            exit()
    else:
        calib = CamCalibration()
        intrinsic = calib.calibrate_camera(IMG_PATH)

    pose_estimator = CheckerboardPoseEstimation(intrinsic)

    solver = CalibrationSolver()

    # Paired samples gathered interactively: robot TCP poses and the
    # matching checkerboard poses observed by the camera.
    hand_poses = []
    cam_poses = []

    # FPS bookkeeping for the viewer loop.
    start_time = time.time()
    frame_count = 0
    try:
        if controller.connect():
            # Query the robot state once up front to verify the link works.
            success_stat, result_stat, cmd_id_stat = controller.send_cmd(
                "getRobotState"
            )
            if success_stat:
                print("robot result:", result_stat)
            else:
                print("Failed to get robot state")

            while True:
                color_image = camera.get_frame()
                if color_image is None:
                    continue  # dropped frame; try again
                ret, cam_pose, distance = pose_estimator.estimate_pose(color_image)
                if ret:
                    # Overlay the estimated translation (mm) and orientation
                    # (pitch/yaw/roll) near the bottom of the frame.
                    cv2.putText(
                        color_image,
                        f"x: {cam_pose[0]:.2f}mm, y: {cam_pose[1]:.2f}mm, z: {cam_pose[2]:.2f}mm",
                        (10, color_image.shape[0] - 50),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.7,
                        (0, 255, 0),
                        2,
                    )
                    cv2.putText(
                        color_image,
                        f"dist: {distance:.2f}mm, pitch: {cam_pose[3]:.2f}, yaw: {cam_pose[4]:.2f}, roll: {cam_pose[5]:.2f}",
                        (10, color_image.shape[0] - 20),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.7,
                        (0, 255, 0),
                        2,
                    )
                else:
                    cv2.putText(
                        color_image,
                        "Unable to Detect Chessboard",
                        (20, color_image.shape[0] - 20),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1.3,
                        (0, 0, 255),
                        3,
                    )

                # Report the measured FPS roughly once per second.
                frame_count += 1
                elapsed_time = time.time() - start_time
                if elapsed_time > 1.0:
                    fps = frame_count / elapsed_time
                    print(f"FPS: {fps:.2f}")
                    start_time = time.time()
                    frame_count = 0

                cv2.imshow("Color Viewer", color_image)
                key = cv2.waitKey(1)
                if key == ESC_KEY:
                    break
                elif key == ord("w"):
                    # Fetch the robot's current TCP pose.
                    success_pose, hand_pose, cmd_id_pose = controller.send_cmd(
                        "get_tcp_pose",
                        {"coordinate_num": -1, "tool_num": 0, "unit_type": 0},
                    )
                    if success_pose:
                        print("robot result:", hand_pose)
                    else:
                        print("Failed to get TCP pose")

                    # Record the pair only when both the robot pose and the
                    # checkerboard detection for this frame are valid.
                    if success_pose and ret:
                        hand_poses.append(hand_pose)
                        cam_poses.append(cam_pose)
                        print("Saved hand_pose and cam_pose.")
                        print("Hand_pose result:", hand_poses)
                        print("Cam_pose result:", cam_poses)
                elif key == ord("q"):
                    if hand_poses and cam_poses:
                        # Flatten the collected pose lists into the 1-D layout
                        # the solver expects.
                        hand_poses_flat = np.array(hand_poses).flatten()
                        cam_poses_flat = np.array(cam_poses).flatten()
                        calibration_result = solver.solve_calibration(
                            hand_poses_flat, cam_poses_flat
                        )
                        print("Calibration result:", calibration_result)

                        # Save the calibration result to a JSON file with a unique name
                        timestamp = time.strftime("%Y%m%d-%H%M%S")
                        json_filename = f"calibration_result_{timestamp}.json"
                        json_filepath = os.path.join(JSON_PATH, json_filename)
                        calibration_result_list = calibration_result.tolist()
                        data_to_save = {
                            "calibration_result": calibration_result_list,
                            "hand_poses_flat": hand_poses_flat.tolist(),
                            "cam_poses_flat": cam_poses_flat.tolist()
                        }

                        with open(json_filepath, "w") as json_file:
                            json.dump(data_to_save, json_file)
                        print(f"Calibration result saved to {json_filepath}")
                    break
        else:
            # Fix: previously a failed connection fell through silently and
            # the script exited with no diagnostic at all.
            print(f"Failed to connect to robot at {ROBOT_IP}")
    except KeyboardInterrupt:
        pass
    finally:
        # Always release the robot link, camera pipeline, and GUI windows.
        controller.disconnect()
        camera.stop_pipeline()
        cv2.destroyAllWindows()