import argparse
import json
import os

import cv2
import numpy as np
import yaml

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

from tf.transformations import quaternion_matrix


def pose2list(pose):
    """Split a ROS-style pose dict into plain lists.

    Returns a tuple ([x, y, z], [x, y, z, w]) taken from the
    'position' and 'orientation' sub-dicts of *pose*.
    """
    position = pose['position']
    orientation = pose['orientation']
    pos = [position[axis] for axis in ('x', 'y', 'z')]
    ort = [orientation[axis] for axis in ('x', 'y', 'z', 'w')]
    return pos, ort

def camera_int_ext(image_list, width, height, square_dim):
    """Calibrate a camera from chessboard images.

    Detects a (width x height) inner-corner chessboard in each image and runs
    cv2.calibrateCamera over all images where the board was found.

    Args:
        image_list: paths of calibration images.
        width: number of inner corners along the board's width.
        height: number of inner corners along the board's height.
        square_dim: edge length of one chessboard square, in meters.

    Returns:
        (cameraMatrix, distCoeffs, rvecs, tvecs). rvecs/tvecs hold one
        board pose (rotation vector, translation vector) per detected image.
        If no board was detected, zero-filled placeholders and empty lists
        are returned.
    """
    chessboard_dim = (width, height)

    # Corner-refinement termination criteria for cornerSubPix.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.0001)

    # Planar board model: grid of 3D points (x, y, 0) scaled to meters.
    objp = np.zeros((height * width, 3), np.float32)
    objp[:, :2] = np.mgrid[0:width, 0:height].T.reshape(-1, 2) * square_dim

    objPoints = []  # 3d points in real world space (one copy of objp per image)
    imgPoints = []  # matching 2d points in the image plane
    image_size = None  # (width, height) in pixels, taken from the last readable image

    for fname in image_list:
        img = cv2.imread(fname)
        if img is None:
            # imread returns None on unreadable files; skip instead of
            # crashing in cvtColor below.
            print('Could not read image {}, skipping.'.format(fname))
            continue
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # calibrateCamera expects (width, height); ndarray shape is (rows, cols).
        image_size = gray.shape[::-1]

        ret, corners = cv2.findChessboardCorners(gray, chessboard_dim)
        if ret:
            print('Chess board found in {}'.format(fname))

            objPoints.append(objp)
            corners2 = cv2.cornerSubPix(
                gray, corners, (3, 3), (-1, -1), criteria)
            imgPoints.append(corners2)

    if objPoints:
        print("Running Calibration...")
        # BUG FIX: the imageSize argument must be the image resolution in
        # pixels, not the chessboard's corner-count dimensions.
        retval, cameraMatrix, distCoeffs, rvecs, tvecs = cv2.calibrateCamera(
            objPoints, imgPoints, image_size, None, None)
        print("Camera extrinsic matrix extracted.")
    else:
        # No detections: return inert placeholders so callers can proceed.
        cameraMatrix = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
        distCoeffs = [[0, 0, 0, 0, 0]]
        rvecs = []
        tvecs = []

    return cameraMatrix, distCoeffs, rvecs, tvecs

def draw_axes(base, cam):
    """Render the base and camera coordinate frames in 3D and save the plot.

    Args:
        base: 4x4 homogeneous matrix whose columns 0..2 are the frame's axis
              endpoint points and column 3 is the frame origin.
        cam:  same layout for the camera frame.

    The figure is written to 'output.png'. Axis colors: x=red, y=green, z=blue.
    """
    fig = plt.figure()
    # fig.gca(projection='3d') was deprecated and removed in matplotlib >= 3.6;
    # add_subplot is the supported way to get a 3D Axes.
    ax = fig.add_subplot(projection='3d')

    for frame, name in ((base, 'base frame'), (cam, 'cam frame')):
        for axis_idx, color in enumerate(('r', 'g', 'b')):
            xs = [frame[0, 3], frame[0, axis_idx]]
            ys = [frame[1, 3], frame[1, axis_idx]]
            zs = [frame[2, 3], frame[2, axis_idx]]
            # Label each frame only once so the legend has no duplicates.
            label = name if axis_idx == 0 else None
            ax.plot(xs, ys, zs, color=color, label=label)

    ax.legend()
    plt.savefig('output.png')


if __name__ == "__main__":

    ap = argparse.ArgumentParser()
    ap.add_argument("-c", "--config_path", required=False,
                    default='../config/config.yaml', help="path to the yaml config file")
    args = vars(ap.parse_args())

    # Load calibration settings from the YAML config file.
    yaml_path = args["config_path"]
    print('Loading config YAML from {}...'.format(yaml_path))
    with open(yaml_path) as f:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # can construct arbitrary Python objects from untrusted input.
        data_dict = yaml.safe_load(f)

    path = data_dict["data_path"]
    chess_board_width = data_dict["chessboard_width"]
    chess_board_height = data_dict["chessboard_height"]
    chess_board_size = data_dict["square_dimension"]
    print('Configure loaded.')

    # gripper->base poses (from robot JSON files) and matching image paths.
    R_g2b_list = []
    t_g2b_list = []
    image_list = []

    files = os.listdir(path)
    for f in files:
        if '.json' not in f:  # handle only the json files
            continue

        print('Handling file: {}...'.format(f))
        # A JSON pose file is only usable if a same-named .jpg exists.
        filename = f[:-5]  # strip the '.json' suffix
        if '{}.jpg'.format(filename) not in files:
            print('Corresponding image file for JSON file {} NOT found.'.format(filename))
            continue

        # Each JSON file holds a single-line pose dict.
        with open(os.path.join(path, f)) as jf:
            j = json.loads(jf.readline())

        # Pose dict -> position list + quaternion; quaternion -> 4x4 matrix.
        pose, orientation = pose2list(j)
        R = quaternion_matrix(orientation)

        R_g2b = np.array(R)[:3, :3]       # rotation part only
        t_g2b = np.array([pose]).T        # 3x1 column vector

        R_g2b_list.append(R_g2b)
        t_g2b_list.append(t_g2b)

        image_list.append(os.path.join(path, '{}.jpg'.format(filename)))

    # Extract intrinsics and per-image board poses (target->cam) from the images.
    cameraMatrix, distCoeffs, R_t2c_list, t_t2c_list = camera_int_ext(
        image_list, chess_board_width, chess_board_height, chess_board_size)

    # Solve AX=XB.
    # OpenCV 4.x adds calibrateHandEye for the hand-in-eye configuration:
    # target2cam(1) * cam2gripper * gripper2base(1) = target2cam(2) * cam2gripper * gripper2base(2)
    #   para1: g2b | para2: t2c | output: c2g
    # For the hand-to-eye configuration used here we feed it cam->target:
    # cam2target(1) * target2gripper * gripper2base(1) = cam2target(2) * target2gripper * gripper2base(2)
    #   para1: g2b | para2: c2t | output: t2g
    # calibrateCamera yields target->cam poses, so invert them to cam->target.
    R_c2t_list = []
    t_c2t_list = []
    for rvec, tvec in zip(R_t2c_list, t_t2c_list):
        # Rotation vector -> rotation matrix.
        R, _ = cv2.Rodrigues(rvec)
        R = np.array(R)
        t = np.array(tvec)

        # Invert the rigid transform: R' = R^-1, t' = -R^-1 t.
        R_inv = np.linalg.inv(R)
        t_inv = -np.dot(R_inv, t)

        R_c2t_list.append(R_inv)
        t_c2t_list.append(t_inv)

    print('Solving AX=XB formular...')
    R_target2gripper, t_target2gripper = cv2.calibrateHandEye(
        R_g2b_list, t_g2b_list,
        R_c2t_list, t_c2t_list
        )

    # Compose homogeneous 4x4 transforms from the first configuration to
    # chain cam -> target -> gripper -> base.
    homo = np.array([0, 0, 0, 1])

    def _to_homogeneous(R, t):
        # Stack [R | t] over [0 0 0 1] into a 4x4 transform.
        return np.block([[np.block([[R, t]])], [homo]])

    T_c2t = _to_homogeneous(R_c2t_list[0], t_c2t_list[0])
    T_t2g = _to_homogeneous(R_target2gripper, t_target2gripper)
    T_g2b = _to_homogeneous(R_g2b_list[0], t_g2b_list[0])

    # Full 4x4 cam->base transform (rotation + translation).
    T_c2b = np.dot(np.dot(T_c2t, T_t2g), T_g2b)
    print('\nR_cam2base:')
    print(T_c2b)

    # Base-frame axes as homogeneous points: columns 0..2 are the unit axis
    # tips, column 3 is the origin.
    bf = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [1, 1, 1, 1]])
    cb = np.dot(T_c2b, bf)

    draw_axes(bf, cb)