import json, pickle, os, random
import numpy as np
from pyquaternion import Quaternion
import cv2, tqdm
from scipy.spatial.transform import Rotation as R

def get_box_list(gt_samples):
    """Collect every ground-truth box as a flat 6-vector.

    Each annotation contributes ``np.hstack((translation, size))``, i.e.
    (x, y, z, w, l, h), in sample order then annotation order.

    Args:
        gt_samples: iterable of sample dicts, each carrying an
            ``'ann_infos'`` list of annotation dicts with ``'translation'``
            and ``'size'`` entries.

    Returns:
        list of 1-D numpy arrays, one per annotation.
    """
    boxes = []
    for sample in tqdm.tqdm(gt_samples, desc='Getting gt box...'):
        boxes.extend(
            np.hstack((anno['translation'], anno['size']))
            for anno in sample['ann_infos']
        )
    return boxes

def _pose_to_matrix(rotation, translation):
    """Build a 4x4 homogeneous transform from a quaternion + translation.

    Args:
        rotation: quaternion as accepted by ``pyquaternion.Quaternion``
            (w, x, y, z).
        translation: length-3 translation vector.

    Returns:
        (4, 4) numpy array ``[[R, t], [0, 0, 0, 1]]``.
    """
    mat = Quaternion(rotation).rotation_matrix
    mat = np.hstack((mat, np.array(translation).reshape(3, 1)))
    return np.vstack((mat, np.array([0, 0, 0, 1])))


def gt_visualizer(gt_samples, SAVE_PATH, NUSCENES_PATH, SAMPLE_DATA_PATH):
    """Project ground-truth 3D boxes into each camera and save annotated images.

    For every sample, each annotation's box corners are transformed
    object -> world -> ego -> sensor -> image, drawn as red wireframes with a
    green class label, downscaled by 0.5, and written to
    ``SAVE_PATH/gt_sample_visual/<sample_token>/<cam_channel>_<image name>``.

    Args:
        gt_samples: iterable of sample dicts with ``'sample_token'``,
            ``'cam_infos'`` and ``'ann_infos'`` entries.
        SAVE_PATH: root directory for the rendered images.
        NUSCENES_PATH: dataset root, joined with each camera's relative
            ``'filename'``.
        SAMPLE_DATA_PATH: unused; kept for interface compatibility.
    """
    CAM_LIST = [
        'CAM_FRONT',
        'CAM_FRONT_LEFT',
        'CAM_FRONT_RIGHT',
        'CAM_BACK',
        'CAM_BACK_LEFT',
        'CAM_BACK_RIGHT'
    ]
    vertices_info_dict = {}
    for sample in tqdm.tqdm(gt_samples, desc='Plotting gt samples...'):

        sample_token = sample['sample_token']
        vertices_info_dict[sample_token] = {}
        cam_infos = sample['cam_infos']
        annos = sample['ann_infos']

        # Hoisted out of the annotation loop: the world->image projection
        # depends only on the camera, not the annotation, so build it once
        # per sample instead of once per (annotation, camera) pair.
        cam_projections = {}
        for CAM in CAM_LIST:
            cam_info = cam_infos[CAM]
            ego_pose = cam_info['ego_pose']
            sensor_pose = cam_info['calibrated_sensor']

            # Ego to World / Sensor to Ego as 4x4 homogeneous transforms.
            ego_to_world_matrix = _pose_to_matrix(
                ego_pose['rotation'], ego_pose['translation'])
            sensor_to_ego_matrix = _pose_to_matrix(
                sensor_pose['rotation'], sensor_pose['translation'])

            # Sensor to Projection: pad the 3x3 intrinsics to 4x4 so the
            # whole chain composes on homogeneous coordinates.
            intrinsic_matrix = np.hstack(
                (np.array(sensor_pose['camera_intrinsic']), np.zeros((3, 1))))
            intrinsic_matrix = np.vstack(
                (intrinsic_matrix, np.array([0, 0, 0, 1])))

            world_to_img = (intrinsic_matrix
                            @ np.linalg.inv(sensor_to_ego_matrix)
                            @ np.linalg.inv(ego_to_world_matrix))
            file_name = os.path.join(NUSCENES_PATH, cam_info['filename'])
            cam_projections[CAM] = (world_to_img, file_name)

        for anno in annos:
            instance_token = anno['instance_token']
            vertices_info_dict[sample_token][instance_token] = {}

            name = anno['category_name']
            w, l, h = anno['size']
            trans = anno['translation']
            rot = anno['rotation']

            # Object to World: the 8 box corners in homogeneous object
            # coordinates (bottom face first, then top face).
            vertices_obj = np.array([
                [-l/2, -w/2, -h/2, 1],
                [l/2, -w/2, -h/2, 1],
                [l/2, w/2, -h/2, 1],
                [-l/2, w/2, -h/2, 1],
                [-l/2, -w/2, h/2, 1],
                [l/2, -w/2, h/2, 1],
                [l/2, w/2, h/2, 1],
                [-l/2, w/2, h/2, 1],
            ])

            obj_to_world_matrix = _pose_to_matrix(rot, trans)
            vertices_world = (obj_to_world_matrix @ vertices_obj.T).T

            for CAM in CAM_LIST:
                world_to_img, file_name = cam_projections[CAM]
                vertices_img = (world_to_img @ vertices_world.T).T

                # Skip boxes with any corner on or behind the image plane.
                # Using <= 0 (the original used < 0) also prevents a divide
                # by zero for corners exactly on the camera plane.
                if np.any(vertices_img[:, 2] <= 0):
                    continue

                # Perspective divide to pixel coordinates.
                vertices_img[:, 0] /= vertices_img[:, 2]
                vertices_img[:, 1] /= vertices_img[:, 2]
                vertices_2d = vertices_img[:, :2]

                vertices_info_dict[sample_token][instance_token][CAM] = {
                            'vertices': vertices_2d,
                            'class': name,
                            'filename': file_name,
                        }

    # Plot and save.  Edge index pairs into the 8-corner array above:
    # bottom face, top face, then the four vertical pillars.
    edges = [
            (0, 1),
            (1, 2),
            (2, 3),
            (3, 0),
            (4, 5),
            (5, 6),
            (6, 7),
            (7, 4),
            (0, 4),
            (1, 5),
            (3, 7),
            (2, 6)
        ]

    # exist_ok=True already tolerates a pre-existing directory; no need for
    # a separate os.path.exists check.
    os.makedirs(os.path.join(SAVE_PATH, 'gt_sample_visual'), exist_ok=True)

    image_dict = {}
    for sample_token, sample_vertices_dict in vertices_info_dict.items():
        sample_path = os.path.join(SAVE_PATH, 'gt_sample_visual', sample_token)
        os.makedirs(sample_path, exist_ok=True)
        if sample_token not in image_dict.keys():
            image_dict[sample_token] = {}
        # Group every box by the image file it projects onto so each image
        # is read and written exactly once.
        for instance_token, instance_vertices_dict in sample_vertices_dict.items():
            for sensor, vertices_info in instance_vertices_dict.items():
                file_name = vertices_info['filename']
                if file_name not in image_dict[sample_token].keys():
                    image_dict[sample_token][file_name] = []
                image_dict[sample_token][file_name].append(vertices_info)

        for file_name, vertices_info_list in image_dict[sample_token].items():
            origin_img = cv2.imread(file_name)
            if origin_img is None:
                # cv2.imread returns None (no exception) for a missing or
                # unreadable file; skip instead of crashing below.
                continue
            for vertices_info in vertices_info_list:
                vertices = vertices_info['vertices']
                class_name = vertices_info['class']
                # Plot edges on image
                for edge in edges:
                    p_1 = tuple(vertices[edge[0], :].astype(int))
                    p_2 = tuple(vertices[edge[1], :].astype(int))
                    origin_img = cv2.line(origin_img, p_1, p_2, color=(0, 0, 255), thickness=3)

                # Class annotation
                cv2.putText(origin_img,
                            class_name,
                            tuple(vertices[0, :].astype(int)),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            1, (0, 255, 0), thickness=3)

            # Rescale
            scale_factor = 0.5
            width = int(origin_img.shape[1] * scale_factor)
            height = int(origin_img.shape[0] * scale_factor)
            scaled_dim = (width, height)
            img = cv2.resize(origin_img, scaled_dim, interpolation=cv2.INTER_AREA)

            # Save; the camera channel name is the 3rd-from-last path part
            # of the nuScenes-relative filename (e.g. .../CAM_FRONT/xxx.jpg).
            cam_channel = file_name.split('/')[-3]
            save_path = os.path.join(sample_path, f'{cam_channel}_{os.path.basename(file_name)}')
            cv2.imwrite(save_path, img)

                
                
                

            