import json, pickle, os, random
import numpy as np
from pyquaternion import Quaternion
import cv2, tqdm
from scipy.spatial.transform import Rotation as R

# Load nuscenes info
# Load nuscenes info
def nuscenes_info_loader(NUSCENES_PATH, SAMPLE_DATA_PATH):
    """Collect per-sample camera metadata from the nuScenes v1.0 tables.

    Reads sensor.json / calibrated_sensor.json / ego_pose.json /
    sample_data.json under ``NUSCENES_PATH`` and returns a nested dict:

        {sample_token: {channel: {'ego_pose':    {'rotation', 'translation'},
                                  'sensor_pose': {'channel', 'translation',
                                                  'rotation', 'intrinsic'},
                                  'filename':    str}}}

    Only entries whose filename ends in 'jpeg' are kept; each kept file is
    expected to be a symlink whose target is resolved against
    ``SAMPLE_DATA_PATH``.
    """
    def _table(name):
        # Each nuScenes metadata table is a JSON list of records.
        with open(os.path.join(NUSCENES_PATH, 'v1.0/' + name), 'r') as f:
            return json.load(f)

    # sensor_token -> channel name (e.g. 'CAM_FRONT')
    channel_by_sensor = {rec['token']: rec['channel'] for rec in _table('sensor.json')}

    # calibrated_sensor_token -> extrinsics/intrinsics plus resolved channel
    calib_by_token = {
        rec['token']: {
            'channel': channel_by_sensor[rec['sensor_token']],
            'translation': rec['translation'],
            'rotation': rec['rotation'],
            'intrinsic': rec['camera_intrinsic'],
        }
        for rec in _table('calibrated_sensor.json')
    }

    # ego_pose_token -> ego vehicle pose in the global frame
    pose_by_token = {
        rec['token']: {'rotation': rec['rotation'], 'translation': rec['translation']}
        for rec in _table('ego_pose.json')
    }

    sample_info_dict = {}
    for record in tqdm.tqdm(_table('sample_data.json'), desc='Loading Nuscenes info...'):
        token = record['sample_token']
        pose = pose_by_token[record['ego_pose_token']]
        calib = calib_by_token[record['calibrated_sensor_token']]

        path = os.path.join(NUSCENES_PATH, record['filename'])
        # Keep camera frames only (this setup stores them as *.jpeg symlinks).
        if not path.endswith('jpeg'):
            continue
        resolved = os.path.join(SAMPLE_DATA_PATH, os.readlink(path))

        sample_info_dict.setdefault(token, {})[calib['channel']] = {
            'ego_pose': pose,
            'sensor_pose': calib,
            'filename': resolved,
        }
    return sample_info_dict

def _pose_to_matrix(rotation, translation):
    """Return the 4x4 homogeneous transform for a (w, x, y, z) quaternion
    rotation and a 3-vector translation."""
    matrix = np.eye(4)
    matrix[:3, :3] = Quaternion(rotation).rotation_matrix
    matrix[:3, 3] = np.asarray(translation)
    return matrix


def pred_visualizer(NUSCENES_PATH, SAMPLE_DATA_PATH, SAVE_PATH, sample_annos_dict,
                    sample_info_dict, score_thresh=0.5):
    """Project predicted 3D boxes into the six nuScenes cameras and save
    annotated, half-scale images under SAVE_PATH/pred_sample_visual/<token>/.

    Args:
        NUSCENES_PATH: dataset root (unused here; kept for interface compatibility).
        SAMPLE_DATA_PATH: sample-data root (unused here; kept for interface compatibility).
        SAVE_PATH: output root directory.
        sample_annos_dict: {sample_token: [anno, ...]} where each anno carries
            'size' (w, l, h), 'translation', 'rotation' (w, x, y, z quaternion),
            'detection_name' and 'detection_score'.
        sample_info_dict: per-sample camera info as built by nuscenes_info_loader().
        score_thresh: boxes with rounded detection_score <= score_thresh are
            skipped (default 0.5 preserves the previous hard-coded behavior).
    """
    CAM_LIST = [
        'CAM_FRONT',
        'CAM_FRONT_LEFT',
        'CAM_FRONT_RIGHT',
        'CAM_BACK',
        'CAM_BACK_LEFT',
        'CAM_BACK_RIGHT'
    ]
    # Box wireframe: bottom face, top face, then the four vertical pillars
    # (indices into the 8 corners built below).
    edges = [
        (0, 1), (1, 2), (2, 3), (3, 0),
        (4, 5), (5, 6), (6, 7), (7, 4),
        (0, 4), (1, 5), (3, 7), (2, 6),
    ]

    # --- Phase 1: project every kept box into every camera image plane ---
    vertices_dict = {}
    for sample_token, annos_list in tqdm.tqdm(sample_annos_dict.items(), desc='Plotting pred samples...'):
        sensor_info_dict = sample_info_dict[sample_token]
        vertices_dict[sample_token] = {}

        # The world->image projection is invariant per camera, so build it once
        # per sample instead of once per (box, camera) pair as before.
        cam_proj = {}
        for CAM in CAM_LIST:
            cam_info = sensor_info_dict[CAM]
            ego_to_world = _pose_to_matrix(cam_info['ego_pose']['rotation'],
                                           cam_info['ego_pose']['translation'])
            sensor_to_ego = _pose_to_matrix(cam_info['sensor_pose']['rotation'],
                                            cam_info['sensor_pose']['translation'])
            intrinsic = np.eye(4)
            intrinsic[:3, :3] = np.array(cam_info['sensor_pose']['intrinsic'])
            world_to_img = intrinsic @ np.linalg.inv(sensor_to_ego) @ np.linalg.inv(ego_to_world)
            cam_proj[CAM] = (cam_info['filename'], world_to_img)

        for anno in annos_list:
            detection_score = round(anno['detection_score'], 2)
            if detection_score <= score_thresh:
                continue
            detection_info = f"{anno['detection_name']} {detection_score}"

            # 8 box corners in the object frame (homogeneous coordinates).
            w, l, h = anno['size']
            x, y, z = l / 2, w / 2, h / 2
            vertices_obj = np.array([
                [-x, -y, -z, 1], [x, -y, -z, 1], [x, y, -z, 1], [-x, y, -z, 1],
                [-x, -y, z, 1], [x, -y, z, 1], [x, y, z, 1], [-x, y, z, 1],
            ])

            # Object -> World
            obj_to_world = _pose_to_matrix(anno['rotation'], anno['translation'])
            vertices_world = (obj_to_world @ vertices_obj.T).T

            for CAM in CAM_LIST:
                filename, world_to_img = cam_proj[CAM]
                vertices_img = (world_to_img @ vertices_world.T).T

                # Skip boxes with any corner on or behind the image plane.
                # '<= 0' (was '< 0') also guards the perspective divide below
                # against division by zero.
                if np.any(vertices_img[:, 2] <= 0):
                    continue

                # Perspective divide -> pixel coordinates.
                vertices_2d = vertices_img[:, :2] / vertices_img[:, 2:3]

                vertices_dict[sample_token].setdefault(filename, []).append({
                    'vertices': vertices_2d,
                    'detection_info': detection_info
                })

    # --- Phase 2: draw wireframes + labels and save downscaled images ---
    out_root = os.path.join(SAVE_PATH, 'pred_sample_visual')
    os.makedirs(out_root, exist_ok=True)

    for sample_token, sample_vertices_dict in vertices_dict.items():
        save_root = os.path.join(out_root, sample_token)
        os.makedirs(save_root, exist_ok=True)
        for filename, vertices_info_list in sample_vertices_dict.items():
            origin_img = cv2.imread(filename)
            if origin_img is None:
                # imread returns None for missing/unreadable files; the
                # original code would crash in cv2.line in that case.
                continue
            # Camera channel is the grandparent directory of the image file.
            CAM = filename.split('/')[-3]
            save_path = os.path.join(save_root, f'{CAM}_{os.path.basename(filename)}')

            for vertices_info in vertices_info_list:
                vertices = vertices_info['vertices']
                # Plain-int tuples: cv2 drawing APIs reject numpy scalar points
                # in some OpenCV versions.
                pts = [tuple(int(c) for c in vertices[i]) for i in range(8)]
                for i, j in edges:
                    origin_img = cv2.line(origin_img, pts[i], pts[j], color=(0, 0, 255), thickness=3)
                # Class name + score, anchored at corner 0, in green.
                cv2.putText(origin_img,
                            vertices_info['detection_info'],
                            pts[0],
                            cv2.FONT_HERSHEY_SIMPLEX,
                            1, (0, 255, 0), thickness=3)

            # Downscale by half to keep the saved visualizations small.
            scale_factor = 0.5
            scaled_dim = (int(origin_img.shape[1] * scale_factor),
                          int(origin_img.shape[0] * scale_factor))
            img = cv2.resize(origin_img, scaled_dim, interpolation=cv2.INTER_AREA)
            cv2.imwrite(save_path, img)
        