import json, pickle, os, random
import numpy as np
from pyquaternion import Quaternion
import cv2, tqdm
from scipy.spatial.transform import Rotation as R

# Calculate 3D iou, box in [x, y, z, w, h, l] format
def compute_3d_iou(box1, box2):
    """Axis-aligned 3D IoU between two boxes given as [x, y, z, w, h, l].

    Box rotation is ignored: each box is reduced to its axis-aligned
    min/max corner extents before intersecting.  Returns a value in
    [0, 1]; 0 when the boxes do not overlap.
    """
    # Min/max corners per axis: size components 3..5 pair with centers 0..2.
    lo1 = [box1[i] - box1[i + 3] / 2 for i in range(3)]
    hi1 = [box1[i] + box1[i + 3] / 2 for i in range(3)]
    lo2 = [box2[i] - box2[i + 3] / 2 for i in range(3)]
    hi2 = [box2[i] + box2[i + 3] / 2 for i in range(3)]

    # Intersection volume: product of per-axis overlap lengths; any
    # non-positive overlap zeroes the whole product.
    inter = 1.0
    for i in range(3):
        inter *= max(0, min(hi1[i], hi2[i]) - max(lo1[i], lo2[i]))
    if inter == 0:
        return 0

    vol1 = 1.0
    vol2 = 1.0
    for i in range(3):
        vol1 *= hi1[i] - lo1[i]
        vol2 *= hi2[i] - lo2[i]
    union = vol1 + vol2 - inter
    return inter / union if union > 0 else 0

def calculate_iou(gt_box_list, pred_box):
    """Best axis-aligned 3D IoU of *pred_box* against any ground-truth box.

    Returns 0 when ``gt_box_list`` is empty or nothing overlaps.
    """
    best = 0
    for gt_box in gt_box_list:
        best = max(best, compute_3d_iou(gt_box, pred_box))
    return best

def _pose_to_matrix(rotation, translation):
    """Build a 4x4 homogeneous transform from a quaternion and a 3-vector."""
    matrix = Quaternion(rotation).rotation_matrix
    matrix = np.hstack((matrix, np.array(translation).reshape(3, 1)))
    return np.vstack((matrix, np.array([0, 0, 0, 1])))


def _plot_boxes(vertices_dict, SAVE_PATH):
    """Draw the collected 2D box vertices onto their camera images and save
    half-scale copies under SAVE_PATH/iou_filter/<sample_token>/."""
    # Corner indices: 0-3 bottom face, 4-7 top face, joined into 12 edges.
    edges = [
        (0, 1), (1, 2), (2, 3), (3, 0),
        (4, 5), (5, 6), (6, 7), (7, 4),
        (0, 4), (1, 5), (3, 7), (2, 6),
    ]

    # exist_ok=True already handles a pre-existing directory; no need to
    # check os.path.exists first.
    os.makedirs(os.path.join(SAVE_PATH, 'iou_filter'), exist_ok=True)

    for sample_token, sample_vertices_dict in vertices_dict.items():
        save_root = os.path.join(SAVE_PATH, 'iou_filter', sample_token)
        os.makedirs(save_root, exist_ok=True)
        for filename, vertices_info_list in sample_vertices_dict.items():
            # Camera name is the third-from-last path component of the
            # nuScenes sample-data filename.
            CAM = filename.split('/')[-3]
            save_path = os.path.join(save_root, f'{CAM}_{os.path.basename(filename)}')
            origin_img = cv2.imread(filename)
            for vertices_info in vertices_info_list:
                vertices = vertices_info['vertices']
                detection_info = vertices_info['detection_info']
                box_color = vertices_info['box_color']
                # Wireframe box.
                for edge in edges:
                    p_1 = tuple(vertices[edge[0], :].astype(int))
                    p_2 = tuple(vertices[edge[1], :].astype(int))
                    origin_img = cv2.line(origin_img, p_1, p_2, color=box_color, thickness=3)
                # Class name + score anchored at corner 0.
                cv2.putText(origin_img,
                            detection_info,
                            tuple(vertices[0, :].astype(int)),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            1, (255, 0, 0), thickness=3)

            # Rescale to half size to keep the output files light.
            scale_factor = 0.5
            width = int(origin_img.shape[1] * scale_factor)
            height = int(origin_img.shape[0] * scale_factor)
            img = cv2.resize(origin_img, (width, height), interpolation=cv2.INTER_AREA)
            cv2.imwrite(save_path, img)


def fp_filter(NUSCENES_PATH,
              SAMPLE_DATA_PATH,
              SAVE_PATH,
              sample_annos_dict,
              sample_info_dict,
              gt_box_list,
              iou_threshold):
    """Project predicted 3D boxes into the six nuScenes camera images and
    save visualisations colour-coded by IoU against ground truth.

    Predictions with detection_score <= 0.5 are skipped.  A box is drawn
    green when its best axis-aligned IoU against ``gt_box_list`` exceeds
    ``iou_threshold``, red otherwise.  Boxes with any corner at or behind
    the image plane are not drawn for that camera.

    Args:
        NUSCENES_PATH: unused here; kept for interface compatibility.
        SAMPLE_DATA_PATH: unused here; kept for interface compatibility.
        SAVE_PATH: root directory for the rendered images.
        sample_annos_dict: sample_token -> list of detection dicts with
            'size', 'translation', 'rotation', 'detection_name',
            'detection_score' keys.
        sample_info_dict: sample_token -> per-camera pose/calibration info.
        gt_box_list: ground-truth boxes in [x, y, z, w, h, l] format.
        iou_threshold: IoU above which a prediction is coloured green.
    """
    CAM_LIST = [
        'CAM_FRONT',
        'CAM_FRONT_LEFT',
        'CAM_FRONT_RIGHT',
        'CAM_BACK',
        'CAM_BACK_LEFT',
        'CAM_BACK_RIGHT'
    ]
    vertices_dict = {}
    for sample_token, annos_list in tqdm.tqdm(sample_annos_dict.items(), desc='fp filtering...'):
        sensor_info_dict = sample_info_dict[sample_token]
        vertices_dict[sample_token] = {}

        # The world -> image projection is identical for every annotation in
        # the sample, so build it once per camera instead of per annotation.
        cam_proj = {}
        for CAM in CAM_LIST:
            cam_info = sensor_info_dict[CAM]
            ego_pose = cam_info['ego_pose']
            sensor_pose = cam_info['sensor_pose']
            ego_to_world_matrix = _pose_to_matrix(ego_pose['rotation'], ego_pose['translation'])
            sensor_to_ego_matrix = _pose_to_matrix(sensor_pose['rotation'], sensor_pose['translation'])
            # 3x3 intrinsics padded to 4x4 so the whole chain is homogeneous.
            intrinsic_matrix = np.hstack((np.array(sensor_pose['intrinsic']), np.zeros((3, 1))))
            intrinsic_matrix = np.vstack((intrinsic_matrix, np.array([0, 0, 0, 1])))
            proj = intrinsic_matrix @ np.linalg.inv(sensor_to_ego_matrix) @ np.linalg.inv(ego_to_world_matrix)
            cam_proj[CAM] = (proj, cam_info['filename'])

        for anno in annos_list:
            detection_score = round(anno['detection_score'], 2)
            # Confidence filter first: skip low-score boxes before doing any
            # IoU or projection work.
            if detection_score <= 0.5:
                continue

            # nuScenes size order is (width, length, height) — TODO confirm
            # against the annotation source.
            w, l, h = anno['size']
            trans = anno['translation']
            rot = anno['rotation']
            detection_name = anno['detection_name']
            detection_info = f'{detection_name} {detection_score}'

            # Colour by best IoU against ground truth: green = matched.
            pred_bbox = np.hstack((anno['translation'], anno['size']))
            iou_score = calculate_iou(gt_box_list, pred_bbox)
            if iou_score > iou_threshold:
                box_color = (0, 255, 0)
            else:
                box_color = (0, 0, 255)

            # Eight box corners in the object frame, homogeneous coordinates
            # (length along x, width along y, height along z).
            vertices_obj = np.array([
                [-l/2, -w/2, -h/2, 1],
                [l/2, -w/2, -h/2, 1],
                [l/2, w/2, -h/2, 1],
                [-l/2, w/2, -h/2, 1],
                [-l/2, -w/2, h/2, 1],
                [l/2, -w/2, h/2, 1],
                [l/2, w/2, h/2, 1],
                [-l/2, w/2, h/2, 1],
            ])
            obj_to_world_matrix = _pose_to_matrix(rot, trans)
            vertices_world = (obj_to_world_matrix @ vertices_obj.T).T

            for CAM in CAM_LIST:
                proj, filename = cam_proj[CAM]
                vertices_img = (proj @ vertices_world.T).T

                # Skip boxes with any corner at or behind the image plane:
                # z == 0 would divide by zero below, z < 0 flips the
                # projection.  (Original used `< 0`, missing the z == 0 case.)
                if np.any(vertices_img[:, 2] <= 0):
                    continue

                # Perspective divide to pixel coordinates.
                vertices_img[:, 0] /= vertices_img[:, 2]
                vertices_img[:, 1] /= vertices_img[:, 2]
                vertices_2d = vertices_img[:, :2]

                vertices_dict[sample_token].setdefault(filename, []).append({
                    'vertices': vertices_2d,
                    'detection_info': detection_info,
                    'box_color': box_color
                })

    _plot_boxes(vertices_dict, SAVE_PATH)

        