import copy
import os

import MinkowskiEngine as ME
import numpy as np
import torch
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.data_classes import LidarPointCloud
from nuscenes.utils.geometry_utils import view_points
from nuscenes.utils.splits import create_splits_scenes
from PIL import Image
from pyquaternion import Quaternion
from torch.utils.data import Dataset

# Held-out subset of the official nuScenes "train" split. It is used by
# NuScenesMatchDataset: the "parametrizing" phase trains on train minus these
# scenes, and the "verifying" phase evaluates on exactly these scenes.
CUSTOM_SPLIT = [
    "scene-0008",
    "scene-0009",
    "scene-0019",
    "scene-0029",
    "scene-0032",
    "scene-0042",
    "scene-0045",
    "scene-0049",
    "scene-0052",
    "scene-0054",
    "scene-0056",
    "scene-0066",
    "scene-0067",
    "scene-0073",
    "scene-0131",
    "scene-0152",
    "scene-0166",
    "scene-0168",
    "scene-0183",
    "scene-0190",
    "scene-0194",
    "scene-0208",
    "scene-0210",
    "scene-0211",
    "scene-0241",
    "scene-0243",
    "scene-0248",
    "scene-0259",
    "scene-0260",
    "scene-0261",
    "scene-0287",
    "scene-0292",
    "scene-0297",
    "scene-0305",
    "scene-0306",
    "scene-0350",
    "scene-0352",
    "scene-0358",
    "scene-0361",
    "scene-0365",
    "scene-0368",
    "scene-0377",
    "scene-0388",
    "scene-0391",
    "scene-0395",
    "scene-0413",
    "scene-0427",
    "scene-0428",
    "scene-0438",
    "scene-0444",
    "scene-0452",
    "scene-0453",
    "scene-0459",
    "scene-0463",
    "scene-0464",
    "scene-0475",
    "scene-0513",
    "scene-0533",
    "scene-0544",
    "scene-0575",
    "scene-0587",
    "scene-0589",
    "scene-0642",
    "scene-0652",
    "scene-0658",
    "scene-0669",
    "scene-0678",
    "scene-0687",
    "scene-0701",
    "scene-0703",
    "scene-0706",
    "scene-0710",
    "scene-0715",
    "scene-0726",
    "scene-0735",
    "scene-0740",
    "scene-0758",
    "scene-0786",
    "scene-0790",
    "scene-0804",
    "scene-0806",
    "scene-0847",
    "scene-0856",
    "scene-0868",
    "scene-0882",
    "scene-0897",
    "scene-0899",
    "scene-0976",
    "scene-0996",
    "scene-1012",
    "scene-1015",
    "scene-1016",
    "scene-1018",
    "scene-1020",
    "scene-1024",
    "scene-1044",
    "scene-1058",
    "scene-1094",
    "scene-1098",
    "scene-1107",
]


def minkunet_collate_pair_fn(list_data):
    """
    Collate function adapted for creating batches with MinkowskiEngine.

    Merges a list of per-sample tuples (as produced by NuScenesMatchDataset)
    into one batch dict. Point and pairing indices are shifted *in place* so
    that they remain valid after the per-sample tensors are concatenated.
    """
    (
        coords,
        feats,
        images,
        pairing_points,
        pairing_images,
        inverse_indexes,
        labels,
        evaluation_labels,
    ) = zip(*list_data)

    batch_n_points = []
    batch_n_pairings = []
    len_batch = []

    point_offset = 0
    n_cameras = images[0].shape[0]
    for sample_id, sample_coords in enumerate(coords):
        # Fill the (currently zeroed) leading column with the sample id.
        sample_coords[:, 0] = sample_id
        # Shift point indices by the points of all previous samples, and the
        # camera index by the images of all previous samples.
        pairing_points[sample_id][:] += point_offset
        pairing_images[sample_id][:, 0] += sample_id * n_cameras

        n_points = sample_coords.shape[0]
        batch_n_points.append(n_points)
        batch_n_pairings.append(pairing_points[sample_id].shape[0])
        len_batch.append(n_points)
        point_offset += n_points

    # Concatenate all per-sample containers into batch tensors.
    coords_batch = torch.cat(coords, 0).int()
    pairing_points = torch.tensor(np.concatenate(pairing_points))
    pairing_images = torch.tensor(np.concatenate(pairing_images))
    feats_batch = torch.cat(feats, 0).float()
    images_batch = torch.cat(images, 0).float()

    have_labels = all(x is not None for x in labels) and all(
        x is not None for x in evaluation_labels
    )
    if have_labels:
        labels_batch = torch.cat(labels, 0).long()
        evaluation_labels_batch = torch.cat(evaluation_labels, 0).long()
    else:
        labels_batch = None
        evaluation_labels_batch = None

    return {
        "sinput_C": coords_batch,
        "sinput_F": feats_batch,
        "input_I": images_batch,
        "pairing_points": pairing_points,
        "pairing_images": pairing_images,
        "batch_n_pairings": batch_n_pairings,
        "inverse_indexes": inverse_indexes,
        "labels": labels_batch,  # labels for each (voxelized) point
        "evaluation_labels": evaluation_labels_batch,  # labels for each point
        "len_batch": len_batch,
    }


class NuScenesMatchDataset(Dataset):
    """
    Dataset matching a 3D point cloud and an image using projection.

    Each item pairs one lidar keyframe (voxelized with MinkowskiEngine) with
    the six camera images of the same sample, together with the point<->pixel
    correspondences obtained by projecting the lidar points onto every image
    plane, and optionally per-point lidarseg labels.
    """

    # Mapping from the 32 raw nuScenes lidarseg labels to the condensed
    # evaluation label set (0 = noise / ignored). Defined once at class level
    # so the dict is not rebuilt on every __getitem__ call; it remains
    # reachable as `self.eval_labels` for backward compatibility.
    #
    # nuScenes labels:                             eval labels:
    # 0   noise                                    0   noise
    # 1   animal                                   1   barrier
    # 2   human.pedestrian.adult                   2   bicycle
    # 3   human.pedestrian.child                   3   bus
    # 4   human.pedestrian.construction_worker     4   car
    # 5   human.pedestrian.personal_mobility       5   construction_vehicle
    # 6   human.pedestrian.police_officer          6   motorcycle
    # 7   human.pedestrian.stroller                7   pedestrian
    # 8   human.pedestrian.wheelchair              8   traffic_cone
    # 9   movable_object.barrier                   9   trailer
    # 10  movable_object.debris                    10  truck
    # 11  movable_object.pushable_pullable         11  driveable_surface
    # 12  movable_object.trafficcone               12  other_flat
    # 13  static_object.bicycle_rack               13  sidewalk
    # 14  vehicle.bicycle                          14  terrain
    # 15  vehicle.bus.bendy                        15  manmade
    # 16  vehicle.bus.rigid                        16  vegetation
    # 17  vehicle.car
    # 18  vehicle.construction
    # 19  vehicle.emergency.ambulance
    # 20  vehicle.emergency.police
    # 21  vehicle.motorcycle
    # 22  vehicle.trailer
    # 23  vehicle.truck
    # 24  flat.driveable_surface
    # 25  flat.other
    # 26  flat.sidewalk
    # 27  flat.terrain
    # 28  static.manmade
    # 29  static.other
    # 30  static.vegetation
    # 31  vehicle.ego
    #
    # NOTE(review): raw labels 25-31 are mapped to 0 here; the original code
    # noted these classes "are not appeared in images".
    eval_labels = {
        0: 0, 1: 0, 2: 7, 3: 7, 4: 7, 5: 0, 6: 7, 7: 0,
        8: 0, 9: 1, 10: 0, 11: 0, 12: 8, 13: 0, 14: 2, 15: 3,
        16: 3, 17: 4, 18: 5, 19: 0, 20: 0, 21: 6, 22: 9, 23: 10,
        24: 11, 25: 0, 26: 0, 27: 0, 28: 0, 29: 0, 30: 0, 31: 0,
    }

    def __init__(
        self,
        phase,
        config,
        shuffle=False,
        cloud_transforms=None,
        mixed_transforms=None,
        **kwargs,
    ):
        """
        :param phase: one of "train", "val", "test", "parametrizing" or
            "verifying"; selects the scene split and the nuScenes version.
        :param config: dict-like with at least "voxel_size",
            "cylindrical_coordinates", "use_hard_labels" and, optionally,
            "dataset_skip_step".
        :param shuffle: if True, randomize the camera order per sample.
        :param cloud_transforms: transforms applied to the point cloud only.
        :param mixed_transforms: transforms applied jointly to the point
            cloud, images and pairings.
        :param kwargs: may contain "cached_nuscenes", a pre-built NuScenes
            instance reused to avoid reloading the database.
        :raises ValueError: if `phase` is not a known split name.
        """
        self.phase = phase
        self.shuffle = shuffle
        self.cloud_transforms = cloud_transforms
        self.mixed_transforms = mixed_transforms
        self.voxel_size = config["voxel_size"]
        self.cylinder = config["cylindrical_coordinates"]
        # Hard labels are only read for non-test phases (the test database
        # has no lidarseg annotations).
        self.labels = config["use_hard_labels"] and self.phase != "test"

        if self.phase != "test":
            if "cached_nuscenes" in kwargs:
                self.nusc = kwargs["cached_nuscenes"]
            else:
                self.nusc = NuScenes(
                    version="v1.0-trainval", dataroot="datasets/nuscenes", verbose=False
                )
        else:
            self.nusc = NuScenes(
                version="v1.0-test", dataroot="datasets/nuscenes", verbose=False
            )

        self.list_keyframes = []
        # A skip ratio can be used to reduce the dataset size and accelerate
        # experiments.
        try:
            skip_ratio = config["dataset_skip_step"]
        except KeyError:
            skip_ratio = 1
        skip_counter = 0
        if phase in ("train", "val", "test"):
            phase_scenes = create_splits_scenes()[phase]
        elif phase == "parametrizing":
            phase_scenes = list(
                set(create_splits_scenes()["train"]) - set(CUSTOM_SPLIT)
            )
        elif phase == "verifying":
            phase_scenes = CUSTOM_SPLIT
        else:
            # Previously an unknown phase left `phase_scenes` unbound and
            # crashed later with UnboundLocalError; fail fast instead.
            raise ValueError(f"Unknown phase: {phase!r}")
        # Create a list of camera & lidar scans.
        for scene in self.nusc.scene:
            if scene["name"] in phase_scenes:
                skip_counter += 1
                if skip_counter % skip_ratio == 0:
                    self.create_list_of_scans(scene)

    def create_list_of_scans(self, scene):
        """Append every keyframe of `scene` to `self.list_keyframes`."""
        current_sample_token = scene["first_sample_token"]
        # Loop to get all successive keyframes.
        list_data = []
        while current_sample_token != "":
            current_sample = self.nusc.get("sample", current_sample_token)
            list_data.append(current_sample["data"])
            current_sample_token = current_sample["next"]

        # Add new scans in this scene into the global list.
        self.list_keyframes.extend(list_data)

    def map_pointcloud_to_image(self, data, min_dist: float = 1.0):
        """
        Given a sample's data record, load the lidar point cloud and map it to
        each of the six camera image planes. Code adapted from nuscenes-devkit
        https://github.com/nutonomy/nuscenes-devkit.
        :param data: the "data" dict of a nuScenes sample (sensor name -> token).
        :param min_dist: Distance from the camera below which points are discarded.
        :return: (points (n, 4), list of images, pairing_points, pairing_images,
            point_labels or None)
        """
        pointsensor = self.nusc.get("sample_data", data["LIDAR_TOP"])
        pcl_path = os.path.join(self.nusc.dataroot, pointsensor["filename"])
        pc_original = LidarPointCloud.from_file(pcl_path)
        pc_ref = pc_original.points  # ((x,y,z,intensity:0-255), n)

        if self.labels:
            lidarseg_labels_filename = os.path.join(
                self.nusc.dataroot,
                self.nusc.get("lidarseg", data["LIDAR_TOP"])["filename"],
            )
            point_labels = np.fromfile(lidarseg_labels_filename, dtype=np.uint8)
        else:
            point_labels = None

        # The lidar->ego and ego->global transforms at the sweep timestamp do
        # not depend on the camera: look them up once outside the loop.
        cs_lidar = self.nusc.get(
            "calibrated_sensor", pointsensor["calibrated_sensor_token"]
        )
        pose_lidar = self.nusc.get("ego_pose", pointsensor["ego_pose_token"])

        images = []
        pairing_points = np.empty(0, dtype=np.int64)
        pairing_images = np.empty((0, 3), dtype=np.int64)
        camera_list = [
            "CAM_FRONT",
            "CAM_FRONT_RIGHT",
            "CAM_BACK_RIGHT",
            "CAM_BACK",
            "CAM_BACK_LEFT",
            "CAM_FRONT_LEFT",
        ]
        if self.shuffle:
            np.random.shuffle(camera_list)
        for i, camera_name in enumerate(camera_list):
            pc = copy.deepcopy(pc_original)
            cam = self.nusc.get("sample_data", data[camera_name])
            im = np.array(Image.open(os.path.join(self.nusc.dataroot, cam["filename"])))

            # Points live in the point sensor frame, so they need to be
            # transformed via global to the image plane.
            # First step: lidar frame -> ego vehicle frame at the sweep
            # timestamp.
            pc.rotate(Quaternion(cs_lidar["rotation"]).rotation_matrix)
            pc.translate(np.array(cs_lidar["translation"]))

            # Second step: ego frame -> global frame.
            pc.rotate(Quaternion(pose_lidar["rotation"]).rotation_matrix)
            pc.translate(np.array(pose_lidar["translation"]))

            # Third step: global frame -> ego vehicle frame at the image
            # timestamp.
            pose_cam = self.nusc.get("ego_pose", cam["ego_pose_token"])
            pc.translate(-np.array(pose_cam["translation"]))
            pc.rotate(Quaternion(pose_cam["rotation"]).rotation_matrix.T)

            # Fourth step: ego frame -> camera frame.
            cs_cam = self.nusc.get(
                "calibrated_sensor", cam["calibrated_sensor_token"]
            )
            pc.translate(-np.array(cs_cam["translation"]))
            pc.rotate(Quaternion(cs_cam["rotation"]).rotation_matrix.T)

            # Fifth step: actually take a "picture" of the point cloud.
            # Grab the depths (camera frame z axis points away from the camera).
            depths = pc.points[2, :]

            # Take the actual picture
            # (matrix multiplication with camera-matrix + renormalization).
            points = view_points(
                pc.points[:3, :],
                np.array(cs_cam["camera_intrinsic"]),
                normalize=True,
            )  # depth all set to 1.0, thus should be discarded

            # Remove points that are either outside or behind the camera.
            # Also make sure points are at least min_dist in front of the
            # camera to avoid seeing the lidar points on the camera casing
            # for non-keyframes which are slightly out of sync.
            points = points[:2].T
            mask = np.logical_and.reduce(
                (
                    depths > min_dist,
                    points[:, 0] > 0,
                    points[:, 0] < im.shape[1] - 1,
                    points[:, 1] > 0,
                    points[:, 1] < im.shape[0] - 1,
                )
            )
            matching_points = np.where(mask)[0]  # row indexes of Trues in mask
            # Round to pixel coordinates; flip (x, y) -> (y, x) to match
            # image indexing.
            matching_pixels = np.round(
                np.flip(points[matching_points], axis=1)
            ).astype(np.int64)

            images.append(im / 255)
            pairing_points = np.concatenate((pairing_points, matching_points))
            pairing_images = np.concatenate(
                (
                    pairing_images,
                    np.concatenate(
                        (
                            np.ones((matching_pixels.shape[0], 1), dtype=np.int64) * i,
                            matching_pixels,
                        ),
                        axis=1,
                    ),  # (matched num, (camera index, y, x))
                )
            )  # vstack
        return pc_ref.T, images, pairing_points, pairing_images, point_labels

    def __len__(self):
        """Number of keyframes across all selected scenes."""
        return len(self.list_keyframes)

    def __getitem__(self, idx):
        """
        :return: tuple of
            discrete_coords: (m, 4) voxel coordinates; column 0 is a zeroed
                batch-id placeholder filled by the collate function,
            unique_feats: per-voxel intensity features,
            images: (n_cams, channels, h, w) float images in [0, 1],
            pairing_points: voxel indexes matched to pixels,
            pairing_images: (matched, (camera index, y, x)) pixel coordinates,
            inverse_indexes: mapping from original points to voxels,
            unique_labels: per-voxel eval labels, or None,
            point_labels: per-point eval labels, or None.
        """
        (
            pc,  # (n, (x, y, z, intensity:0-255))
            images,  # (n_cams, h, w, channels) in [0, 1]
            pairing_points,  # (matched num among n_cams imgs,)
            pairing_images,  # (matched num ..., (camera index, y, x))
            point_labels,
        ) = self.map_pointcloud_to_image(self.list_keyframes[idx])

        intensity = torch.tensor(pc[:, 3:])  # intensity in [0, 255]
        pc = torch.tensor(pc[:, :3])  # (n, (x, y, z))
        images = torch.tensor(  # (n_cams, channels, h, w) in [0, 1]
            np.array(images, dtype=np.float32).transpose(0, 3, 1, 2)
        )

        if self.cloud_transforms:
            pc = self.cloud_transforms(pc)
        if self.mixed_transforms:
            (
                pc,
                intensity,
                images,
                pairing_points,
                pairing_images,
            ) = self.mixed_transforms(
                pc, intensity, images, pairing_points, pairing_images
            )

        if self.cylinder:
            # Transform to cylindrical coordinates and scale for voxel size.
            x, y, z = pc.T
            rho = torch.sqrt(x**2 + y**2) / self.voxel_size
            # Corresponds to a split each 1 degree.
            phi = torch.atan2(y, x) * 180 / np.pi  # (-180, 180]
            z = z / self.voxel_size
            coords_aug = torch.cat((rho[:, None], phi[:, None], z[:, None]), 1)
        else:
            coords_aug = pc / self.voxel_size

        # Voxelization with MinkowskiEngine; may reduce the number of points.
        # `indexes` are the indexes of the points kept after voxelization.
        discrete_coords, indexes, inverse_indexes = ME.utils.sparse_quantize(
            coords_aug.contiguous(), return_index=True, return_inverse=True
        )
        pairing_points = inverse_indexes[pairing_points]

        unique_feats = intensity[indexes]  # use intensities as features

        if point_labels is not None:
            # Map raw lidarseg labels to the evaluation label set (see the
            # class-level `eval_labels` table).
            point_labels = torch.tensor(
                np.vectorize(self.eval_labels.__getitem__)(point_labels),
                dtype=torch.int32,
            )
            unique_labels = point_labels[indexes]
        else:
            unique_labels = None

        # Prepend a zero batch-id column (column 0 is all zeros here).
        discrete_coords = torch.cat(
            (
                torch.zeros(discrete_coords.shape[0], 1, dtype=torch.int32),
                discrete_coords,
            ),
            1,
        )

        return (
            discrete_coords,  # (m, (0, rho, phi, z))
            unique_feats,  # intensities
            images,  # (n_cams, channels, h, w) in [0, 1]
            pairing_points,  # (matched num among n_cams imgs,)
            pairing_images,  # (matched num ..., (camera index, y, x))
            inverse_indexes,  # indices that can recover original coordinates
            unique_labels,
            point_labels,
        )
