r"""Adapted from `Waymo to KITTI converter
    <https://github.com/caizhongang/waymo_kitti_converter>`_.
"""
# TODO: 

try:
    from waymo_open_dataset import dataset_pb2
except ImportError:
    raise ImportError(
        'Please run "pip install waymo-open-dataset-tf-2-1-0==1.2.0" '
        'to install the official devkit first.')


# Standard library
from collections.abc import Iterable  # moved out of `collections` in Python 3.10
from glob import glob
from os.path import join
from struct import pack

# Third-party
import mmcv
import numpy as np
import tensorflow as tf
from waymo_open_dataset.utils import range_image_utils, transform_utils
from waymo_open_dataset.utils.frame_utils import \
    parse_range_image_and_camera_projection


class Waymo4GS(object):
    """Waymo to KITTI converter.

    This class serves as the converter to change the waymo raw data to KITTI
    format.

    Args:
        load_dir (str): Directory to load waymo raw data.
        save_dir (str): Directory to save data in KITTI format.
        prefix (str): Prefix of filename. In general, 0 for training, 1 for
            validation and 2 for testing.
        workers (str): Number of workers for the parallel process.
        test_mode (bool): Whether in the test_mode. Default: False.
    """

    def __init__(self,
                 load_dir,
                 save_dir,
                 workers=64,
                 test_mode=False):
        """Set up filters, locate the input segment and create output dirs.

        Args:
            load_dir (str): Directory holding exactly one ``*.tfrecord``
                segment of Waymo raw data.
            save_dir (str): Directory to save the converted data.
            workers (int or str): Number of parallel workers; coerced to int.
            test_mode (bool): Whether in the test_mode. Default: False.

        Raises:
            ValueError: If ``load_dir`` does not contain exactly one
                ``*.tfrecord`` file.
        """
        self.filter_empty_3dboxes = True
        self.filter_no_label_zone_points = True

        self.selected_waymo_classes = ['VEHICLE', 'PEDESTRIAN', 'CYCLIST']

        # Only data collected in specific locations will be converted.
        # If set to None, this filter is disabled.
        # Available options: location_sf (main dataset)
        self.selected_waymo_locations = None
        self.save_track_id = False

        # Turn on eager execution for older tensorflow versions.
        if int(tf.__version__.split('.')[0]) < 2:
            tf.enable_eager_execution()

        self.load_dir = load_dir
        self.save_dir = save_dir
        self.workers = int(workers)
        self.test_mode = test_mode

        self.tfrecord_pathnames = sorted(
            glob(join(self.load_dir, '*.tfrecord')))
        # This converter is written for a single segment only; raise (not
        # `assert`, which is stripped under `python -O`) on violation.
        if len(self.tfrecord_pathnames) != 1:
            raise ValueError("Only one segment can be handled.")

        self.image_save_dir = f'{self.save_dir}/images'
        self.intrinstic_save_dir = f'{self.save_dir}/intrinstic'
        self.extrinstic_save_dir = f'{self.save_dir}/extrinstic'
        self.point_cloud_save_dir = f'{self.save_dir}/point_cloud'
        self.pose_save_dir = f'{self.save_dir}/pose'
        self.lidar_bound_save_dir = f'{self.save_dir}/lidar_bound'
        self.distort_save_dir = f'{self.save_dir}/distort'

        self.create_folder()

    def convert(self):
        """Convert every tfrecord file in parallel across the workers."""
        print('Start converting ...')
        file_indices = range(len(self))
        mmcv.track_parallel_progress(
            self.convert_one, file_indices, self.workers)
        print('\nFinished ...')

    def convert_one(self, file_idx):
        """Convert a single tfrecord file, frame by frame.

        Args:
            file_idx (int): Index of the file to be converted.
        """
        pathname = self.tfrecord_pathnames[file_idx]
        dataset = tf.data.TFRecordDataset(pathname, compression_type='')

        for frame_idx, data in enumerate(dataset):
            frame = dataset_pb2.Frame()
            frame.ParseFromString(bytearray(data.numpy()))

            # Skip frames recorded outside the selected locations, if the
            # location filter is active.
            if self.selected_waymo_locations is not None:
                if frame.context.stats.location \
                        not in self.selected_waymo_locations:
                    continue

            self.save_image(frame, frame_idx)
            self.save_extrinstics_and_pose(frame, frame_idx)
            self.save_intrinstics(frame, frame_idx)
            self.save_lidar(frame, frame_idx)

    def __len__(self):
        """Length of the filename list."""
        return len(self.tfrecord_pathnames)

    def save_image(self, frame, frame_idx):
        """Parse and save the camera images in png format.

        Files are named ``frame{frame_idx:03d}_cam{N}.png`` where ``N`` is
        the zero-based camera index (``img.name`` is one-based).

        Args:
            frame (:obj:`Frame`): Open dataset frame proto.
            frame_idx (int): Current frame index.
        """
        for img in frame.images:
            img_path = f'{self.image_save_dir}/' + \
                f'frame{str(frame_idx).zfill(3)}_' + \
                f'cam{str(img.name - 1)}.png'
            # Decode into a separate name instead of clobbering the loop
            # variable, so the proto message stays accessible.
            decoded = mmcv.imfrombytes(img.image)
            mmcv.imwrite(decoded, img_path)

    def check_differences(self, frame):
        """Sanity-check that every camera shares the same 4 intrinsics.

        Args:
            frame (:obj:`Frame`): Open dataset frame proto.

        Raises:
            ValueError: If any camera's first four intrinsic parameters
                differ from those of the first camera.
        """
        # `camera_calibrations` is a repeated field: the reference values
        # must come from its first element, not from the container itself.
        reference = np.asarray(
            frame.context.camera_calibrations[0].intrinsic[:4])
        for camera in frame.context.camera_calibrations:
            current = np.asarray(camera.intrinsic[:4])
            # `.any()` (not `.all()`): a single differing entry is an error.
            if (current != reference).any():
                raise ValueError("Different intrinstics: How did this happen?")
    
    def save_intrinstics(self, frame, frame_idx):
        """Save the intrinstic data.

        Args:
            frame (:obj:`Frame`): Open dataset frame proto.
            file_idx (int): Current file index.
            frame_idx (int): Current frame index.
        """
        # self.check_differences(frame)
        for i, camera in enumerate(frame.context.camera_calibrations):
            # intrinsic parameters
            intrinstic = camera.intrinsic[:4]
            
            np.savetxt(
                f'{self.intrinstic_save_dir}/' + \
                f'frame{str(frame_idx).zfill(3)}_' + \
                f'cam{i}.txt',intrinstic)
            
            distort = camera.intrinsic[4:9]

            np.savetxt(
                f'{self.distort_save_dir}/' + \
                f'frame{str(frame_idx).zfill(3)}_' + \
                f'cam{i}.txt',distort)
            
            # np.savetxt(
            # join(f'{self.pose_save_dir}/{self.prefix}' +
            #      f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt'),
            # pose)
        # with open(
        #         f'{self.calib_save_dir}/{self.prefix}' +
        #         f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt',
        #         'w+') as fp_calib:

    def save_extrinstics_and_pose(self, frame, frame_idx):
        # T_vehicle_to_world = self.get_pose(frame)
        # T_world_to_vehicle = np.linalg.inv(T_vehicle_to_world)

        # for i, camera in enumerate(frame.context.camera_calibrations):
        # # extrinsic parameters
        #     T_cam_to_vehicle = np.array(camera.extrinsic.transform).reshape(
        #         4, 4)
        #     T_vehicle_to_cam = np.linalg.inv(T_cam_to_vehicle)

        #     extrinstic = np.matmul(T_vehicle_to_cam, T_world_to_vehicle)
        T_vehicle_to_world = self.get_pose(frame)
        np.savetxt(
                f'{self.pose_save_dir}/' + \
                f'frame{str(frame_idx).zfill(3)}.txt'
                , T_vehicle_to_world[:3, 3])
        
        for i, camera in enumerate(frame.context.camera_calibrations):
        # extrinsic parameters
            T_cam_to_vehicle = np.array(camera.extrinsic.transform).reshape(4, 4)
            c2w = np.matmul(T_vehicle_to_world, T_cam_to_vehicle)
            # rotation from waymo to colmap: (x, y, z) -> (-y, -z, x)
            # aaa = -c2w[:3, 1], -c2w[:3, 2], c2w[:3, 0]
            # c2w[:3, 0], c2w[:3, 1], c2w[:3, 2] = aaa不行：最后一个换不了
            rot = np.array([[0, 0, 1],
                            [-1, 0, 0],
                            [0, -1, 0]])
            c2w[:3, :3] = np.matmul(c2w[:3, :3], rot)
            
            extrinstic = np.linalg.inv(c2w)
            np.savetxt(
                f'{self.extrinstic_save_dir}/' + \
                f'frame{str(frame_idx).zfill(3)}_' + \
                f'cam{i}.txt',extrinstic)
            
    def save_lidar(self, frame, frame_idx):
        """Parse and save the lidar data of one frame as a binary
        COLMAP-style point cloud (``frame{frame_idx:03d}.bin``).

        Points from both lidar returns are merged, colored by looking up
        the camera pixel they project to, transformed into world
        coordinates and written via :meth:`write_points3D_binary`.

        Args:
            frame (:obj:`Frame`): Open dataset frame proto.
            frame_idx (int): Current frame index.
        """
        range_images, camera_projections, range_image_top_pose = \
            parse_range_image_and_camera_projection(frame)
        
        # First return
        points_0, cp_points_0, intensity_0, elongation_0 = \
            self.convert_range_image_to_point_cloud(
                frame,
                range_images,
                camera_projections,
                range_image_top_pose,
                ri_index=0
            )
        points_0 = np.concatenate(points_0, axis=0)
        cp_points_0 = np.concatenate(cp_points_0, axis=0)

        # Second return
        points_1, cp_points_1, intensity_1, elongation_1 = \
            self.convert_range_image_to_point_cloud(
                frame,
                range_images,
                camera_projections,
                range_image_top_pose,
                ri_index=1
            )
        points_1 = np.concatenate(points_1, axis=0)
        cp_points_1 = np.concatenate(cp_points_1, axis=0)

        xyzs = np.concatenate([points_0, points_1], axis=0).astype(np.float64)
        cp_points = np.concatenate([cp_points_0, cp_points_1], axis=0)
        # Keep only points that project into some camera: camera id 0 in
        # column 0 of the camera projection means "no projection".
        valid = cp_points[:, 0] != 0
        xyzs, cp_points = xyzs[valid], cp_points[valid]

        xyzs = self.vehicle_to_world(frame, xyzs)

        # Color each point from the image of the camera it projects into.
        # Indexing is images[cam_id, row, col]; cp_points columns appear
        # to be [camera id, image x, image y] — TODO confirm against the
        # Waymo camera-projection layout.
        img_list = self.frame_image_to_array_list_int64(frame)
        images = np.stack(img_list)
        rgbs = images[cp_points[:, 0], cp_points[:, 2], cp_points[:, 1]]
        
        self.write_lidar_bounds(frame_idx, cp_points)
        
        # Will raise if any looked-up pixel is the -1 padding sentinel.
        rgbs = self.array_int64_to_u8(rgbs)

        pc_path = f'{self.point_cloud_save_dir}/' + \
            f'frame{str(frame_idx).zfill(3)}.bin'
        self.write_points3D_binary(xyzs, rgbs, pc_path)

    def get_pose(self, frame):
        """Parse and get the pose data. Pose: Vehicle to World

        Args:
            frame (:obj:`Frame`): Open dataset frame proto.
            file_idx (int): Current file index.
            frame_idx (int): Current frame index.
        """
        pose = np.array(frame.pose.transform).reshape(4, 4)
        return pose

    def create_folder(self):
        """Create every output directory used by the converter."""
        for directory in (self.point_cloud_save_dir,
                          self.intrinstic_save_dir,
                          self.pose_save_dir,
                          self.extrinstic_save_dir,
                          self.image_save_dir,
                          self.lidar_bound_save_dir,
                          self.distort_save_dir):
            mmcv.mkdir_or_exist(directory)
    
    def write_lidar_bounds(self, frame_idx, cp_points):
        lidar_bounds = np.zeros((6, 2, 2)).astype(int)
        for i in range(6):
            if i == 0:
                continue
            mask = (cp_points[:, 0] == i)
            hgt_max = cp_points[mask][:, 2].max()
            hgt_min = cp_points[mask][:, 2].min()
            width_max = cp_points[mask][:, 1].max()
            width_min = cp_points[mask][:, 1].min()
            lidar_bounds[i, 0] = (hgt_min, hgt_max)
            lidar_bounds[i, 1] = (width_min, width_max)
            np.savetxt(
                f'{self.lidar_bound_save_dir}/' + \
                f'frame{str(frame_idx).zfill(3)}_' + \
                f'cam{i-1}.txt', lidar_bounds[i])

    def write_next_bytes(self, fid, variables, format_char_sequence, endian_character="<"):
        if isinstance(variables, Iterable):
            fid.write(pack(endian_character + format_char_sequence, *variables))
        else:
            fid.write(pack(endian_character + format_char_sequence, variables))

    def write_points3D_binary(self, xyz, rgb, path):

        if xyz.shape[0] != rgb.shape[0]:
            assert False, "Wrong input!"
        with open(path, "wb") as fid:
            num_points = xyz.shape[0]
            self.write_next_bytes(fid, (num_points), "Q")

            for p_id in range(num_points):
                self.write_next_bytes(fid, p_id, "Q")
                self.write_next_bytes(fid, xyz[p_id].tolist(), "ddd")
                self.write_next_bytes(fid, rgb[p_id].tolist(), "BBB")
                self.write_next_bytes(fid, 0, "d") # the "error" code.
                # A point is finished here.
                self.write_next_bytes(fid, 0, "Q") # do for colmap format.

    def frame_image_to_array_list_int64(self, frame):
        """Decode all camera images into int RGB arrays, indexed by the
        one-based ``img.name``; slot 0 is a -1 sentinel for camera UNKNOWN.

        All arrays are padded to the height of the first (FRONT) image so
        they can be stacked; padded pixels hold -1.

        Returns:
            list[np.ndarray]: (H, W, 3) int arrays, length = #images + 1.
        """
        img_list = [0] * (len(frame.images) + 1)

        assert frame.images[0].name == 1, "The first image should come from the FRONT(1)." # type(img.name): int
        img_template = mmcv.imfrombytes(frame.images[0].image, channel_order='rgb').astype(int)

        img_list[0] = np.full_like(img_template, -1) # for camera UNKNOWN

        for img in frame.images:
            img_array = mmcv.imfrombytes(img.image, channel_order='rgb').astype(int)
            # Pad shorter images at the bottom with -1 rows up to the
            # template height.
            # NOTE(review): assumes no image is taller than the FRONT
            # template (delta >= 0) — np.full would fail on a negative
            # shape; confirm against the segment's camera resolutions.
            delta = img_template.shape[0] - img_array.shape[0]
            if delta != 0:
                img_array = np.concatenate((img_array,
                                           np.full((delta, img_template.shape[1], img_template.shape[2]), -1)))
            img_list[img.name] = img_array

        return img_list
    
    def array_int64_to_u8(self, ndary):
        check = (0 <= ndary) & (ndary < 256)
        if not check.all():
            assert False, "Wrong input for images!"
        
        return ndary.astype(np.uint8)
    
    def vehicle_to_world(self, frame, ndary):
        num_pts = len(ndary)
        T_vehicle_to_world = self.get_pose(frame)
        points_homo = np.concatenate((ndary, np.ones((num_pts, 1))), axis=1)

        for i in range(num_pts):
            points_homo[i, :] = np.matmul(T_vehicle_to_world, points_homo[i, :])
        
        return points_homo[:, :3]
    
    def convert_range_image_to_point_cloud(self,
                                           frame,
                                           range_images,
                                           camera_projections,
                                           range_image_top_pose,
                                           ri_index=0):
        """Convert range images to point cloud.

        Args:
            frame (:obj:`Frame`): Open dataset frame.
            range_images (dict): Mapping from laser_name to list of two
                range images corresponding with two returns.
            camera_projections (dict): Mapping from laser_name to list of two
                camera projections corresponding with two returns.
            range_image_top_pose (:obj:`Transform`): Range image pixel pose for
                top lidar.
            ri_index (int): 0 for the first return, 1 for the second return.
                Default: 0.

        Returns:
            tuple[list[np.ndarray]]: (List of points with shape [N, 3],
                camera projections of points with shape [N, 6], intensity
                with shape [N, 1], elongation with shape [N, 1]). All the
                lists have the length of lidar numbers (5).
        """
        # Process lasers in a deterministic order (sorted by laser name).
        calibrations = sorted(
            frame.context.laser_calibrations, key=lambda c: c.name)
        points = []
        cp_points = []
        intensity = []
        elongation = []

        frame_pose = tf.convert_to_tensor(
            value=np.reshape(np.array(frame.pose.transform), [4, 4]))
        # [H, W, 6]
        range_image_top_pose_tensor = tf.reshape(
            tf.convert_to_tensor(value=range_image_top_pose.data),
            range_image_top_pose.shape.dims)
        # [H, W, 3, 3]
        range_image_top_pose_tensor_rotation = \
            transform_utils.get_rotation_matrix(
                range_image_top_pose_tensor[..., 0],
                range_image_top_pose_tensor[..., 1],
                range_image_top_pose_tensor[..., 2])
        range_image_top_pose_tensor_translation = \
            range_image_top_pose_tensor[..., 3:]
        # Combine per-pixel rotation and translation into 4x4 transforms.
        range_image_top_pose_tensor = transform_utils.get_transform(
            range_image_top_pose_tensor_rotation,
            range_image_top_pose_tensor_translation)
        for c in calibrations:
            range_image = range_images[c.name][ri_index]
            # Derive beam inclinations from min/max when the calibration
            # does not provide them explicitly.
            if len(c.beam_inclinations) == 0:
                beam_inclinations = range_image_utils.compute_inclination(
                    tf.constant(
                        [c.beam_inclination_min, c.beam_inclination_max]),
                    height=range_image.shape.dims[0])
            else:
                beam_inclinations = tf.constant(c.beam_inclinations)

            beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
            extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])

            range_image_tensor = tf.reshape(
                tf.convert_to_tensor(value=range_image.data),
                range_image.shape.dims)
            pixel_pose_local = None
            frame_pose_local = None
            # Only the TOP lidar carries per-pixel poses.
            if c.name == dataset_pb2.LaserName.TOP:
                pixel_pose_local = range_image_top_pose_tensor
                pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0)
                frame_pose_local = tf.expand_dims(frame_pose, axis=0)
            # Channel 0 is range; non-positive range means "no return".
            range_image_mask = range_image_tensor[..., 0] > 0

            if self.filter_no_label_zone_points:
                nlz_mask = range_image_tensor[..., 3] != 1.0  # 1.0: in NLZ
                range_image_mask = range_image_mask & nlz_mask

            range_image_cartesian = \
                range_image_utils.extract_point_cloud_from_range_image(
                    tf.expand_dims(range_image_tensor[..., 0], axis=0),
                    tf.expand_dims(extrinsic, axis=0),
                    tf.expand_dims(tf.convert_to_tensor(
                        value=beam_inclinations), axis=0),
                    pixel_pose=pixel_pose_local,
                    frame_pose=frame_pose_local)

            range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)
            points_tensor = tf.gather_nd(range_image_cartesian,
                                         tf.compat.v1.where(range_image_mask))

            cp = camera_projections[c.name][ri_index]
            cp_tensor = tf.reshape(
                tf.convert_to_tensor(value=cp.data), cp.shape.dims)
            cp_points_tensor = tf.gather_nd(
                cp_tensor, tf.compat.v1.where(range_image_mask))
            points.append(points_tensor.numpy())
            cp_points.append(cp_points_tensor.numpy())

            # Channel 1 holds intensity, channel 2 elongation.
            intensity_tensor = tf.gather_nd(range_image_tensor[..., 1],
                                            tf.where(range_image_mask))
            intensity.append(intensity_tensor.numpy())

            elongation_tensor = tf.gather_nd(range_image_tensor[..., 2],
                                             tf.where(range_image_mask))
            elongation.append(elongation_tensor.numpy())

        return points, cp_points, intensity, elongation

    def cart_to_homo(self, mat):
        """Convert transformation matrix in Cartesian coordinates to
        homogeneous format.

        Args:
            mat (np.ndarray): Transformation matrix in Cartesian.
                The input matrix shape is 3x3 or 3x4.

        Returns:
            np.ndarray: Transformation matrix in homogeneous format.
                The matrix shape is 4x4.
        """
        ret = np.eye(4)
        if mat.shape == (3, 3):
            ret[:3, :3] = mat
        elif mat.shape == (3, 4):
            ret[:3, :] = mat
        else:
            raise ValueError(mat.shape)
        return ret
