

import copy
import pickle
from pathlib import Path

import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from PIL import Image
from skimage import io

from .km_matcher import KMMatcher
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils, calibration_kitti, common_utils, object3d_kitti
from ..dataset import DatasetTemplate


class KittiDataset(DatasetTemplate):
    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
        """
        Args:
            root_path:
            dataset_cfg:
            class_names:
            training:
            logger:
        """
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
        self.root_split_path = self.root_path / \
            ('training' if self.split != 'test' else 'testing')

        split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
        self.sample_id_list = [x.strip() for x in open(
            split_dir).readlines()] if split_dir.exists() else None

        self.kitti_infos = []
        self.include_kitti_data(self.mode)

        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        self.trans = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])

    def include_kitti_data(self, mode):
        if self.logger is not None:
            self.logger.info('Loading KITTI dataset')
        kitti_infos = []

        for info_path in self.dataset_cfg.INFO_PATH[mode]:
            info_path = self.root_path / info_path
            if not info_path.exists():
                continue
            with open(info_path, 'rb') as f:
                infos = pickle.load(f)
                kitti_infos.extend(infos)

        self.kitti_infos.extend(kitti_infos)

        if self.logger is not None:
            self.logger.info('Total samples for KITTI dataset: %d' %
                             (len(kitti_infos)))

    def set_split(self, split):
        super().__init__(
            dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger
        )
        self.split = split
        self.root_split_path = self.root_path / \
            ('training' if self.split != 'test' else 'testing')

        split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
        self.sample_id_list = [x.strip() for x in open(
            split_dir).readlines()] if split_dir.exists() else None

    def get_lidar(self, idx):
        lidar_file = self.root_split_path / 'velodyne' / ('%s.bin' % idx)
        assert lidar_file.exists()
        data = np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 4)
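        # pad the raw (x, y, z, intensity) points with two zero-filled score channels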
        score = np.zeros((data.shape[0], 2))
        data = np.concatenate((data, score), axis=1)

        return data

    def get_box2(self, idx):
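        # precomputed 2D detections for the left (image_2) camera, one 9-column row per
        # box; judging from how they are used below, the columns are roughly
        # (center_x, bottom_y, x1, y1, x2, y2, class_id, score, instance_id)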
        box_file = self.root_split_path / 'image_2_box' / ('%s.npy' % idx)
        assert box_file.exists()
        data = np.load(str(box_file)).reshape(-1, 2 + 4 + 3)

        return data

    def get_box3(self, idx):
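        # right-camera (image_3) counterpart of get_box2, same column layout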
        box_file = self.root_split_path / 'image_3_box' / ('%s.npy' % idx)
        assert box_file.exists()
        data = np.load(str(box_file)).reshape(-1, 2 + 4 + 3)

        return data

    def get_snake(self, idx):
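        # "snake" image for the left camera: a per-pixel map whose values are compared
        # against the instance ids (column 8) of the 2D detection arrays below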
        img_file = self.root_split_path / \
            'image_2_snake' / ('%s.png' % idx)
        assert img_file.exists()
        image = Image.open(img_file).convert('RGB')
        image = np.array(image)
        return image

    def get_snake3(self, idx):
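        # right-camera (image_3) counterpart of get_snake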
        img_file = self.root_split_path / \
            'image_3_snake' / ('%s.png' % idx)
        assert img_file.exists()
        image = Image.open(img_file).convert('RGB')
        image = np.array(image)
        return image

    def get_image2(self, idx):
        img_file = self.root_split_path / 'image_2' / ('%s.png' % idx)
        assert img_file.exists()
        image = Image.open(img_file).convert('RGB')

        return image

    def get_image3(self, idx):
        img_file = self.root_split_path / 'image_3' / ('%s.png' % idx)
        assert img_file.exists()
        image = Image.open(img_file).convert('RGB')

        return image

    def get_image_shape(self, idx):
        img_file = self.root_split_path / 'image_2' / ('%s.png' % idx)
        # print(img_file)
        assert img_file.exists()
        return np.array(io.imread(img_file).shape[:2], dtype=np.int32)

    def get_label(self, idx):
        label_file = self.root_split_path / 'label_2' / ('%s.txt' % idx)
        assert label_file.exists()
        return object3d_kitti.get_objects_from_label(label_file)

    def get_calib(self, idx):
        calib_file = self.root_split_path / 'calib' / ('%s.txt' % idx)
        assert calib_file.exists()
        return calibration_kitti.Calibration(calib_file)

    def get_road_plane(self, idx):
        plane_file = self.root_split_path / 'planes' / ('%s.txt' % idx)
        if not plane_file.exists():
            return None

        with open(plane_file, 'r') as f:
            lines = f.readlines()
        lines = [float(i) for i in lines[3].split()]
        plane = np.asarray(lines)

        # Ensure normal is always facing up, this is in the rectified camera coordinate
        if plane[1] > 0:
            plane = -plane

        norm = np.linalg.norm(plane[0:3])
        plane = plane / norm
        return plane

    @staticmethod
    def get_fov_flag(pts_rect, img_shape, calib):
        """
        Args:
            pts_rect:
            img_shape:
            calib:

        Returns:

        """
        pts_img, pts_rect_depth = calib.rect_to_img_left(pts_rect)
        val_flag_1 = np.logical_and(
            pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
        val_flag_2 = np.logical_and(
            pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
        val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
        pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)

        return pts_valid_flag

    def get_infos(self, num_workers=4, has_label=True, count_inside_pts=True, sample_id_list=None):
        import concurrent.futures as futures

        def process_single_scene(sample_idx):
            print('%s sample_idx: %s' % (self.split, sample_idx))
            info = {}
            pc_info = {'num_features': 4, 'lidar_idx': sample_idx}
            info['point_cloud'] = pc_info

            image_info = {'image_idx': sample_idx,
                          'image_shape': self.get_image_shape(sample_idx)}
            info['image'] = image_info
            calib = self.get_calib(sample_idx)

            P2 = np.concatenate(
                [calib.P2, np.array([[0., 0., 0., 1.]])], axis=0)
            R0_4x4 = np.zeros([4, 4], dtype=calib.R0.dtype)
            R0_4x4[3, 3] = 1.
            R0_4x4[:3, :3] = calib.R0
            V2C_4x4 = np.concatenate(
                [calib.V2C, np.array([[0., 0., 0., 1.]])], axis=0)
            calib_info = {'P2': P2, 'R0_rect': R0_4x4,
                          'Tr_velo_to_cam': V2C_4x4}

            info['calib'] = calib_info

            if has_label:
                obj_list = self.get_label(sample_idx)
                annotations = {}
                annotations['name'] = np.array(
                    [obj.cls_type for obj in obj_list])
                annotations['truncated'] = np.array(
                    [obj.truncation for obj in obj_list])
                annotations['occluded'] = np.array(
                    [obj.occlusion for obj in obj_list])
                annotations['alpha'] = np.array(
                    [obj.alpha for obj in obj_list])
                annotations['bbox'] = np.concatenate(
                    [obj.box2d.reshape(1, 4) for obj in obj_list], axis=0)
                annotations['dimensions'] = np.array(
                    [[obj.l, obj.h, obj.w] for obj in obj_list])  # lhw(camera) format
                annotations['location'] = np.concatenate(
                    [obj.loc.reshape(1, 3) for obj in obj_list], axis=0)
                annotations['rotation_y'] = np.array(
                    [obj.ry for obj in obj_list])
                annotations['score'] = np.array(
                    [obj.score for obj in obj_list])
                annotations['difficulty'] = np.array(
                    [obj.level for obj in obj_list], np.int32)

                num_objects = len(
                    [obj.cls_type for obj in obj_list if obj.cls_type != 'DontCare'])
                num_gt = len(annotations['name'])
                index = list(range(num_objects)) + \
                    [-1] * (num_gt - num_objects)
                annotations['index'] = np.array(index, dtype=np.int32)

                loc = annotations['location'][:num_objects]
                dims = annotations['dimensions'][:num_objects]
                rots = annotations['rotation_y'][:num_objects]
                loc_lidar = calib.rect_to_lidar(loc)
                l, h, w = dims[:, 0:1], dims[:, 1:2], dims[:, 2:3]
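                # KITTI camera-frame locations sit at the bottom face of the box;
                # lift by h/2 to get the LiDAR box center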
                loc_lidar[:, 2] += h[:, 0] / 2
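                # heading: camera rotation_y is mapped to LiDAR yaw via -(pi/2 + ry)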
                gt_boxes_lidar = np.concatenate(
                    [loc_lidar, l, w, h, -(np.pi / 2 + rots[..., np.newaxis])], axis=1)
                annotations['gt_boxes_lidar'] = gt_boxes_lidar

                info['annos'] = annotations

                if count_inside_pts:
                    points = self.get_lidar(sample_idx)
                    calib = self.get_calib(sample_idx)
                    pts_rect = calib.lidar_to_rect(points[:, 0:3])

                    fov_flag = self.get_fov_flag(
                        pts_rect, info['image']['image_shape'], calib)
                    pts_fov = points[fov_flag]
                    corners_lidar = box_utils.boxes_to_corners_3d(
                        gt_boxes_lidar)
                    num_points_in_gt = -np.ones(num_gt, dtype=np.int32)

                    for k in range(num_objects):
                        flag = box_utils.in_hull(
                            pts_fov[:, 0:3], corners_lidar[k])
                        num_points_in_gt[k] = flag.sum()
                    annotations['num_points_in_gt'] = num_points_in_gt

            return info

        sample_id_list = sample_id_list if sample_id_list is not None else self.sample_id_list
        with futures.ThreadPoolExecutor(num_workers) as executor:
            infos = executor.map(process_single_scene, sample_id_list)
        return list(infos)

    def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'):

        database_save_path = Path(
            self.root_path) / ('gt_database' if split == 'train' else ('gt_database_%s' % split))
        db_info_save_path = Path(self.root_path) / \
            ('kitti_dbinfos_%s.pkl' % split)

        database_save_path.mkdir(parents=True, exist_ok=True)
        all_db_infos = {}

        with open(info_path, 'rb') as f:
            infos = pickle.load(f)

        for k in range(len(infos)):
            print('gt_database sample: %d/%d' % (k + 1, len(infos)))
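            # each frame goes through the same matching/cropping procedure three times,
            # once per class (Car, then Cyclist, then Pedestrian), selecting that class's
            # 2D detections via the class id in column 6 (4, 1 and 2 respectively)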
            info = infos[k]
            sample_idx = info['point_cloud']['lidar_idx']
            points = self.get_lidar(sample_idx)

            calib = self.get_calib(sample_idx)
            raw_image2 = self.get_image2(sample_idx)
            raw_image2 = np.array(raw_image2)
            raw_image3 = self.get_image3(sample_idx)
            raw_image3 = np.array(raw_image3)

            snake_image2 = self.get_snake(sample_idx)
            snake_image3 = self.get_snake3(sample_idx)
            raw_box2 = self.get_box2(sample_idx)

            raw_box3 = self.get_box3(sample_idx)

            mask = raw_box2[:, 6] == 4
            box2 = raw_box2[mask].copy()
            mask = raw_box3[:, 6] == 4
            box3 = raw_box3[mask].copy()

            annos = info['annos'].copy()
            names = annos['name']
            difficulty = annos['difficulty']
            bbox = annos['bbox']
            gt_boxes = annos['gt_boxes_lidar'].copy()

            num_obj = gt_boxes.shape[0]
            point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
                torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
            ).numpy()  # (nboxes, npoints)

            gt_boxes_2d = box_utils.enlarge_height(
                gt_boxes[:, 0:7], extra_1=0, extra_2=0, extra_width=0
            )

            gt_boxes_2d = box_utils.boxes3d_lidar_to_kitti_camera(
                gt_boxes_2d, calib)

            gt_boxes_2d_left = box_utils.boxes3d_kitti_camera_to_imageboxes(
                gt_boxes_2d, calib, image_shape=snake_image2.shape)
            gt_boxes_2d_left[:, :2] = np.ceil(gt_boxes_2d_left[:, :2])
            gt_boxes_2d_left[:, 2:] = np.floor(gt_boxes_2d_left[:, 2:])
            gt_boxes_2d_left = gt_boxes_2d_left.astype(np.int64)

            gt_boxes_2d_right = box_utils.boxes3d_kitti_camera_to_imageboxes_right(
                gt_boxes_2d, calib, image_shape=snake_image2.shape)
            gt_boxes_2d_right[:, :2] = np.ceil(gt_boxes_2d_right[:, :2])
            gt_boxes_2d_right[:, 2:] = np.floor(gt_boxes_2d_right[:, 2:])
            gt_boxes_2d_right = gt_boxes_2d_right.astype(np.int64)

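            # Stage 1: match left (image_2) and right (image_3) detections with the KM
            # (Hungarian) matcher. In a rectified stereo pair corresponding boxes share
            # roughly the same vertical position, so the cost combines the vertical offset
            # (normalized by box height) with the detection-score gap; the extra -100
            # padding columns presumably let left boxes remain unmatched.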
            cost_score = np.zeros([box2.shape[0], box3.shape[0] + 10])
            cost_score[:, :] = -100
            for i in range(box2.shape[0]):
                for j in range(box3.shape[0]):
                    score_2 = box2[i, 7]
                    score_3 = box3[j, 7]
                    height = (box2[i, 5] - box2[i, 3])
                    distance_2 = box2[i, 1]
                    distance_3 = box3[j, 1]
                    cost_score[i, j] = -abs(distance_2 - distance_3) * 25 / height \
                        - abs(score_2 - score_3) * 0.04

            # using matching algorithm to match left and right objects
            matcher = KMMatcher(cost_score)

            match_idx, score = matcher.solve(verbose=False)

            match_idx = match_idx.astype(np.int64)
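            # Stage 2: match each GT box (projected into both images) against the paired
            # left/right detections, using bottom-edge and horizontal-center offsets
            # normalized by the detection height/width.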
            cost_score2 = np.zeros(
                [gt_boxes_2d_left.shape[0], box2.shape[0] + 30])
            cost_score2[:, :] = -100

            for i in range(gt_boxes_2d_left.shape[0]):
                if names[i] != 'Car':
                    continue
                for j in range(box2.shape[0]):
                    if match_idx[j] >= box3.shape[0]:
                        continue
                    height = (box2[j, 5] - box2[j, 3])
                    width = (box2[j, 4] - box2[j, 2])
                    low_2 = abs(gt_boxes_2d_left[i, 3] - box2[j, 1]) / height
                    low_3 = abs(
                        gt_boxes_2d_right[i, 3] - box3[match_idx[j], 1]) / height

                    center_2 = abs(
                        (gt_boxes_2d_left[i, 2] + gt_boxes_2d_left[i, 0]) / 2 - box2[j, 0]) / width
                    center_3 = abs(
                        (gt_boxes_2d_right[i, 2] + gt_boxes_2d_right[i, 0]) / 2 - box3[match_idx[j], 0]) / width
                    cost_score2[i, j] = -low_2 - low_3 - center_2 - center_3

            # using matching algorithm to match KITTI and snake boxes
            matcher = KMMatcher(cost_score2)

            match_idx2, score2 = matcher.solve(verbose=False)

            match_idx2 = match_idx2.astype(np.int64)

            # filter out corrupted matching pairs using manually designed rules (Car pass)
            for i in range(num_obj):
                if names[i] != 'Car':
                    continue
                if difficulty[i] == -1:
                    continue

                if match_idx2[i] >= box2.shape[0]:
                    continue
                if match_idx[match_idx2[i]] >= box3.shape[0]:
                    continue

                if score2[i] < -1:
                    continue
                mask = snake_image2 == box2[match_idx2[i],  8]
                mask2 = snake_image3 == box3[match_idx[match_idx2[i]], 8]

                sum1 = mask.sum() / 3
                sum2 = mask2.sum() / 3

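                # sanity filters: require enough mask pixels in both views (the /3 accounts
                # for the three RGB channels), similar mask areas between left and right,
                # and sufficient coverage of the annotated 2D box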
                if sum1 < 100:
                    continue
                if sum2 < 100:
                    continue
                if (sum1 / sum2 > 1.5) or (sum2 / sum1 > 1.5):
                    continue

                mask_rate = sum1 / \
                    ((bbox[i][2] - bbox[i][0]) * (bbox[i][3] - bbox[i][1]))

                if mask_rate <= 0.1:
                    continue

                mask = snake_image2 != box2[match_idx2[i],  8]
                left_obj_image = raw_image2.copy()
                left_obj_image[mask] = 0

                left_obj_image = left_obj_image[
                    int(gt_boxes_2d_left[i][1]):int(gt_boxes_2d_left[i][3]),
                    int(gt_boxes_2d_left[i][0]):int(gt_boxes_2d_left[i][2]), :]

                mask = snake_image3 != box3[match_idx[match_idx2[i]], 8]
                right_obj_image = raw_image3.copy()
                right_obj_image[mask] = 0
                right_obj_image = right_obj_image[
                    int(gt_boxes_2d_right[i][1]):int(gt_boxes_2d_right[i][3]),
                    int(gt_boxes_2d_right[i][0]):int(gt_boxes_2d_right[i][2]), :]

                filename_lidar = '%s_%s_%d.bin' % (sample_idx, names[i], i)
                filename_left = '%s_%s_%d_left.png' % (sample_idx, names[i], i)
                filename_right = '%s_%s_%d_right.png' % (
                    sample_idx, names[i], i)

                filepath_lidar = database_save_path / filename_lidar
                filepath_left = database_save_path / filename_left
                filepath_right = database_save_path / filename_right

                gt_points = points[point_indices[i] > 0]

                gt_points[:, :3] -= gt_boxes[i, :3]

                gt_points = gt_points.astype(np.float32)
                with open(filepath_lidar, 'wb') as f:
                    gt_points.tofile(f)

                gt_points_local = common_utils.rotate_points_along_z(
                    gt_points[np.newaxis, :, :].copy(
                    ), -gt_boxes[np.newaxis, i, 6].copy()
                )
                gt_points_local[0, :, :3] += gt_boxes[i, :3]

                left_obj_image = left_obj_image.astype(np.uint8)
                left_obj_image = Image.fromarray(left_obj_image)
                left_obj_image.save(filepath_left)

                right_obj_image = right_obj_image.astype(np.uint8)
                right_obj_image = Image.fromarray(right_obj_image)
                right_obj_image.save(filepath_right)

                if (used_classes is None) or names[i] in used_classes:
                    db_path = str(filepath_lidar.relative_to(self.root_path))
                    db_info = {
                        'name': names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
                        'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0],
                        'difficulty': difficulty[i], 'bbox': bbox[i],
                        'inmodal_box2': box2[match_idx2[i], 2:6],
                        'inmodal_box3': box3[match_idx[match_idx2[i]], 2:6],
                        'score': annos['score'][i]
                    }

                    if names[i] in all_db_infos:
                        all_db_infos[names[i]].append(db_info)
                    else:
                        all_db_infos[names[i]] = [db_info]

            mask = raw_box2[:, 6] == 1
            box2 = raw_box2[mask].copy()
            mask = raw_box3[:, 6] == 1
            box3 = raw_box3[mask].copy()

            annos = info['annos'].copy()
            names = annos['name']
            difficulty = annos['difficulty']
            bbox = annos['bbox']
            gt_boxes = annos['gt_boxes_lidar'].copy()

            num_obj = gt_boxes.shape[0]
            point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
                torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
            ).numpy()  # (nboxes, npoints)

            ############ Fusion #############

            gt_boxes_2d = box_utils.enlarge_height(
                gt_boxes[:, 0:7], extra_1=0, extra_2=0, extra_width=0
            )

            gt_boxes_2d = box_utils.boxes3d_lidar_to_kitti_camera(
                gt_boxes_2d, calib)

            gt_boxes_2d_left = box_utils.boxes3d_kitti_camera_to_imageboxes(
                gt_boxes_2d, calib, image_shape=snake_image2.shape)
            gt_boxes_2d_left[:, :2] = np.ceil(gt_boxes_2d_left[:, :2])
            gt_boxes_2d_left[:, 2:] = np.floor(gt_boxes_2d_left[:, 2:])
            gt_boxes_2d_left = gt_boxes_2d_left.astype(np.int64)

            gt_boxes_2d_right = box_utils.boxes3d_kitti_camera_to_imageboxes_right(
                gt_boxes_2d, calib, image_shape=snake_image2.shape)
            gt_boxes_2d_right[:, :2] = np.ceil(gt_boxes_2d_right[:, :2])
            gt_boxes_2d_right[:, 2:] = np.floor(gt_boxes_2d_right[:, 2:])
            gt_boxes_2d_right = gt_boxes_2d_right.astype(np.int64)

            cost_score = np.zeros([box2.shape[0], box3.shape[0] + 10])
            cost_score[:, :] = -100
            for i in range(box2.shape[0]):
                for j in range(box3.shape[0]):
                    score_2 = box2[i, 7]
                    score_3 = box3[j, 7]
                    height = (box2[i, 5] - box2[i, 3])
                    distance_2 = box2[i, 1]
                    distance_3 = box3[j, 1]
                    cost_score[i, j] = -abs(distance_2 - distance_3) * 25 / height \
                        - abs(score_2 - score_3) * 0.04

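            # match left and right detections of class id 1 (used for the Cyclist pass)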
            matcher = KMMatcher(cost_score)

            match_idx, score = matcher.solve(verbose=False)

            match_idx = match_idx.astype(np.int64)
            cost_score2 = np.zeros(
                [gt_boxes_2d_left.shape[0], box2.shape[0] + 30])
            cost_score2[:, :] = -100

            for i in range(gt_boxes_2d_left.shape[0]):
                if names[i] != 'Cyclist':
                    continue
                for j in range(box2.shape[0]):
                    if match_idx[j] >= box3.shape[0]:
                        continue
                    height = (box2[j, 5] - box2[j, 3])
                    width = (box2[j, 4] - box2[j, 2])
                    low_2 = abs(gt_boxes_2d_left[i, 3] - box2[j, 1]) / height
                    low_3 = abs(
                        gt_boxes_2d_right[i, 3] - box3[match_idx[j], 1]) / height

                    center_2 = abs(
                        (gt_boxes_2d_left[i, 2] + gt_boxes_2d_left[i, 0]) / 2 - box2[j, 0]) / width
                    center_3 = abs(
                        (gt_boxes_2d_right[i, 2] + gt_boxes_2d_right[i, 0]) / 2 - box3[match_idx[j], 0]) / width
                    cost_score2[i, j] = -low_2 - low_3 - center_2 - center_3

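            # match GT Cyclist boxes with the paired left/right detections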
            matcher = KMMatcher(cost_score2)

            match_idx2, score2 = matcher.solve(verbose=False)

            match_idx2 = match_idx2.astype(np.int64)

            # filter out corrupted matching pairs using manually designed rules (Cyclist pass)
            for i in range(num_obj):
                if names[i] != 'Cyclist':
                    continue
                if difficulty[i] == -1:
                    continue

                if match_idx2[i] >= box2.shape[0]:
                    continue
                if match_idx[match_idx2[i]] >= box3.shape[0]:
                    continue

                if score2[i] < -1:
                    continue
                mask = snake_image2 == box2[match_idx2[i],  8]
                mask2 = snake_image3 == box3[match_idx[match_idx2[i]], 8]

                sum1 = mask.sum() / 3
                sum2 = mask2.sum() / 3

                if sum1 < 20:
                    continue
                if sum2 < 20:
                    continue
                if (sum1 / sum2 > 1.5) or (sum2 / sum1 > 1.5):
                    continue

                mask_rate = sum1 / \
                    ((bbox[i][2] - bbox[i][0]) * (bbox[i][3] - bbox[i][1]))

                if mask_rate <= 0.1:
                    continue

                mask = snake_image2 != box2[match_idx2[i],  8]
                left_obj_image = raw_image2.copy()
                left_obj_image[mask] = 0

                left_obj_image = left_obj_image[
                    int(gt_boxes_2d_left[i][1]):int(gt_boxes_2d_left[i][3]),
                    int(gt_boxes_2d_left[i][0]):int(gt_boxes_2d_left[i][2]), :]

                mask = snake_image3 != box3[match_idx[match_idx2[i]], 8]
                right_obj_image = raw_image3.copy()
                right_obj_image[mask] = 0
                right_obj_image = right_obj_image[
                    int(gt_boxes_2d_right[i][1]):int(gt_boxes_2d_right[i][3]),
                    int(gt_boxes_2d_right[i][0]):int(gt_boxes_2d_right[i][2]), :]

                filename_lidar = '%s_%s_%d.bin' % (sample_idx, names[i], i)
                filename_left = '%s_%s_%d_left.png' % (sample_idx, names[i], i)
                filename_right = '%s_%s_%d_right.png' % (
                    sample_idx, names[i], i)

                filepath_lidar = database_save_path / filename_lidar
                filepath_left = database_save_path / filename_left
                filepath_right = database_save_path / filename_right

                gt_points = points[point_indices[i] > 0]

                gt_points[:, :3] -= gt_boxes[i, :3]

                gt_points = gt_points.astype(np.float32)
                with open(filepath_lidar, 'wb') as f:
                    gt_points.tofile(f)

                gt_points_local = common_utils.rotate_points_along_z(
                    gt_points[np.newaxis, :, :].copy(
                    ), -gt_boxes[np.newaxis, i, 6].copy()
                )
                gt_points_local[0, :, :3] += gt_boxes[i, :3]

                left_obj_image = left_obj_image.astype(np.uint8)
                left_obj_image = Image.fromarray(left_obj_image)
                left_obj_image.save(filepath_left)

                right_obj_image = right_obj_image.astype(np.uint8)
                right_obj_image = Image.fromarray(right_obj_image)
                right_obj_image.save(filepath_right)

                if (used_classes is None) or names[i] in used_classes:
                    db_path = str(filepath_lidar.relative_to(self.root_path))
                    db_info = {
                        'name': names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
                        'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0],
                        'difficulty': difficulty[i], 'bbox': bbox[i],
                        'inmodal_box2': box2[match_idx2[i], 2:6],
                        'inmodal_box3': box3[match_idx[match_idx2[i]], 2:6],
                        'score': annos['score'][i]
                    }

                    if names[i] in all_db_infos:
                        all_db_infos[names[i]].append(db_info)
                    else:
                        all_db_infos[names[i]] = [db_info]

            mask = raw_box2[:, 6] == 2
            box2 = raw_box2[mask].copy()
            mask = raw_box3[:, 6] == 2
            box3 = raw_box3[mask].copy()

            annos = info['annos'].copy()
            names = annos['name']
            difficulty = annos['difficulty']
            bbox = annos['bbox']
            gt_boxes = annos['gt_boxes_lidar'].copy()

            num_obj = gt_boxes.shape[0]
            point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
                torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
            ).numpy()  # (nboxes, npoints)

            ############ Fusion #############

            gt_boxes_2d = box_utils.enlarge_height(
                gt_boxes[:, 0:7], extra_1=0, extra_2=0, extra_width=0
            )

            gt_boxes_2d = box_utils.boxes3d_lidar_to_kitti_camera(
                gt_boxes_2d, calib)

            gt_boxes_2d_left = box_utils.boxes3d_kitti_camera_to_imageboxes(
                gt_boxes_2d, calib, image_shape=snake_image2.shape)
            gt_boxes_2d_left[:, :2] = np.ceil(gt_boxes_2d_left[:, :2])
            gt_boxes_2d_left[:, 2:] = np.floor(gt_boxes_2d_left[:, 2:])
            gt_boxes_2d_left = gt_boxes_2d_left.astype(np.int64)

            gt_boxes_2d_right = box_utils.boxes3d_kitti_camera_to_imageboxes_right(
                gt_boxes_2d, calib, image_shape=snake_image2.shape)
            gt_boxes_2d_right[:, :2] = np.ceil(gt_boxes_2d_right[:, :2])
            gt_boxes_2d_right[:, 2:] = np.floor(gt_boxes_2d_right[:, 2:])
            gt_boxes_2d_right = gt_boxes_2d_right.astype(np.int64)

            ###########################

            cost_score = np.zeros([box2.shape[0], box3.shape[0] + 30])
            cost_score[:, :] = -100
            for i in range(box2.shape[0]):
                for j in range(box3.shape[0]):
                    score_2 = box2[i, 7]
                    score_3 = box3[j, 7]
                    height = (box2[i, 5] - box2[i, 3])
                    distance_2 = box2[i, 1]
                    distance_3 = box3[j, 1]
                    cost_score[i, j] = -abs(distance_2 - distance_3) * 25 / height \
                        - abs(score_2 - score_3) * 0.04

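            # match left and right detections of class id 2 (used for the Pedestrian pass)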
            matcher = KMMatcher(cost_score)

            match_idx, score = matcher.solve(verbose=False)
            # print(match_idx , score)

            match_idx = match_idx.astype(np.int64)
            cost_score2 = np.zeros(
                [gt_boxes_2d_left.shape[0], box2.shape[0] + 30])
            cost_score2[:, :] = -100

            for i in range(gt_boxes_2d_left.shape[0]):
                if names[i] != 'Pedestrian':
                    continue
                for j in range(box2.shape[0]):
                    if match_idx[j] >= box3.shape[0]:
                        continue
                    height = (box2[j, 5] - box2[j, 3])
                    width = (box2[j, 4] - box2[j, 2])
                    low_2 = abs(gt_boxes_2d_left[i, 3] - box2[j, 1]) / height
                    low_3 = abs(
                        gt_boxes_2d_right[i, 3] - box3[match_idx[j], 1]) / height

                    center_2 = abs(
                        (gt_boxes_2d_left[i, 2] + gt_boxes_2d_left[i, 0]) / 2 - box2[j, 0]) / width
                    center_3 = abs(
                        (gt_boxes_2d_right[i, 2] + gt_boxes_2d_right[i, 0]) / 2 - box3[match_idx[j], 0]) / width
                    cost_score2[i, j] = -low_2 - low_3 - center_2 - center_3

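            # match GT Pedestrian boxes with the paired left/right detections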
            matcher = KMMatcher(cost_score2)

            match_idx2, score2 = matcher.solve(verbose=False)

            match_idx2 = match_idx2.astype(np.int64)

            # filter out corrupted matching pairs using manually designed rules (Pedestrian pass)

            for i in range(num_obj):
                if names[i] != 'Pedestrian':
                    continue
                if difficulty[i] == -1:
                    continue

                if match_idx2[i] >= box2.shape[0]:
                    continue
                if match_idx[match_idx2[i]] >= box3.shape[0]:
                    continue

                if score2[i] < -1:
                    continue
                mask = snake_image2 == box2[match_idx2[i],  8]
                mask2 = snake_image3 == box3[match_idx[match_idx2[i]], 8]

                sum1 = mask.sum() / 3
                sum2 = mask2.sum() / 3

                if sum1 < 20:
                    continue
                if sum2 < 20:
                    continue
                if (sum1 / sum2 > 1.5) or (sum2 / sum1 > 1.5):
                    continue

                mask_rate = sum1 / \
                    ((bbox[i][2] - bbox[i][0]) * (bbox[i][3] - bbox[i][1]))

                if mask_rate <= 0.1:
                    continue

                mask = snake_image2 != box2[match_idx2[i],  8]
                left_obj_image = raw_image2.copy()
                left_obj_image[mask] = 0

                left_obj_image = left_obj_image[
                    int(gt_boxes_2d_left[i][1]):int(gt_boxes_2d_left[i][3]),
                    int(gt_boxes_2d_left[i][0]):int(gt_boxes_2d_left[i][2]), :]

                mask = snake_image3 != box3[match_idx[match_idx2[i]], 8]
                right_obj_image = raw_image3.copy()
                right_obj_image[mask] = 0
                right_obj_image = right_obj_image[
                    int(gt_boxes_2d_right[i][1]):int(gt_boxes_2d_right[i][3]),
                    int(gt_boxes_2d_right[i][0]):int(gt_boxes_2d_right[i][2]), :]

                filename_lidar = '%s_%s_%d.bin' % (sample_idx, names[i], i)
                filename_left = '%s_%s_%d_left.png' % (sample_idx, names[i], i)
                filename_right = '%s_%s_%d_right.png' % (
                    sample_idx, names[i], i)

                filepath_lidar = database_save_path / filename_lidar
                filepath_left = database_save_path / filename_left
                filepath_right = database_save_path / filename_right

                gt_points = points[point_indices[i] > 0]

                gt_points[:, :3] -= gt_boxes[i, :3]

                gt_points = gt_points.astype(np.float32)
                with open(filepath_lidar, 'wb') as f:
                    gt_points.tofile(f)

                gt_points_local = common_utils.rotate_points_along_z(
                    gt_points[np.newaxis, :, :].copy(
                    ), -gt_boxes[np.newaxis, i, 6].copy()
                )
                gt_points_local[0, :, :3] += gt_boxes[i, :3]

                left_obj_image = left_obj_image.astype(np.uint8)
                left_obj_image = Image.fromarray(left_obj_image)
                left_obj_image.save(filepath_left)

                right_obj_image = right_obj_image.astype(np.uint8)
                right_obj_image = Image.fromarray(right_obj_image)
                right_obj_image.save(filepath_right)

                if (used_classes is None) or names[i] in used_classes:
                    db_path = str(filepath_lidar.relative_to(self.root_path))
                    db_info = {
                        'name': names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
                        'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0],
                        'difficulty': difficulty[i], 'bbox': bbox[i],
                        'inmodal_box2': box2[match_idx2[i], 2:6],
                        'inmodal_box3': box3[match_idx[match_idx2[i]], 2:6],
                        'score': annos['score'][i]
                    }

                    if names[i] in all_db_infos:
                        all_db_infos[names[i]].append(db_info)
                    else:
                        all_db_infos[names[i]] = [db_info]

        for k, v in all_db_infos.items():
            print('Database %s: %d' % (k, len(v)))

        with open(db_info_save_path, 'wb') as f:
            pickle.dump(all_db_infos, f)

    @staticmethod
    def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
        """
        Args:
            batch_dict:
                frame_id:
            pred_dicts: list of pred_dicts
                pred_boxes: (N, 7), Tensor
                pred_scores: (N), Tensor
                pred_labels: (N), Tensor
            class_names:
            output_path:

        Returns:

        """
        def get_template_prediction(num_samples):
            ret_dict = {
                'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples),
                'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples),
                'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]),
                'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples),
                'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7])
            }
            return ret_dict

        def generate_single_sample_dict(batch_index, box_dict):
            pred_scores = box_dict['pred_scores'].cpu().numpy()
            pred_boxes = box_dict['pred_boxes'].cpu().numpy()
            pred_labels = box_dict['pred_labels'].cpu().numpy()
            pred_dict = get_template_prediction(pred_scores.shape[0])
            if pred_scores.shape[0] == 0:
                return pred_dict

            calib = batch_dict['calib'][batch_index]
            image_shape = batch_dict['image_shape'][batch_index]
            pred_boxes_camera = box_utils.boxes3d_lidar_to_kitti_camera(
                pred_boxes, calib)
            pred_boxes_img = box_utils.boxes3d_kitti_camera_to_imageboxes(
                pred_boxes_camera, calib, image_shape=image_shape
            )

            pred_dict['name'] = np.array(class_names)[pred_labels - 1]
            pred_dict['alpha'] = -np.arctan2(-pred_boxes[:, 1],
                                             pred_boxes[:, 0]) + pred_boxes_camera[:, 6]
            pred_dict['bbox'] = pred_boxes_img
            pred_dict['dimensions'] = pred_boxes_camera[:, 3:6]
            pred_dict['location'] = pred_boxes_camera[:, 0:3]
            pred_dict['rotation_y'] = pred_boxes_camera[:, 6]
            pred_dict['score'] = pred_scores
            pred_dict['boxes_lidar'] = pred_boxes

            return pred_dict

        annos = []
        for index, box_dict in enumerate(pred_dicts):
            frame_id = batch_dict['frame_id'][index]

            single_pred_dict = generate_single_sample_dict(index, box_dict)
            single_pred_dict['frame_id'] = frame_id
            annos.append(single_pred_dict)

            if output_path is not None:
                cur_det_file = output_path / ('%s.txt' % frame_id)
                with open(cur_det_file, 'w') as f:
                    bbox = single_pred_dict['bbox']
                    loc = single_pred_dict['location']
                    dims = single_pred_dict['dimensions']  # lhw -> hwl

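                    # one KITTI-format label line per detection:
                    # type, truncated, occluded, alpha, bbox(4), h, w, l, x, y, z, ry, score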
                    for idx in range(len(bbox)):
                        print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f'
                              % (single_pred_dict['name'][idx], single_pred_dict['alpha'][idx],
                                 bbox[idx][0], bbox[idx][1], bbox[idx][2], bbox[idx][3],
                                 dims[idx][1], dims[idx][2], dims[idx][0], loc[idx][0],
                                 loc[idx][1], loc[idx][2], single_pred_dict['rotation_y'][idx],
                                 single_pred_dict['score'][idx]), file=f)

        return annos

    def evaluation(self, det_annos, class_names, **kwargs):
        if 'annos' not in self.kitti_infos[0].keys():
            return None, {}

        from .kitti_object_eval_python import eval as kitti_eval

        eval_det_annos = copy.deepcopy(det_annos)
        eval_gt_annos = [copy.deepcopy(info['annos'])
                         for info in self.kitti_infos]
        ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
            eval_gt_annos, eval_det_annos, class_names)

        return ap_result_str, ap_dict

    def __len__(self):
        if self._merge_all_iters_to_one_epoch:
            return len(self.kitti_infos) * self.total_epochs

        return len(self.kitti_infos)

    def __getitem__(self, index):
        # index = 4
        if self._merge_all_iters_to_one_epoch:
            index = index % len(self.kitti_infos)

        info = copy.deepcopy(self.kitti_infos[index])

        sample_idx = info['point_cloud']['lidar_idx']

        points = self.get_lidar(sample_idx)
        calib = self.get_calib(sample_idx)
        operation = np.empty([3], dtype=float)

        left_img = self.get_image2(sample_idx)
        right_img = self.get_image3(sample_idx)

        img_shape = info['image']['image_shape']

        if self.dataset_cfg.FOV_POINTS_ONLY:
            pts_rect = calib.lidar_to_rect(points[:, 0:3])
            fov_flag = self.get_fov_flag(pts_rect, img_shape, calib)
            points = points[fov_flag]

        if self.training:
            snake = self.get_snake(sample_idx)
            snake = snake[:, :, 0]
        else:
            snake = []

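        # at test time, normalize the stereo pair, pad its width to 1248 and keep only
        # the bottom 224 rows; h_shift records how many rows were cropped from the top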
        if not self.training:

            left_img = self.trans(left_img)
            right_img = self.trans(right_img)

            C, H, W = left_img.shape

            right_pad = 1248 - W

            h_shift = left_img.shape[1] - 224
            left_img = F.pad(left_img, (0, right_pad, 0, 0), "constant", 0)
            right_img = F.pad(right_img, (0, right_pad, 0, 0), "constant", 0)

            left_img = left_img[:, -224:, :]
            right_img = right_img[:, -224:, :]

        input_dict = {
            'points': points,
            'frame_id': sample_idx,
            'calib': calib,
            'left_img': left_img,
            'right_img': right_img,
            'snake': snake,
            'image_shape': img_shape,
            'operation': operation,
        }
        if not self.training:

            input_dict['h_shift'] = h_shift

        if 'annos' in info:
            annos = info['annos']
            annos = common_utils.drop_info_with_name(annos, name='DontCare')
            loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y']
            gt_names = annos['name']
            gt_difficulty = annos['difficulty']

            gt_boxes_camera = np.concatenate(
                [loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)
            gt_boxes_lidar = box_utils.boxes3d_kitti_camera_to_lidar(
                gt_boxes_camera, calib)

            input_dict.update({
                'gt_names': gt_names,
                'gt_boxes': gt_boxes_lidar,
                'gt_difficulty': gt_difficulty,
            })
            road_plane = self.get_road_plane(sample_idx)
            if road_plane is not None:
                input_dict['road_plane'] = road_plane

        data_dict = self.prepare_data(data_dict=input_dict)

        return data_dict


def create_kitti_infos(dataset_cfg, class_names, data_path, save_path, workers=16):
    dataset = KittiDataset(dataset_cfg=dataset_cfg,
                           class_names=class_names, root_path=data_path, training=False)
    train_split, val_split = 'train', 'val'

    train_filename = save_path / ('kitti_infos_%s.pkl' % train_split)
    val_filename = save_path / ('kitti_infos_%s.pkl' % val_split)
    trainval_filename = save_path / 'kitti_infos_trainval.pkl'
    test_filename = save_path / 'kitti_infos_test.pkl'

    print('---------------Start to generate data infos---------------')

    dataset.set_split(train_split)
    kitti_infos_train = dataset.get_infos(
        num_workers=workers, has_label=True, count_inside_pts=True)
    with open(train_filename, 'wb') as f:
        pickle.dump(kitti_infos_train, f)
    print('Kitti info train file is saved to %s' % train_filename)

    dataset.set_split(val_split)
    kitti_infos_val = dataset.get_infos(
        num_workers=workers, has_label=True, count_inside_pts=True)
    with open(val_filename, 'wb') as f:
        pickle.dump(kitti_infos_val, f)
    print('Kitti info val file is saved to %s' % val_filename)

    with open(trainval_filename, 'wb') as f:
        pickle.dump(kitti_infos_train + kitti_infos_val, f)
    print('Kitti info trainval file is saved to %s' % trainval_filename)

    dataset.set_split('test')
    kitti_infos_test = dataset.get_infos(
        num_workers=workers, has_label=False, count_inside_pts=False)
    with open(test_filename, 'wb') as f:
        pickle.dump(kitti_infos_test, f)
    print('Kitti info test file is saved to %s' % test_filename)

    print('---------------Start create groundtruth database for data augmentation---------------')
    dataset.set_split(train_split)
    dataset.create_groundtruth_database(train_filename, split=train_split)

    print('---------------Data preparation Done---------------')


if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1 and sys.argv[1] == 'create_kitti_infos':
        import yaml
        from pathlib import Path
        from easydict import EasyDict
        dataset_cfg = EasyDict(yaml.safe_load(open(sys.argv[2])))
        ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
        create_kitti_infos(
            dataset_cfg=dataset_cfg,
            class_names=['Car', 'Pedestrian', 'Cyclist'],
            data_path=ROOT_DIR / 'data' / 'kitti',
            save_path=ROOT_DIR / 'data' / 'kitti'
        )
