import argparse
import glob
import os
import pickle
import shutil

from pathlib import Path
from typing import Tuple

import numpy as np
import torch

from tqdm import tqdm

from pcdet.config import cfg, cfg_from_yaml_file
from pcdet.datasets import DatasetTemplate
from pcdet.models import build_network, load_data_to_gpu
from pcdet.utils import common_utils


if os.name == 'posix' and "DISPLAY" not in os.environ:
    headless_server = True
else:
    headless_server = False
    from tools.visual_utils import visualize_utils as V
    import mayavi.mlab as mlab



def parse_config():

    parser = argparse.ArgumentParser(description='arg parser')

    parser.add_argument('-m', '--method', type=str, default='pv_rcnn', help='specify the method')
    parser.add_argument('-d', '--dataset', type=str, default='DENSE', help='specify the dataset')
    parser.add_argument('-r', '--dataset_root', type=str, help='specify the dataset root',
                        default='/srv/beegfs-benderdata/scratch/tracezuerich/data/datasets/')
    parser.add_argument('-c', '--copy_image', type=bool, help='specify if images should be copied to DEMO folder',
                        default=True)

    args = parser.parse_args()

    cfg_file = get_cfg_file(args.method)

    cfg_from_yaml_file(cfg_file, cfg)

    return args, cfg


class DemoDataset(DatasetTemplate):

    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin',
                 num_features=4):
        """
        Args:
            root_path:
            dataset_cfg:
            class_names:
            training:
            logger:
        """
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        self.root_path = root_path
        self.ext = ext
        data_file_list = glob.glob(str(root_path / f'*{self.ext}')) if self.root_path.is_dir() else [self.root_path]

        data_file_list.sort()
        self.sample_file_list = data_file_list
        self.num_features = num_features

    def __len__(self):
        return len(self.sample_file_list)

    def __getitem__(self, index):
        if self.ext == '.bin':

            read_out = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, self.num_features)

            if self.num_features == 3:
                points = np.zeros((read_out.shape[0],4))
                points[:, :-1] = read_out
            else:

                if read_out[:, 3].max() > 1:
                    read_out[:, 3] = read_out[:, 3] / 255

                points = read_out[:, :4]

        elif self.ext == '.npy':
            points = np.load(self.sample_file_list[index])
        else:
            raise NotImplementedError

        input_dict = {
            'points': points,
            'frame_id': index,
        }

        data_dict = self.prepare_data(data_dict=input_dict)
        return data_dict


def get_ckpt_file(method: str) -> str:

    kitti_prefix = 'output/kitti_models'
    nuscenes_prefix = 'output/nuscenes_models'

    best_runs = {'PartA2': f'{kitti_prefix}/{method}/2020-07-15_12-25-30_9e0a25a_batch_size_4',
                 'PartA2_free': f'{kitti_prefix}/{method}/2020-07-31_21-35-21_074cb37_batch_size_8',
                 'pointpillar': f'{kitti_prefix}/{method}/2020-07-27_17-21-21_2176653_batch_size_16',
                 'pointrcnn': f'{kitti_prefix}/{method}/2020-08-02_21-45-57_f2563ed_default',
                 'pointrcnn_iou': f'{kitti_prefix}/{method}/2020-08-02_21-46-06_b1cde4b_default',
                 'pv_rcnn': f'{kitti_prefix}/{method}/2020-07-15_12-26-26_d508a52_batch_size_2',
                 'second': f'{kitti_prefix}/{method}/2020-07-15_12-24-42_be9174b_batch_size_4',
                 'second_multihead': f'{kitti_prefix}/{method}/2020-09-02_13-41-36_aa4e3dc_default',
                 'cbgs_pp_multihead': f'{nuscenes_prefix}/{method}/2020-08-10_09-31-32_79dd494_default',
                 'cbgs_second_multihead': f'{nuscenes_prefix}/{method}/2020-08-07_15-18-34_8acd661_default'}

    assert method in best_runs.keys(), f'Unknown method "{method}"'

    repo_root = Path(os.getcwd()).parent

    for file in os.listdir(repo_root / best_runs[method]):

        if file.endswith('.pth'):

            path = repo_root / best_runs[method] / file

            return str(path)


def get_cfg_file(method: str) -> str:

    possible_datasets = ['kitti', 'nuscenes']

    for dataset in possible_datasets:

        cfg_file = f'cfgs/{dataset}_models/{method}.yaml'

        if os.path.isfile(cfg_file):

            return cfg_file


def get_dataset_components(dataset_root: str, dataset: str) -> (str, str, int):

    lidar_paths = {'KITTI': 'KITTI/3D/training/velodyne',
                   'CADCD': 'CADCD/2018_03_06/0001/raw/lidar_points_corrected/data',
                   'DENSE': 'DENSE/SeeingThroughFog/lidar_hdl64_strongest'}

    file_extensions = {'KITTI': '.bin',
                       'CADCD': '.bin',
                       'DENSE': '.bin'}

    num_features = {'KITTI': 4,
                    'CADCD': 4,
                    'DENSE': 5}

    image_paths = {'KITTI': 'KITTI/3D/training/image_2',
                   'CADCD': 'CADCD/2018_03_06/0001/raw/image_00/data',
                   'DENSE': 'DENSE/SeeingThroughFog/cam_stereo_left_lut'}

    assert dataset in lidar_paths.keys(), f'Unsupported dataset "{dataset}"'

    lidar_path = Path(dataset_root) / lidar_paths[dataset]
    image_path = Path(dataset_root) / image_paths[dataset]
    extension = file_extensions[dataset]
    num_features = num_features[dataset]


    return lidar_path, image_path, extension, num_features


def main():

    args, cfg = parse_config()

    lidar_path, image_path, extension, num_features = get_dataset_components(args.dataset_root, args.dataset)

    export_dir = Path(os.getcwd()).parent / 'output' / 'DEMO' / args.dataset / args.method
    export_dir.mkdir(parents=True, exist_ok=True)

    logger = common_utils.create_logger()
    logger.info('-----------------Quick Demo of OpenPCDet-------------------------')
    logger.info(f'Headless Server Mode: \t\t{headless_server}')

    demo_dataset = DemoDataset(
        dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False,
        root_path=lidar_path, ext=extension, logger=logger, num_features=num_features)

    logger.info(f'Total number of samples: \t{len(demo_dataset)}')

    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
    model.load_params_from_file(filename=get_ckpt_file(args.method), logger=logger, to_cpu=True)
    model.cuda()
    model.eval()

    with torch.no_grad():

        for idx, data_dict in tqdm(enumerate(demo_dataset), total=len(demo_dataset)):

            data_dict = demo_dataset.collate_batch([data_dict])
            load_data_to_gpu(data_dict)
            pred_dicts, _ = model.forward(data_dict)

            if headless_server:

                image_name = demo_dataset.sample_file_list[idx].split('/')[-1].replace(demo_dataset.ext, '.png')

                image_source_path = image_path / image_name

                if args.copy_image and image_source_path.exists():
                    image_destination_path = export_dir / image_name
                    shutil.copyfile(image_source_path, image_destination_path)

                data_dict_cpu = {}

                for key, value in data_dict.items():
                    if key == 'points':
                        data_dict_cpu[key] = value.cpu().numpy()

                with open(f'{export_dir}/data_dict_{idx+1:06d}.pkl', 'wb') as f:
                    pickle.dump(data_dict_cpu, f, pickle.HIGHEST_PROTOCOL)

                for pred_dict in pred_dicts:

                    for key, value in pred_dict.items():
                        if isinstance(pred_dict[key], torch.Tensor):
                            pred_dict[key] = value.cpu().numpy()

                with open(f'{export_dir}/pred_dicts_{idx + 1:06d}.pkl', 'wb') as f:
                    pickle.dump(pred_dicts, f, pickle.HIGHEST_PROTOCOL)

            else:
                V.draw_scenes(
                    points=data_dict['points'][:, 1:], ref_boxes=pred_dicts[0]['pred_boxes'],
                    ref_scores=pred_dicts[0]['pred_scores'], ref_labels=pred_dicts[0]['pred_labels']
                )
                mlab.show(stop=True)

    logger.info('Demo done.')


if __name__ == '__main__':
    main()