import os
import sys
import time
import numpy as np
import json
import argparse
from tqdm import tqdm
import mindspore
import mindspore.dataset as ds
from mindspore import dataset as de
import mindspore.nn as nn
import mindspore.ops as ops
import mindspore.communication as comm
from mindspore import context, load_checkpoint, load_param_into_net
from mindspore import save_checkpoint
from mindspore.context import ParallelMode
from mindspore.communication import init, get_rank
from mindspore.communication.management import get_group_size
from datetime import datetime
import logging
import sys
# Make the repository root (assumed to be the current working directory)
# importable so the local ``mind3d`` package below resolves when this
# script is run directly.
cpath = os.getcwd()
sys.path.append(os.getcwd())
from mind3d.utils.load_yaml import load_yaml

def log_string(filename, verbosity=1, name=None):
    """Build a logger that writes both to *filename* and to the console.

    Args:
        filename: path of the log file (opened in write mode, truncating).
        verbosity: 0 = DEBUG, 1 = INFO, 2 = WARNING.
        name: logger name passed to ``logging.getLogger`` (None = root).

    Returns:
        logging.Logger: the configured logger (file handler first,
        then stream handler).
    """
    levels = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}
    fmt = logging.Formatter(
        "[%(asctime)s][%(filename)s][line:%(lineno)d][%(levelname)s] %(message)s"
    )
    log = logging.getLogger(name)
    log.setLevel(levels[verbosity])
    # Same formatter on both sinks; file handler is attached first.
    for handler in (logging.FileHandler(filename, "w"), logging.StreamHandler()):
        handler.setFormatter(fmt)
        log.addHandler(handler)
    return log


# Per-run log directory: ./eval-log/<YYMMDD_HHMMSS>/detection_eval.log
output_root = './eval-log'
# NOTE(review): datetime.now() is called twice here, so the date and time
# parts could in rare cases straddle a second/midnight boundary.
log_name = datetime.now().strftime('%y%m%d') + '_' + datetime.now().strftime('%H%M%S')
exp_root = os.path.join(output_root, log_name)
os.makedirs(exp_root, exist_ok=True)
# Module-level logger shared by eval_net() / eval().
logger = log_string(os.path.join(exp_root, "detection_eval.log"))


def eval_net(eval_dataset, network, opt):
    """Evaluate a Groupfree-3D detector on ScanNet and log per-head mAP.

    Runs ``network`` over ``eval_dataset`` and parses predictions from the
    proposal head, each of the six decoder heads, the fused last three
    decoder heads ('last_three_') and the fusion of all heads
    ('all_layers_'), then accumulates AP at every IoU threshold listed in
    ``opt['ap_iou_thresholds']``.

    Args:
        eval_dataset: batched MindSpore dataset yielding the ScanNet
            detection fields (``point_clouds``, ``center_label``, ...).
        network: model whose forward returns the backbone tensors followed
            by the proposal output and six decoder output tuples.
        opt (dict): config with ``ap_iou_thresholds`` and
            ``size_cls_agnostic``.

    Returns:
        float: mAP of the last decoder head ('head6_') at the last IoU
        threshold above 0.3 (0.0 if no such threshold is configured).
        The original version computed but discarded this value.
    """
    from mind3d.models.losses.groupfree_3d_eval_helper import APCalculator, parse_predictions, parse_groundtruths
    from mind3d.dataset.scannet_v2 import ScannetDatasetConfig
    DATASET_CONFIG = ScannetDatasetConfig()
    CONFIG_DICT = {'remove_empty_box': False, 'use_3d_nms': True,
                   'nms_iou': 0.25, 'use_old_type_nms': False, 'cls_nms': True,
                   'per_class_proposal': True, 'conf_thresh': 0.0,
                   'dataset_config': DATASET_CONFIG}
    logger.info("=================start eval==================")

    # Prediction sources: proposal head, six decoder heads, and two fusions.
    prefixes = ['proposal_', 'head1_', 'head2_', 'head3_', 'head4_', 'head5_',
                'head6_', 'last_three_', 'all_layers_']  # all heads
    ap_calculator_list = [APCalculator(iou_thresh, DATASET_CONFIG.class2type)
                          for iou_thresh in opt['ap_iou_thresholds']]
    mAPs = [[iou_thresh, {k: 0 for k in prefixes}]
            for iou_thresh in opt['ap_iou_thresholds']]
    batch_pred_map_cls_dict = {k: [] for k in prefixes}
    batch_gt_map_cls_dict = {k: [] for k in prefixes}

    # Positional indices of the prediction tensors inside each head's output
    # tuple, in the exact order parse_predictions expects: objectness scores,
    # center, heading scores, heading residuals, size, size scores, size
    # residuals, semantic-class scores (indices taken from the original
    # hand-expanded calls).
    pred_fields = (1, 2, 3, 5, 9, 6, 8, 10)
    concat_proposals = ops.Concat(1)  # concat along the proposal axis

    def _single_head(head_output):
        """Prediction tensors of one head, ordered for parse_predictions."""
        return tuple(head_output[i] for i in pred_fields)

    def _fused_heads(head_outputs):
        """Field-wise concat of several heads along the proposal axis."""
        return tuple(concat_proposals(tuple(h[i] for h in head_outputs))
                     for i in pred_fields)

    network.set_train(False)
    for data in tqdm(eval_dataset.create_dict_iterator()):
        point_cloud = data['point_clouds']
        # Backbone tensors are not needed at eval time; keep them named with
        # a leading underscore for documentation purposes only.
        (_backbone_xyz, _backbone_sample_idx, _fps_sample_inx,
         _points_obj_cls_logits, proposal_output,
         decoder1_output, decoder2_output, decoder3_output,
         decoder4_output, decoder5_output, decoder6_output) = network(point_cloud)

        # Ground truth is shared by every prediction source of this batch.
        # (point_obj_mask / point_instance_label are in the batch but unused.)
        batch_gt_map_cls = parse_groundtruths(
            data['center_label'], data['heading_class_label'],
            data['heading_residual_label'], data['size_gts'],
            data['size_class_label'], data['size_residual_label'],
            data['box_label_mask'], data['sem_cls_label'],
            CONFIG_DICT, size_cls_agnostic=opt['size_cls_agnostic'])

        all_heads = [proposal_output, decoder1_output, decoder2_output,
                     decoder3_output, decoder4_output, decoder5_output,
                     decoder6_output]
        pred_sources = {
            'last_three_': _fused_heads(all_heads[4:]),   # decoders 4-6
            'all_layers_': _fused_heads(all_heads),
            'proposal_': _single_head(proposal_output),
            'head1_': _single_head(decoder1_output),
            'head2_': _single_head(decoder2_output),
            'head3_': _single_head(decoder3_output),
            'head4_': _single_head(decoder4_output),
            'head5_': _single_head(decoder5_output),
            'head6_': _single_head(decoder6_output),
        }
        for prefix, pred_tensors in pred_sources.items():
            batch_pred_map_cls = parse_predictions(
                *pred_tensors, CONFIG_DICT, prefix,
                size_cls_agnostic=opt['size_cls_agnostic'])
            batch_pred_map_cls_dict[prefix].append(batch_pred_map_cls)
            batch_gt_map_cls_dict[prefix].append(batch_gt_map_cls)

    # Summarize mAP per prediction source and IoU threshold.
    mAP = 0.0
    for prefix in prefixes:
        for batch_pred_map_cls, batch_gt_map_cls in zip(
                batch_pred_map_cls_dict[prefix], batch_gt_map_cls_dict[prefix]):
            for ap_calculator in ap_calculator_list:
                ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)
        # Evaluate average precision
        for i, ap_calculator in enumerate(ap_calculator_list):
            metrics_dict = ap_calculator.compute_metrics()
            logger.info(f"=====================>{prefix} IOU THRESH: {opt['ap_iou_thresholds'][i]}<=====================")
            for key in metrics_dict:
                logger.info(f'{key} {metrics_dict[key]}')
            if prefix == 'head6_' and ap_calculator.ap_iou_thresh > 0.3:
                mAP = metrics_dict['mAP']
            mAPs[i][1][prefix] = metrics_dict['mAP']
            ap_calculator.reset()
    # Distinct loop names: the original reused (and clobbered) ``mAP`` here.
    for iou_thresh, per_prefix_map in mAPs:
        logger.info(f'IoU[{iou_thresh}]:\t' + ''.join(
            [f'{key}: {per_prefix_map[key]:.4f} \t'
             for key in sorted(per_prefix_map.keys())]))
    return mAP
       

def eval(opt):
    """Dispatch evaluation to the model named in ``opt['model_name']``.

    NOTE(review): this shadows the builtin ``eval``; the name is kept for
    backward compatibility with existing callers.

    Args:
        opt (dict): parsed YAML config. Must contain ``model_name``
            ('groupfree_3d' or 'pointpilliars' — spelling as in the
            configs) plus the model-specific keys each branch reads.
            Any other name is a no-op, matching the original behaviour.
    """
    if opt['model_name'] == 'groupfree_3d':
        _eval_groupfree_3d(opt)
    elif opt['model_name'] == 'pointpilliars':
        _eval_pointpillars(opt)


def _eval_groupfree_3d(opt):
    """Evaluate Groupfree-3D on the ScanNet v2 validation split."""
    from mind3d.models.groupfree_3d import Groupfree3DModel
    from mind3d.dataset.scannet_v2 import ScannetDetectionDataset, ScannetDatasetConfig

    DATASET_CONFIG = ScannetDatasetConfig()
    context.set_context(mode=context.PYNATIVE_MODE,  # GRAPH_MODE, PYNATIVE_MODE
                        device_target="GPU",
                        pynative_synchronize=False,
                        max_call_depth=1000)

    # Validation dataset, batched in pairs (drop_remainder keeps shapes fixed).
    eval_dataset = ScannetDetectionDataset('val', num_points=opt['num_point'],
                                           data_root=opt['data_root'])
    eval_ds = ds.GeneratorDataset(
        eval_dataset,
        ["point_clouds", "center_label", "heading_class_label",
         "heading_residual_label", "size_class_label", "size_residual_label",
         "size_gts", "sem_cls_label", "box_label_mask", "point_obj_mask",
         "point_instance_label", "scan_idx", "pcl_color"],
        num_samples=None, num_parallel_workers=1, shuffle=False)
    eval_ds = eval_ds.batch(batch_size=2, drop_remainder=True)

    num_input_channel = 0  # geometry-only input (no color/height features)
    model = Groupfree3DModel(num_class=DATASET_CONFIG.num_class,
                             num_heading_bin=DATASET_CONFIG.num_heading_bin,
                             num_size_cluster=DATASET_CONFIG.num_size_cluster,
                             mean_size_arr=DATASET_CONFIG.mean_size_arr,
                             input_feature_dim=num_input_channel,
                             width=opt['width'],
                             bn_momentum=opt['bn_momentum'],
                             sync_bn=False,
                             num_proposal=opt['num_target'],
                             sampling=opt['sampling'],
                             dropout=opt['transformer_dropout'],
                             activation=opt['transformer_activation'],
                             nhead=opt['nhead'],
                             num_decoder_layers=opt['num_decoder_layers'],
                             dim_feedforward=opt['dim_feedforward'],
                             self_position_embedding=opt['self_position_embedding'],
                             cross_position_embedding=opt['cross_position_embedding'],
                             size_cls_agnostic=False)
    # Fix: the original called model.set_train() (i.e. train mode) right
    # before evaluation; inference must run with train=False.
    model.set_train(False)

    param_dict = load_checkpoint(opt['checkpoint_path'])
    load_param_into_net(model, param_dict)
    eval_net(eval_ds, model, opt)


def _eval_pointpillars(opt):
    """Evaluate PointPillars against the KITTI validation annotations."""
    from mind3d.dataset.kitti.src.core.eval_utils import get_official_eval_result
    from mind3d.dataset.kitti.src.predict import predict
    from mind3d.dataset.kitti.src.predict import predict_kitti_to_anno
    from mind3d.dataset.kitti.src.utils import get_config
    from mind3d.dataset.kitti.src.utils import get_model_dataset
    from mind3d.dataset.kitti.src.utils import get_params_for_net

    # Read everything from the config instead of mutating the global ``args``
    # namespace as the original did (nothing else reads those attributes).
    setup = opt['eval_set_up']
    cfg = get_config(setup['cfg_path'])

    # The DEVICE_ID environment variable wins over the configured GPU id.
    device_id = int(os.getenv('DEVICE_ID', setup['GPU_ID']))
    context.set_context(mode=context.GRAPH_MODE,
                        device_target=setup['device_target'],
                        device_id=device_id)

    model_cfg = cfg['model']
    center_limit_range = model_cfg['post_center_limit_range']

    pointpillarsnet, eval_dataset, box_coder = get_model_dataset(cfg, False)
    params = load_checkpoint(setup['ckpt_path'])
    load_param_into_net(pointpillarsnet, get_params_for_net(params))

    eval_input_cfg = cfg['eval_input_reader']
    point_ds = de.GeneratorDataset(
        eval_dataset,
        column_names=eval_dataset.data_keys,
        python_multiprocessing=True,
        num_parallel_workers=3,
        max_rowsize=100,
        shuffle=False
    )
    batch_size = eval_input_cfg['batch_size']
    point_ds = point_ds.batch(batch_size, drop_remainder=False)
    data_loader = point_ds.create_dict_iterator(num_epochs=1)

    class_names = list(eval_input_cfg['class_names'])
    dt_annos = []
    gt_annos = [info["annos"] for info in eval_dataset.kitti_infos]

    log_freq = 100
    len_dataset = len(eval_dataset)
    # Fix: the original opened this file and never closed it (handle leak);
    # the with-block guarantees the log file is flushed and closed.
    with open(os.path.join(setup['eval_path'], 'log_eval.txt'),
              mode="a", encoding="utf-8") as log_file:
        start_banner = '+++++++++++++++++++++++++++++++++++++++++++++++++++++Start eval++++++++++++++++++++++++++++++++++++++++++++++++++++'
        print(start_banner)
        print(start_banner, file=log_file)
        print('Length of testing dataset', len_dataset)
        print('Length of testing dataset', len_dataset, file=log_file)
        start = time.time()
        for i, data in enumerate(data_loader):
            voxels = data["voxels"]
            num_points = data["num_points"]
            coors = data["coordinates"]
            bev_map = data.get('bev_map', False)

            # The network returns either (box, cls) or (box, cls, dir_cls);
            # zip truncates to whichever is present.
            preds = pointpillarsnet(voxels, num_points, coors, bev_map)
            preds = dict(zip(('box_preds', 'cls_preds', 'dir_cls_preds'), preds))
            preds = predict(data, preds, model_cfg, box_coder)

            dt_annos += predict_kitti_to_anno(preds,
                                              data,
                                              class_names,
                                              center_limit_range)

            if i % log_freq == 0 and i > 0:
                time_used = time.time() - start
                progress = f'processed: {i * batch_size}/{len_dataset} imgs, Total_time/sec: {time_used} '
                print(progress, flush=True)
                print(progress, flush=True, file=log_file)

        result = get_official_eval_result(
            gt_annos,
            dt_annos,
            class_names,
        )
        print(result)
        print(result, file=log_file)
        end_banner = '+++++++++++++++++++++++++++++++++++++++++++++++++++++End of eval++++++++++++++++++++++++++++++++++++++++++++++++++++'
        print(end_banner)
        print(end_banner, file=log_file)


if __name__ == '__main__':
    # CLI entry point: load the YAML config named by --opt and run eval().
    # NOTE(review): the description says 'train' but this script evaluates.
    parser = argparse.ArgumentParser(description='3d detection train')
    # NOTE(review): the default is a machine-specific absolute path; pass
    # --opt explicitly when running on another machine.
    parser.add_argument('--opt', default='/data1/mind3d-master/configs/pointpillars/car_xyres16.yaml', help='Path to config file.')
    # parse_known_args so unrelated launcher flags are ignored; ``args`` is
    # a module global that eval() also reads in the pointpillars branch.
    args = parser.parse_known_args()[0]
    opt = load_yaml(args.opt)
    eval(opt)

