import warnings
warnings.filterwarnings("ignore")
import os
import numpy as np
import argparse
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import mindspore
import mindspore.dataset as ds
import mindspore.nn as nn
import mindspore.ops as ops
import mindspore.communication as comm
from mindspore import context, load_checkpoint, load_param_into_net
from mindspore import save_checkpoint
from mindspore.context import ParallelMode
from mindspore.communication import init, get_rank
from mindspore.communication.management import get_group_size
from datetime import datetime
import logging
from mindspore import context
from mindspore import dataset as de
from mindspore import load_checkpoint
from mindspore import load_param_into_net
import json
import sys
# Make the current working directory importable so the local ``mind3d``
# package resolves when the script is launched from the repo root.
cpath = os.getcwd()
sys.path.append(os.getcwd())
from mind3d.utils.load_yaml import load_yaml
from mind3d.dataset.kitti.src.predict import predict
from mind3d.dataset.kitti.src.utils import get_config
from mind3d.dataset.kitti.src.utils import get_model_dataset
from mind3d.dataset.kitti.src.utils import get_params_for_net

# NOTE(review): duplicates the filterwarnings("ignore") call at the top of
# the file; one of the two can be removed.
warnings.filterwarnings('ignore')



def log_string(filename, verbosity=1, name=None):
    """Create (or reconfigure) a logger that writes to ``filename`` and stderr.

    Args:
        filename: path of the log file; opened in ``"w"`` mode, so an
            existing file is truncated.
        verbosity: 0 -> DEBUG, 1 -> INFO (default), 2 -> WARNING.  Any
            other value raises ``KeyError``.
        name: logger name; ``None`` configures the root logger.

    Returns:
        The configured ``logging.Logger`` instance.
    """
    level_dict = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}
    formatter = logging.Formatter(
        "[%(asctime)s][%(filename)s][line:%(lineno)d][%(levelname)s] %(message)s"
    )
    logger = logging.getLogger(name)
    logger.setLevel(level_dict[verbosity])

    # Drop handlers left over from a previous call: ``getLogger`` returns
    # the same object per name, so re-running setup used to stack handlers
    # and emit every record multiple times.
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
        handler.close()

    fh = logging.FileHandler(filename, "w")
    fh.setFormatter(formatter)
    logger.addHandler(fh)

    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    logger.addHandler(sh)
    return logger


def show_pointcloud_det(data, bb=None, save=None):
    """Scatter-plot a point cloud and (optionally) overlay 3D boxes.

    Args:
        data: (N, >=3) array of points; columns 0..2 are x, y, z.
        bb: optional (M, >=6) array of boxes as (cx, cy, cz, dx, dy, dz).
            The original code dereferenced ``bb`` unconditionally and
            crashed when it was left as the default ``None``; it is now
            skipped in that case.
        save: if given, the figure is written to this path (dpi=500) and
            closed; otherwise the plot window is shown.
    """
    data = data.astype(np.float32)
    plt.figure(figsize=(8, 6))
    ax = plt.subplot(111, projection='3d')
    ax.view_init(elev=30, azim=90)
    plt.axis('off')
    ax.scatter(data[:, 0], data[:, 1], data[:, 2], s=8, marker='.')

    if bb is not None:
        # Boxes are given by their centre; bar3d wants the min corner.
        x = bb[:, 0] - bb[:, 3] / 2
        y = bb[:, 1] - bb[:, 4] / 2
        z = bb[:, 2] - bb[:, 5] / 2
        dx, dy, dz = bb[:, 3], bb[:, 4], bb[:, 5]
        # alpha=0 keeps only the red wireframe edges visible.
        ax.bar3d(x, y, z, dx, dy, dz, zsort='average', edgecolor='red',
                 linewidth=0.5, alpha=0)
    if save:
        plt.savefig(save, dpi=500, bbox_inches='tight', transparent=True)
        plt.close()
    else:
        plt.show()

def sigmoid(x):
    """Numpy elementwise sigmoid: 1 / (1 + exp(-x)).

    (The original docstring wrongly said "softmax".)
    """
    s = 1 / (1 + np.exp(-x))
    return s


def softmax(x):
    """Numpy softmax over the last axis of ``x`` (numerically stabilised)."""
    # Subtract the per-row max before exponentiating to avoid overflow.
    shifted = x - np.max(x, axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=-1, keepdims=True)

# show_pointcloud_det(np.float32(((1, 1, 1), (2, 1, 2), (1, 2, 1), (1.5, 1.5, 1.5), (2, 2, 2))), np.float32(((0, 0, 0, 1, 1, 1), (1, 1, 1, 1, 1, 1))))


def infer_net(eval_dataset, network, opt):
    """Run one inference pass over ``eval_dataset`` and dump boxes to disk.

    For each batch, forwards the point clouds through ``network``, decodes
    the final decoder stage (decoder6) into oriented bounding boxes, and
    writes ``pointcloud_<i>.txt`` / ``predbox_<i>.txt`` / ``gtbox_<i>.txt``
    files into the working directory.

    NOTE(review): ``logger`` and ``DATASET_CONFIG`` are free variables that
    are expected to exist at module scope; they are not defined at the top
    of this file -- verify they are set up before this function is called.
    """
    logger.info("=================start infer==================")
    # eval for one epoch
    
    network.set_train(False)
    for idx, data in tqdm(enumerate(eval_dataset.create_dict_iterator(), 0)):
        # data parse: unpack the detection batch (labels are unused below
        # except for the ground-truth box dump at the end)
        point_cloud = data['point_clouds']
        gt_center =  data['center_label']
        angle_classes = data['heading_class_label']
        gt_angle_residuals = data['heading_residual_label']
        size_classes =  data['size_class_label']
        size_residual_label =  data['size_residual_label']
        size_gts = data['size_gts']
        target_bboxes_semcls =  data['sem_cls_label']
        target_bboxes_mask = data['box_label_mask']
        point_obj_mask =  data['point_obj_mask']
        point_instance_label =  data['point_instance_label']
        # model forward; only decoder6_output (the last stage) is decoded
        backbone_xyz, backbone_sample_idx, fps_sample_inx, points_obj_cls_logits, proposal_output, decoder1_output, decoder2_output, decoder3_output, decoder4_output, decoder5_output, decoder6_output =  network(point_cloud)
        
        # Indices into the decoder output tuple -- presumably
        # (..., objectness, center, heading_scores, _, heading_residuals,
        #  size_scores, _, size_residuals, pred_size); verify against the
        # Groupfree3DModel head definition.
        objectness_scores = decoder6_output[1]
        pred_center = decoder6_output[2]
        heading_scores = decoder6_output[3]
        heading_residuals = decoder6_output[5]
        size_scores = decoder6_output[6]
        size_residuals = decoder6_output[8]
        pred_size = decoder6_output[9]
        
        obj_logits = objectness_scores.asnumpy()
        obj_prob = sigmoid(obj_logits)[:, :, 0]  # (B,K)
        # pick the argmax heading bin and gather its residual
        pred_heading_class = ops.Argmax()(heading_scores)
        pred_heading_residual = ops.GatherD()(heading_residuals, 2, ops.ExpandDims()(pred_heading_class, -1))  # B,num_proposal,1
        
        # pick the argmax size cluster and gather its (x, y, z) residual
        pred_size_class = ops.Argmax()(size_scores)  # B,num_proposal
        pred_size_residual = ops.GatherD()(size_residuals, 2, ops.tile(ops.ExpandDims()(ops.ExpandDims()(pred_size_class, -1), -1), (1, 1, 1, 3)))  # B,num_proposal,1,3
        pred_size_residual = pred_size_residual.squeeze(2)
        
        
        # Dump predictions: keep only proposals whose objectness prob > 0.5.
        for i in range(pred_center.shape[0]):
            obbs = []
            for j in range(pred_center.shape[1]):
                obb = DATASET_CONFIG.param2obb(pred_center[i,j,0:3], pred_heading_class[i,j], pred_heading_residual[i,j], pred_size_class[i,j], pred_size_residual[i,j], pred_size[i,j])
                if obj_prob[i,j]>0.5:
                    obbs.append(obb[:6])
            if len(obbs)>0:
                obbs = np.vstack(tuple(obbs)) # (num_proposal, 6)
                np.savetxt('pointcloud_{}.txt'.format(i), point_cloud.asnumpy()[i,:,:], fmt='%.4f', delimiter=',')
                np.savetxt('predbox_{}.txt'.format(i), obbs, fmt='%.4f', delimiter=',')               
                print('infer success!')
                
        # Dump ground-truth boxes for the same batch.
        # NOTE(review): this param2obb call passes 5 params vs 6 in the
        # prediction loop above -- confirm param2obb's signature covers both.
        for i in range(gt_center.shape[0]):
            obbs = []
            for j in range(gt_center.shape[1]):
                obb = DATASET_CONFIG.param2obb(gt_center[i,j,0:3], angle_classes[i,j], gt_angle_residuals[i,j], size_classes[i,j], size_residual_label[i,j])
                obbs.append(obb[:6])
            if len(obbs)>0:
                obbs = np.vstack(tuple(obbs)) # (num_proposal, 6)     
                np.savetxt('gtbox_{}.txt'.format(i), obbs, fmt='%.4f', delimiter=',') 
                print('infer success!')
        



def eval(opt):
    """Run inference/evaluation for the model named in ``opt['model_name']``.

    Supported models:
      * ``'groupfree_3d'``  -- ScanNet detection in PyNative GPU mode;
        delegates the actual pass to ``infer_net``.
      * ``'pointpilliars'`` -- KITTI PointPillars in Graph mode; dumps the
        per-frame predictions as JSON to ``args.result_file``.

    NOTE(review): this function shadows the builtin ``eval`` and reads the
    module-level ``args`` namespace created in ``__main__``; both are kept
    as-is for caller compatibility.
    """
    # ``infer_net`` reads these two names at module scope; declaring them
    # global here publishes the values the groupfree branch creates
    # (previously they were plain locals and infer_net hit NameError).
    global DATASET_CONFIG, logger

    # ----------------- dataset / execution-context setup -----------------
    if opt['model_name'] == 'groupfree_3d':
        from mind3d.models.groupfree_3d import Groupfree3DModel
        from mind3d.models.losses.groupfree_3d_eval_helper import APCalculator, parse_predictions, parse_groundtruths
        from mind3d.dataset.scannet_v2 import ScannetDetectionDataset, ScannetDatasetConfig
        # One timestamped log folder per run: date + time, so two runs on
        # the same day do not collide (the original concatenated the date
        # format twice).
        DATASET_CONFIG = ScannetDatasetConfig()
        output_root = './eval-log'
        log_name = datetime.now().strftime('%y%m%d') + '_' + datetime.now().strftime('%H%M%S')
        exp_root = os.path.join(output_root, log_name)
        os.makedirs(exp_root, exist_ok=True)
        logger = log_string(os.path.join(exp_root, "groupfree-3d.log"))
        # NOTE(review): ``args`` only defines ``--opt`` when this branch
        # runs, so ``args.cfg_path`` raises AttributeError here -- confirm
        # the intended config source (``args.opt`` was already loaded).
        opt = load_yaml(args.cfg_path)
        # single GPU
        context.set_context(mode=context.PYNATIVE_MODE,  # GRAPH_MODE, PYNATIVE_MODE
                            device_target="GPU",
                            pynative_synchronize=False,
                            max_call_depth=1000)
        eval_dataset = ScannetDetectionDataset('val', num_points=opt['num_point'], data_root=opt['data_root'])
        eval_ds = ds.GeneratorDataset(eval_dataset, ["point_clouds", "center_label", "heading_class_label", "heading_residual_label",
                                                        "size_class_label", "size_residual_label", "size_gts", "sem_cls_label", "box_label_mask",
                                                        "point_obj_mask", "point_instance_label", "scan_idx", "pcl_color"],
                                                    num_samples=2, num_parallel_workers=1, shuffle=False)
        eval_ds = eval_ds.batch(batch_size=2, drop_remainder=True)
    elif opt['model_name'] == 'pointpilliars':
        # Pull the inference settings out of the YAML into the CLI namespace.
        print('start infer point clouds')
        args.cfg_path = opt['infer_set_up']['cfg_path']
        args.ckpt_path = opt['infer_set_up']['ckpt_path']
        args.device_target = opt['infer_set_up']['device_target']
        args.is_distributed = opt['infer_set_up']['is_distributed']
        args.GPU_ID = opt['infer_set_up']['GPU_ID']
        args.result_file = opt['infer_set_up']['result_file']
        cfg_path = args.cfg_path
        ckpt_path = args.ckpt_path

        cfg = get_config(cfg_path)

        # The DEVICE_ID environment variable overrides the configured GPU id.
        device_id = int(os.getenv('DEVICE_ID', args.GPU_ID))
        device_target = args.device_target

        context.set_context(mode=context.GRAPH_MODE, device_target=device_target, device_id=device_id)

        model_cfg = cfg['model']

    # ----------------- model construction -----------------
    if opt['model_name'] == 'groupfree_3d':
        num_input_channel = 0
        model = Groupfree3DModel(num_class=DATASET_CONFIG.num_class,
                                num_heading_bin=DATASET_CONFIG.num_heading_bin,
                                num_size_cluster=DATASET_CONFIG.num_size_cluster,
                                mean_size_arr=DATASET_CONFIG.mean_size_arr,
                                input_feature_dim=num_input_channel,
                                width=opt['width'],
                                bn_momentum=opt['bn_momentum'],
                                sync_bn=False,
                                num_proposal=opt['num_target'],
                                sampling=opt['sampling'],
                                dropout=opt['transformer_dropout'],
                                activation=opt['transformer_activation'],
                                nhead=opt['nhead'],
                                num_decoder_layers=opt['num_decoder_layers'],
                                dim_feedforward=opt['dim_feedforward'],
                                self_position_embedding=opt['self_position_embedding'],
                                cross_position_embedding=opt['cross_position_embedding'],
                                size_cls_agnostic=False)
        # set_train() defaults to True; infer_net flips it to False later.
        model.set_train()
    elif opt['model_name'] == 'pointpilliars':
        pointpillarsnet, eval_dataset, box_coder = get_model_dataset(cfg, False)

        params = load_checkpoint(ckpt_path)
        new_params = get_params_for_net(params)
        load_param_into_net(pointpillarsnet, new_params)

        eval_input_cfg = cfg['eval_input_reader']

        eval_column_names = eval_dataset.data_keys

        # Deliberately NOT named ``ds``: assigning to ``ds`` anywhere in this
        # function made it function-local everywhere, so the
        # ``ds.GeneratorDataset`` call in the groupfree branch above raised
        # UnboundLocalError instead of using the imported module.
        pp_ds = de.GeneratorDataset(
            eval_dataset,
            column_names=eval_column_names,
            python_multiprocessing=True,
            num_parallel_workers=3,
            max_rowsize=100,
            shuffle=False
        )

        batch_size = eval_input_cfg['batch_size']
        pp_ds = pp_ds.batch(batch_size, drop_remainder=False)
        data_loader = pp_ds.create_dict_iterator(num_epochs=1)

    # ----------------- load pretrained ckpt and run -----------------
    if opt['model_name'] == 'groupfree_3d':
        # NOTE(review): hard-coded absolute checkpoint path -- move to config.
        param_dict = load_checkpoint('/home/amax/tyolm/codes/groupfree-3d-ms-v3/log-Group-Free-3D/groupfree_3d/scannet_1677047186/91585450/step_30000.ckpt')
        load_param_into_net(model, param_dict)
        infer_net(eval_ds, model, opt)
    elif opt['model_name'] == 'pointpilliars':
        dt_annos = {}

        for i, data in enumerate(data_loader):
            print('infering point cloud index is:', i)
            voxels = data["voxels"]
            num_points = data["num_points"]
            coors = data["coordinates"]
            bev_map = data.get('bev_map', False)

            preds = pointpillarsnet(voxels, num_points, coors, bev_map)
            # Networks without a direction-classification head return 2 tensors.
            if len(preds) == 2:
                preds = {
                    'box_preds': preds[0],
                    'cls_preds': preds[1],
                }
            else:
                preds = {
                    'box_preds': preds[0],
                    'cls_preds': preds[1],
                    'dir_cls_preds': preds[2]
                }
            preds = predict(data, preds, model_cfg, box_coder)

            # Tensors -> plain lists so the result dict is JSON-serializable.
            for key in ('bbox', 'box3d_camera', 'box3d_lidar', 'scores',
                        'label_preds', 'image_idx'):
                preds[0][key] = preds[0][key].asnumpy().tolist()

            dt_annos[str(i)] = preds

        with open(args.result_file, 'w') as json_file:
            json_file.write(json.dumps(dt_annos))



if __name__ == '__main__':
    # CLI entry point: load the YAML config named by --opt and run ``eval``.
    parser = argparse.ArgumentParser(description='3d detection train')
    parser.add_argument('--opt', default='/data1/mind3d-master/configs/pointpillars/car_xyres16.yaml', help='Path to config file.')
    # parse_known_args so unrelated launcher flags are tolerated.  The
    # resulting ``args`` namespace is read as a module-level global inside
    # ``eval`` (and mutated by its pointpillars branch) -- do not rename it.
    args = parser.parse_known_args()[0]
    opt = load_yaml(args.opt)
    eval(opt)

