import warnings
warnings.filterwarnings("ignore")
import argparse
import datetime
import os
import warnings
from pathlib import Path
import os
import time
import numpy as np
import argparse
import mindspore
import mindspore.dataset as ds
import mindspore.nn as nn
from mindspore import context, ops
from mindspore import save_checkpoint
from mindspore.communication import init, get_rank
from mindspore.communication.management import get_group_size
from mindspore.parallel._utils import (_get_device_num, _get_gradients_mean, _get_parallel_mode)
from mindspore import context
from mindspore import dataset as de
from mindspore import nn
from mindspore import set_seed
from mindspore.communication.management import get_group_size
from mindspore.communication.management import get_rank
from mindspore.communication.management import init
from mindspore.context import ParallelMode
from mindspore.nn.optim import Adam
from mindspore.train.callback import CheckpointConfig
from mindspore.train.callback import ModelCheckpoint
from mindspore.train.callback import RunContext
from mindspore.train.callback import _InternalCallbackParam
import sys
# Make the current working directory importable so the local ``mind3d``
# package resolves when the script is launched from the repository root.
cpath = os.getcwd()
sys.path.append(os.getcwd())
from mind3d.utils.logger import setup_logger
#from mind3d.models.groupfree_3d import Groupfree3DModel
#from mind3d.models.losses.groupfree_3d_loss_helper import get_loss
#from mind3d.dataset.scannet_v2 import ScannetDetectionDataset, ScannetDatasetConfig
#from mind3d.utils.load_yaml import load_yaml
from mind3d.dataset.kitti.src.pointpillars import PointPillarsWithLossCell
from mind3d.dataset.kitti.src.pointpillars import TrainingWrapper
from mind3d.dataset.kitti.src.utils import get_config
from mind3d.dataset.kitti.src.utils import get_model_dataset
from mind3d.utils.load_yaml import load_yaml

'''
DATASET_CONFIG = ScannetDatasetConfig()
CONFIG_DICT = {'remove_empty_box': False, 'use_3d_nms': True,
               'nms_iou': 0.25, 'use_old_type_nms': False, 'cls_nms': True,
               'per_class_proposal': True, 'conf_thresh': 0.0,
               'dataset_config': DATASET_CONFIG}'''

def set_default(args):
    """Prepare the training environment from the parsed CLI arguments.

    Fixes the global random seed, creates the checkpoint directory, loads
    the YAML/py config, and configures the MindSpore context for either
    single-device or data-parallel distributed execution.

    Args:
        args: namespace with ``cfg_path``, ``save_path``, ``is_distributed``,
            ``device_target`` and ``GPU_ID`` attributes.

    Returns:
        tuple: ``(cfg, rank, device_num)`` — the loaded config, this
        process's rank and the number of participating devices.
    """
    set_seed(0)

    save_dir = Path(args.save_path)
    save_dir.mkdir(exist_ok=True, parents=True)

    cfg = get_config(Path(args.cfg_path))

    context.set_context(mode=context.GRAPH_MODE,
                        device_target=args.device_target)

    if int(args.is_distributed):
        # Initialise the communication backend and enable data parallelism.
        init()
        rank = get_rank()
        device_num = get_group_size()
        context.reset_auto_parallel_context()
        context.set_auto_parallel_context(
            parallel_mode=ParallelMode.DATA_PARALLEL,
            gradients_mean=True,
            device_num=device_num,
        )
    else:
        # Single-device run: pick the device from DEVICE_ID, falling back
        # to the GPU id passed on the command line.
        rank, device_num = 0, 1
        context.set_context(device_id=int(os.getenv('DEVICE_ID', args.GPU_ID)))

    return cfg, rank, device_num

class TrainOneStepCellGradsNorm(nn.Cell):
    """One training step with gradient clipping by global norm.

    Wraps a loss-returning network and an optimizer: the forward pass
    computes the loss, gradients are taken w.r.t. the optimizer's
    parameters, clipped by global norm, all-reduced across devices when
    running data-parallel, and applied by the optimizer.
    """

    def __init__(self, network, optimizer):
        super(TrainOneStepCellGradsNorm, self).__init__(auto_prefix=False)
        self.network = network  # network whose forward pass returns the loss
        self.network.set_grad()  # build the backward graph (required in PYNATIVE mode)
        self.optimizer = optimizer  # applies the parameter updates
        self.weights = self.optimizer.parameters  # parameters to differentiate
        self.grad = ops.GradOperation(get_by_list=True)  # gradients w.r.t. the parameter list

        # Distributed setup: reduce gradients across devices in a
        # data/hybrid parallel mode, otherwise pass them through unchanged.
        # Use the ParallelMode imported at the top of the file for
        # consistency with set_default().
        self.grad_reducer = ops.identity
        self.parallel_mode = _get_parallel_mode()
        self.reducer_flag = self.parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL)
        if self.reducer_flag:
            self.mean = _get_gradients_mean()
            self.degree = _get_device_num()
            self.grad_reducer = nn.DistributedGradReducer(self.weights, self.mean, self.degree)

    def construct(self, *inputs):
        loss = self.network(*inputs)  # forward pass, returns the loss
        grads = self.grad(self.network, self.weights)(*inputs)  # per-parameter gradients
        # Clip by global norm (ops default clip_norm) to stabilise training.
        grads = ops.clip_by_global_norm(grads)
        grads = self.grad_reducer(grads)  # all-reduce in distributed mode; identity otherwise
        self.optimizer(grads)  # apply the update
        return loss

class CustomWithLossCell(nn.Cell):
    """Wrap the GroupFree3D detector and its loss helper into one cell.

    The forward pass runs the detector on the point cloud, evaluates the
    detection loss once per prediction head (the initial proposal head plus
    six decoder heads — all with the identical argument pattern), and
    returns the KPS sampling loss plus the mean of the seven head losses.

    NOTE(review): ``DATASET_CONFIG`` is referenced as a global here but in
    this file it is only defined locally inside ``train()`` — confirm it is
    available at module scope before this cell is executed.
    """

    def __init__(self, module, loss_func):
        super(CustomWithLossCell, self).__init__(auto_prefix=False)
        self._module = module        # detector network
        self._loss_func = loss_func  # GroupFree3D loss helper

    def construct(self, data):
        # Unpack ground-truth labels from the batched sample dict.
        point_cloud = data['point_clouds']
        gt_center = data['center_label']
        angle_classes = data['heading_class_label']
        gt_angle_residuals = data['heading_residual_label']
        size_gts = data['size_gts']
        size_classes = data['size_class_label']
        size_residual_label = data['size_residual_label']
        target_bboxes_semcls = data['sem_cls_label']
        target_bboxes_mask = data['box_label_mask']
        point_obj_mask = data['point_obj_mask']
        point_instance_label = data['point_instance_label']

        (backbone_xyz, backbone_sample_idx, fps_sample_inx, points_obj_cls_logits,
         proposal_output, decoder1_output, decoder2_output, decoder3_output,
         decoder4_output, decoder5_output, decoder6_output) = self._module(point_cloud)

        # The KPS (sampling) loss is only taken from the proposal-head call;
        # the per-head detection losses are accumulated and averaged below.
        KPS_loss_sum, head_loss_sum, _ = self._loss_func(
            backbone_xyz, points_obj_cls_logits, size_gts, target_bboxes_mask,
            proposal_output[1], point_obj_mask, proposal_output[2], gt_center,
            proposal_output[3], angle_classes, proposal_output[4], gt_angle_residuals,
            point_instance_label, proposal_output[6], size_classes, proposal_output[7],
            size_residual_label, proposal_output[10], target_bboxes_semcls,
            backbone_sample_idx, fps_sample_inx, DATASET_CONFIG.mean_size_arr)

        # Every decoder head reuses the same argument pattern, so loop
        # instead of repeating the call six times.
        for decoder_output in (decoder1_output, decoder2_output, decoder3_output,
                               decoder4_output, decoder5_output, decoder6_output):
            _, decoder_loss, _ = self._loss_func(
                backbone_xyz, points_obj_cls_logits, size_gts, target_bboxes_mask,
                decoder_output[1], point_obj_mask, decoder_output[2], gt_center,
                decoder_output[3], angle_classes, decoder_output[4], gt_angle_residuals,
                point_instance_label, decoder_output[6], size_classes, decoder_output[7],
                size_residual_label, decoder_output[10], target_bboxes_semcls,
                backbone_sample_idx, fps_sample_inx, DATASET_CONFIG.mean_size_arr)
            head_loss_sum = head_loss_sum + decoder_loss

        # KPS loss plus the mean detection loss over the 7 heads.
        loss = KPS_loss_sum + head_loss_sum / 7.0
        return loss


def train_net(train_dataset, eval_dataset, network, net_train, opt, logger):
    """Run the GroupFree3D training loop.

    Iterates once over ``train_dataset`` (expected to already be repeated
    for the desired number of epochs), logs the mean training loss once
    per epoch, and saves a checkpoint on rank 0 at the configured step
    frequency and at the final step.

    Args:
        train_dataset: batched MindSpore dataset used for training.
        eval_dataset: batched evaluation dataset. Currently unused — the
            in-training evaluation code was dead (a no-op string literal)
            and has been removed; the parameter is kept for interface
            compatibility.
        network: the model whose weights are checkpointed.
        net_train: train-step cell; called on each batch, returns the loss.
        opt (dict): options; uses ``'step_size_per_epoch'``,
            ``'ckpt_save_dirt_freq'`` (given in epochs, converted to steps
            in place here) and ``'log_dir'``.
        logger: logger for progress messages.
    """
    logger.info("=================start training==================")
    step_size = train_dataset.get_dataset_size()
    # Convert the checkpoint frequency from epochs to steps (mutates opt).
    opt['ckpt_save_dirt_freq'] = opt['ckpt_save_dirt_freq'] * opt['step_size_per_epoch']
    network.set_train(True)
    loss_list = []
    for iter_id, data in enumerate(train_dataset.create_dict_iterator(), 0):
        loss = net_train(data)
        loss_list.append(loss.asnumpy())
        epoch = (iter_id + 1) // opt['step_size_per_epoch']
        # Log the mean loss once per epoch, then reset the accumulator.
        if (iter_id + 1) % opt['step_size_per_epoch'] == 0:
            logger.info("Epoch: {}, train_loss: {}".format(epoch, np.mean(np.array(loss_list))))
            loss_list = []
        # Save a checkpoint on rank 0 at the configured frequency and at
        # the very last step.
        if (iter_id + 1) % opt['ckpt_save_dirt_freq'] == 0 or (iter_id + 1) == step_size:
            if get_rank() == 0:
                logger.info('saving model...')
                save_path = os.path.join(opt['log_dir'], 'step_' + str(iter_id + 1) + '.ckpt')
                save_checkpoint(network, save_path)

def train_net_pointpilliar(args, train_cfg, data_loader, network, save_ckpt_log_flag, cb_params, ckpt_cb, run_context, steps_per_epoch, batch_size, device_num):
    """Run the PointPillars training loop.

    Feeds each batch from ``data_loader`` through ``network`` (a cell that
    returns the loss). When ``save_ckpt_log_flag`` is true (rank 0), it
    drives the checkpoint callback every step, prints throughput
    statistics every ``train_cfg['log_frequency_step']`` steps, and
    advances the epoch counter. Start/end banners and progress lines are
    mirrored to ``<args.save_path>/log_train.txt``.
    """
    log_freq = train_cfg['log_frequency_step']
    old_progress = -1
    start = time.time()

    # Open the text log with a context manager so the handle is closed
    # even if training raises (the original leaked the file object).
    with open(os.path.join(args.save_path, 'log_train.txt'), mode="a", encoding="utf-8") as log_train_txt_path:
        print('+++++++++++++++++++++++++++++++++++++++++++++++++++++Start training++++++++++++++++++++++++++++++++++++++++++++++++++++')
        print('+++++++++++++++++++++++++++++++++++++++++++++++++++++Start training++++++++++++++++++++++++++++++++++++++++++++++++++++', file=log_train_txt_path)
        for i, data in enumerate(data_loader):
            voxels = data["voxels"]
            num_points = data["num_points"]
            coors = data["coordinates"]
            labels = data['labels']
            reg_targets = data['reg_targets']
            batch_anchors = data["anchors"]
            bev_map = data.get('bev_map', False)  # value not used if use_bev = False

            loss = network(voxels, num_points, coors, bev_map, labels, reg_targets, batch_anchors)
            if save_ckpt_log_flag:
                cb_params.cur_step_num = i + 1  # current step number
                cb_params.batch_num = i + 2
                ckpt_cb.step_end(run_context)

                if i % log_freq == 0:
                    # Throughput since the previous log line.
                    time_used = time.time() - start
                    epoch = i // steps_per_epoch
                    fps = (i - old_progress) * batch_size * device_num / time_used
                    date_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    print(f'{date_time} Epoch:{epoch}, Iteration:{i}, 'f'Loss:{loss}, Fps:{round(fps, 2)} Images/sec, Total_time/sec：{time_used}', flush=True)
                    print(f'{date_time} Epoch:{epoch}, Iteration:{i}, 'f'Loss:{loss}, Fps:{round(fps, 2)} Images/sec, Total_time/sec：{time_used}', flush=True, file=log_train_txt_path)
                    start = time.time()
                    old_progress = i

                if (i + 1) % steps_per_epoch == 0:
                    cb_params.cur_epoch_num += 1
        print('+++++++++++++++++++++++++++++++++++++++++++++++++++++End of training++++++++++++++++++++++++++++++++++++++++++++++++++++')
        print('+++++++++++++++++++++++++++++++++++++++++++++++++++++End of training++++++++++++++++++++++++++++++++++++++++++++++++++++', file=log_train_txt_path)

def train(opt):
    """set dir"""
    if opt['model_name'] == 'groupfree_3d':
        from mind3d.models.groupfree_3d import Groupfree3DModel
        from mind3d.models.losses.groupfree_3d_loss_helper import get_loss
        from mind3d.dataset.scannet_v2 import ScannetDetectionDataset, ScannetDatasetConfig
        DATASET_CONFIG = ScannetDatasetConfig()
        CONFIG_DICT = {'remove_empty_box': False, 'use_3d_nms': True,
               'nms_iou': 0.25, 'use_old_type_nms': False, 'cls_nms': True,
               'per_class_proposal': True, 'conf_thresh': 0.0,
               'dataset_config': DATASET_CONFIG}
        opt = load_yaml(args.cfg_path)
        LOG_DIR = os.path.join(opt['log_dir'], opt['model'],
                               f"{opt['dataset']}_{int(time.time())}", f"{np.random.randint(100000000)}")
        while os.path.exists(LOG_DIR):
            LOG_DIR = os.path.join(opt['log_dir'], opt['model'],
                                   f"{opt['dataset']}_{int(time.time())}", f"{np.random.randint(100000000)}")
        opt['log_dir'] = LOG_DIR
        os.makedirs(opt['log_dir'], exist_ok=True)

        init('nccl')
        rank_size = get_group_size()
        rank_id = get_rank()
        logger = setup_logger(output=opt['log_dir'], distributed_rank=rank_id, name="groupfree_3d")

    """set GPU"""
    if opt['model_name'] == 'groupfree_3d':
        import mindspore.dataset as ds
        if rank_size == 1: # single GPU
            context.set_context(mode=context.PYNATIVE_MODE,#GRAPH_MODE, PYNATIVE_MODE
                                device_target="GPU",
                                pynative_synchronize=False,
                                max_call_depth=1000)


            train_dataset = ScannetDetectionDataset('train', num_points=opt['num_point'], data_root=opt['data_root'], augment=True,)
            train_ds = ds.GeneratorDataset(train_dataset, ["point_clouds", "center_label", "heading_class_label", "heading_residual_label",
                                                            "size_class_label", "size_residual_label", "size_gts", "sem_cls_label", "box_label_mask",
                                                            "point_obj_mask", "point_instance_label", "scan_idx", "pcl_color"],
                                                            num_parallel_workers = 1, shuffle=True)
            train_ds = train_ds.batch(batch_size=opt['batch_size'], drop_remainder=True)
            step_size_per_epoch = train_ds.get_dataset_size()
            opt['step_size_per_epoch'] = step_size_per_epoch

            eval_dataset = ScannetDetectionDataset('val', num_points=opt['num_point'], data_root=opt['data_root'])
            eval_ds = ds.GeneratorDataset(eval_dataset, ["point_clouds", "center_label", "heading_class_label", "heading_residual_label",
                                                            "size_class_label", "size_residual_label", "size_gts", "sem_cls_label", "box_label_mask",
                                                            "point_obj_mask", "point_instance_label", "scan_idx", "pcl_color"],
                                                             num_samples=None, num_parallel_workers = 1, shuffle=False)
            eval_ds = eval_ds.batch(batch_size=opt['batch_size'], drop_remainder=True)
        else: # Multple GPUs
            context.set_context(mode=context.PYNATIVE_MODE,#GRAPH_MODE, PYNATIVE_MODE
                                device_target="GPU",
                                # pynative_synchronize=False,
                                # max_call_depth=1000
                                )
            mindspore.set_auto_parallel_context(parallel_mode=mindspore.ParallelMode.DATA_PARALLEL, gradients_mean=True, parameter_broadcast =True)
            train_dataset = ScannetDetectionDataset('train', num_points=opt['num_point'], data_root=opt['data_root'], augment=True)
            sampler = ds.DistributedSampler(num_shards=rank_size, shard_id=rank_id, num_samples=None) #int(len(train_dataset)/rank_size)
            train_ds = ds.GeneratorDataset(train_dataset, ["point_clouds", "center_label", "heading_class_label", "heading_residual_label",
                                                            "size_class_label", "size_residual_label", "size_gts", "sem_cls_label", "box_label_mask",
                                                            "point_obj_mask", "point_instance_label", "scan_idx", "pcl_color"],
                                                            num_parallel_workers = 1, sampler=sampler, )#shuffle=True,num_shards=rank_size, shard_id=rank_id, num_samples=4,
            train_ds = train_ds.batch(batch_size=opt['batch_size'], drop_remainder=True)
            step_size_per_epoch = train_ds.get_dataset_size()
            opt['step_size_per_epoch'] = step_size_per_epoch
            train_ds = train_ds.repeat(opt['max_epoch'])

            eval_dataset = ScannetDetectionDataset('val', num_points=opt['num_point'], data_root=opt['data_root'])
            eval_ds = ds.GeneratorDataset(eval_dataset, ["point_clouds", "center_label", "heading_class_label", "heading_residual_label",
                                                            "size_class_label", "size_residual_label", "size_gts", "sem_cls_label", "box_label_mask",
                                                            "point_obj_mask", "point_instance_label", "scan_idx", "pcl_color"],
                                                            num_parallel_workers = 1, num_shards=rank_size, shard_id=rank_id, shuffle=False)
            eval_ds = eval_ds.batch(batch_size=opt['batch_size'], drop_remainder=True)
    elif opt['model_name'] == 'pointpilliars':
        args.cfg_path = opt['train_set_up']['cfg_path'] 
        args.save_path = opt['train_set_up']['save_path'] 
        args.device_target = opt['train_set_up']['device_target'] 
        args.is_distributed = opt['train_set_up']['is_distributed'] 
        args.GPU_ID = opt['train_set_up']['GPU_ID'] 
        # NOTE(review): `args` is not a parameter of this function -- it appears
        # to resolve to the module-level `args` parsed in the __main__ block;
        # confirm against the (not visible) function signature.
        cfg, rank, device_num = set_default(args)
        # Only rank 0 writes checkpoints and prints progress in distributed runs.
        save_ckpt_log_flag = rank == 0
        train_cfg = cfg['train_config']


    """Load Module"""
    if opt['model_name'] == 'groupfree_3d':
        # NOTE(review): Groupfree3DModel, DATASET_CONFIG and get_loss come from
        # imports that are commented out at the top of this file, so taking this
        # branch raises NameError -- re-enable those imports before using it.
        num_input_channel = 0
        model = Groupfree3DModel(num_class=DATASET_CONFIG.num_class,
                                 num_heading_bin=DATASET_CONFIG.num_heading_bin,
                                 num_size_cluster=DATASET_CONFIG.num_size_cluster,
                                 mean_size_arr=DATASET_CONFIG.mean_size_arr,
                                 input_feature_dim=num_input_channel,
                                 width=opt['width'],
                                 bn_momentum=opt['bn_momentum'],
                                 sync_bn=False,
                                 num_proposal=opt['num_target'],
                                 sampling=opt['sampling'],
                                 dropout=opt['transformer_dropout'],
                                 activation=opt['transformer_activation'],
                                 nhead=opt['nhead'],
                                 num_decoder_layers=opt['num_decoder_layers'],
                                 dim_feedforward=opt['dim_feedforward'],
                                 self_position_embedding=opt['self_position_embedding'],
                                 cross_position_embedding=opt['cross_position_embedding'],
                                 size_cls_agnostic=False)
        if opt['resume'] == True:
            # Resume from an existing checkpoint; parameters that fail to load
            # are reported rather than raising.
            param_dict = mindspore.load_checkpoint(opt['checkpoint_path'])
            param_not_load = mindspore.load_param_into_net(model, param_dict)
            logger.info(f"parameters not load: {param_not_load}")
        if get_rank() == 0:
            logger.info(str(model))
        model.set_train(True)
    elif opt['model_name'] == 'pointpilliars':
        # NOTE: 'pointpilliars' (sic) is the spelling used consistently in this
        # file; the YAML config's model_name must use the same spelling.
        pointpillarsnet, dataset = get_model_dataset(cfg, True)
        if save_ckpt_log_flag:
            print('Set up PointPillarsNet', flush=True)
        input_cfg = cfg['train_input_reader']
        n_epochs = input_cfg['max_num_epochs']
        batch_size = input_cfg['batch_size']
        # Steps per epoch per device after sharding the dataset across devices.
        steps_per_epoch = int(len(dataset) / batch_size / device_num)

    """lr schedule"""
    if opt['model_name'] == 'groupfree_3d':
        # Piecewise-constant decay with separate rates for decoder / non-decoder
        # parameters. NOTE(review): step_size_per_epoch is not defined in the
        # visible part of this function -- confirm it is set earlier.
        milestones=[(m - opt['warmup_epoch']) * step_size_per_epoch for m in opt['lr_decay_epochs']]
        learning_rates_nondecoder = [opt['learning_rate'] * (opt['lr_decay_rate'] ** x) for x in range(0, len(opt['lr_decay_epochs']))]
        learning_rates_decoder = [opt['decoder_learning_rate'] * (opt['lr_decay_rate'] ** x) for x in range(0, len(opt['lr_decay_epochs']))]
        lr_nondecoder = nn.piecewise_constant_lr(milestones, learning_rates_nondecoder)
        lr_decoder = nn.piecewise_constant_lr(milestones, learning_rates_decoder)
    elif opt['model_name'] == 'pointpilliars':
        # Exponential decay schedule driven entirely by the YAML train_config.
        lr_cfg = train_cfg['learning_rate']
        lr = nn.exponential_decay_lr(
            learning_rate=lr_cfg['initial_learning_rate'],
            decay_rate=lr_cfg['decay_rate'],
            total_step=n_epochs * steps_per_epoch,
            step_per_epoch=steps_per_epoch,
            decay_epoch=lr_cfg['decay_epoch'],
            is_stair=lr_cfg['is_stair']
            )

    """define optimizer & set learning rate"""
    if opt['model_name'] == 'groupfree_3d':
        # Split parameters by name so the transformer decoder trains with its
        # own (typically smaller) learning-rate schedule.
        decoder_params = list(filter(lambda x: 'decoder' in x.name, model.trainable_params()))
        no_decoder_params = list(filter(lambda x: 'decoder' not in x.name, model.trainable_params()))
        group_params = [
            {'params': decoder_params, 'lr': lr_decoder},
            {'params': no_decoder_params, 'lr': lr_nondecoder},
        ]
        optim = nn.AdamWeightDecay(group_params, weight_decay=opt['weight_decay'])
    elif opt['model_name'] == 'pointpilliars':
        optimizer = Adam(
        pointpillarsnet.trainable_params(),
        learning_rate=lr,
        weight_decay=train_cfg['weight_decay']
    )

    """define loss function"""
    if opt['model_name'] == 'groupfree_3d':
        network_loss = get_loss
        net_with_loss = CustomWithLossCell(model, network_loss)
        # net_train = nn.TrainOneStepCell(net_with_loss, optim)
        net_train = TrainOneStepCellGradsNorm(net_with_loss, optim)
        train_net(train_ds, eval_ds, model, net_train, opt, logger)
    elif opt['model_name'] == 'pointpilliars':
        # Wrap the network with its loss cell and a distributed training wrapper.
        pointpillarsnet_wloss = PointPillarsWithLossCell(pointpillarsnet, cfg['model'])
        network = TrainingWrapper(pointpillarsnet_wloss, optimizer)
        train_column_names = dataset.data_keys
        # Shard the dataset so each device sees a disjoint slice.
        sampler = de.DistributedSampler(device_num, rank)
        # NOTE: this local `ds` shadows the module-level alias
        # `mindspore.dataset as ds` imported at the top of the file.
        ds = de.GeneratorDataset(
            dataset,
            column_names=train_column_names,
            python_multiprocessing=True,
            num_parallel_workers=1,
            max_rowsize=100,
            sampler=sampler
        )
        ds = ds.batch(batch_size, drop_remainder=True)
        ds = ds.repeat(n_epochs)
        data_loader = ds.create_dict_iterator(num_epochs=n_epochs)
        network.set_train()
        if save_ckpt_log_flag:
            # Checkpoint once per epoch on rank 0 only.
            ckpt_config = CheckpointConfig(
                save_checkpoint_steps=steps_per_epoch,
                keep_checkpoint_max=train_cfg['keep_checkpoint_max']
            )
            ckpt_cb = ModelCheckpoint(
                config=ckpt_config,
                directory=args.save_path,
                prefix='pointpillars'
            )
            # NOTE(review): _InternalCallbackParam is a private MindSpore API and
            # may change between releases -- verify against the pinned version.
            cb_params = _InternalCallbackParam()
            cb_params.train_network = pointpillarsnet
            cb_params.epoch_num = n_epochs
            cb_params.cur_epoch_num = 1
            run_context = RunContext(cb_params)
            ckpt_cb.begin(run_context)
        # NOTE(review): cb_params, ckpt_cb and run_context are only bound when
        # save_ckpt_log_flag is True; on non-zero ranks this call raises
        # UnboundLocalError -- confirm whether train_net_pointpilliar needs them
        # there or whether None placeholders should be passed instead.
        train_net_pointpilliar(args, train_cfg, data_loader, network, save_ckpt_log_flag, cb_params, ckpt_cb, run_context, steps_per_epoch, batch_size, device_num)


if __name__ == '__main__':
    # Entry point: parse the config path, load the YAML options and launch training.
    parser = argparse.ArgumentParser(description='3d detection train')
    parser.add_argument('--opt', default='/data1/mind3d-master/configs/pointpillars/car_xyres16.yaml', help='Path to config file.')
    # `train` reads `args.save_path` from this module-level namespace when saving
    # checkpoints on rank 0, but no such option was defined, which would raise
    # AttributeError at checkpoint time -- define it here with a sane default.
    parser.add_argument('--save_path', default='./checkpoints', help='Directory where checkpoints are saved.')
    # parse_known_args tolerates extra CLI options (e.g. launcher-injected flags)
    # instead of erroring out like parse_args would.
    args = parser.parse_known_args()[0]
    opt = load_yaml(args.opt)
    train(opt)

