import argparse
import os
import sys

import mindspore
import mindspore.nn as nn
from mindspore import context,load_checkpoint,load_param_into_net
from mindspore.context import ParallelMode
from mindspore.common import set_seed
import mindspore.dataset as ds
from mindspore.communication.management import init
from mindspore.train import Model
from mindspore.train.callback import ModelCheckpoint,CheckpointConfig

# Fix global RNG seeds for reproducible training runs.
set_seed(1)

# Make the repository importable regardless of the current working directory:
# put the grandparent of this file (the repo root) and its "models" directory
# on sys.path so the `mind3d` imports below resolve.
BASE_DIR=os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(os.path.dirname(BASE_DIR)))
ROOT_DIR=os.path.join(os.path.dirname(os.path.dirname(BASE_DIR)),"models")
sys.path.append(ROOT_DIR)

from mind3d.dataset.ModelNet40 import ModelNet40Dataset
from mind3d.utils.callback.monitor import ValAccMonitor
from mind3d.models.losses.Lossfunction import CrossEntropySmooth_CLS,NLLLoss
from mind3d.utils.load_yaml import load_yaml
from mind3d.models.dgcnn import DGCNN_cls
from mind3d.models.pointnet import PointNet_cls
from mind3d.models.pointnet2 import Pointnet2clsModelSSG
from mind3d.models.PointTransformer import PointTransformerCls
from mind3d.utils.PointTransformerUtils import shift_point_cloud, random_point_dropout, random_scale_point_cloud



def _configure_context(opt, device_id, device_num):
    """Configure the MindSpore execution context and optional data parallelism.

    Args:
        opt (dict): parsed YAML options; reads 'device_target' and 'run_distribute'.
        device_id (int): device index from the DEVICE_ID env var.
        device_num (int): world size from the RANK_SIZE env var.

    Raises:
        ValueError: if 'device_target' is neither "Ascend" nor "GPU".
    """
    if opt['device_target'] not in ("Ascend", "GPU"):
        raise ValueError("Unsupported platform {}".format(opt['device_target']))

    if opt['device_target'] == "Ascend":
        context.set_context(mode=context.GRAPH_MODE,
                            device_target="Ascend",
                            save_graphs=False,
                            device_id=device_id)
        # Deep point-cloud networks exceed the default call-depth limit in graph mode.
        context.set_context(max_call_depth=2048)
    else:
        context.set_context(mode=context.GRAPH_MODE,
                            device_target="GPU",
                            save_graphs=False,
                            max_call_depth=2048)

    if opt['run_distribute']:
        if opt['device_target'] == "Ascend":
            if device_num > 1:
                init()
                context.set_auto_parallel_context(
                    parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)
        elif device_num > 1:
            # Shared-memory queues are disabled for multi-device GPU data loading.
            mindspore.dataset.config.set_enable_shared_mem(False)
            context.set_auto_parallel_context(
                parallel_mode=ParallelMode.DATA_PARALLEL,
                gradients_mean=True,
                device_num=device_num)
            mindspore.common.set_seed(1234)
            init()
        else:
            context.set_context(device_id=device_id)


def _build_dataset(opt, split, use_aug=False):
    """Build a batched ModelNet40 dataset for the given split ('train' or 'val').

    Args:
        opt (dict): parsed YAML options; reads opt['datasets'][split].
        split (str): dataset split name, also the key under opt['datasets'].
        use_aug (bool): apply point-cloud augmentations (train split only).

    Returns:
        mindspore.dataset.Dataset: shuffled, batched dataset (remainder dropped).
    """
    cfg = opt['datasets'][split]
    source = ModelNet40Dataset(root_path=cfg.get('data_url'),
                               split=split,
                               num_points=cfg.get('resize'),
                               use_norm=cfg.get('use_norm'))
    dataset = ds.GeneratorDataset(source, ["data", "label"], shuffle=True)
    if use_aug:
        # NOTE(review): these factories are called to produce map operations;
        # presumably each returns a callable transform — confirm in mind3d utils.
        trans = [random_point_dropout(), random_scale_point_cloud(), shift_point_cloud()]
        dataset = dataset.map(operations=trans, input_columns="data")
    return dataset.batch(batch_size=cfg.get('batch_size'), drop_remainder=True)


def _build_network(opt):
    """Instantiate the classification network named by opt['model_name'].

    Raises:
        ValueError: if the model name is not one of the supported networks.
    """
    name = opt["model_name"]
    if name == "DGCNN_cls":
        return DGCNN_cls(opt, output_channels=opt['train'].get("num_classes"))
    if name == "PointNet_cls":
        return PointNet_cls(k=opt['train'].get('num_classes'))
    if name == "Pointnet2clsModelSSG":
        return Pointnet2clsModelSSG(normal_channel=opt['datasets']['train'].get('use_norm'))
    if name == "PointTransformerCls":
        return PointTransformerCls()
    raise ValueError("Unsupported model {}".format(name))


def _build_loss(network_name):
    """Return the loss function matching the selected network.

    Raises:
        ValueError: if the model name is not one of the supported networks.
    """
    if network_name == "DGCNN_cls":
        return CrossEntropySmooth_CLS(num_classes=40)
    if network_name in ("PointNet_cls", "Pointnet2clsModelSSG"):
        # Both PointNet variants emit log-probabilities, hence NLL loss.
        return NLLLoss(reduction="mean")
    if network_name == "PointTransformerCls":
        return nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    raise ValueError("Unsupported model {}".format(network_name))


def _build_lr(opt, step_size):
    """Build the learning-rate schedule selected by opt['train']['lr_decay_mode'].

    Args:
        opt (dict): parsed YAML options.
        step_size (int): number of batches per epoch.

    Raises:
        ValueError: if the decay mode is unknown (previously this fell through
        and caused a NameError further down).
    """
    mode = opt['train']['lr_decay_mode']
    if mode == "cosine_decay_lr":
        return nn.cosine_decay_lr(min_lr=opt['train']['min_lr'],
                                  max_lr=opt['train']['max_lr'],
                                  total_step=opt['train']['epoch_size'] * step_size,
                                  step_per_epoch=step_size,
                                  decay_epoch=opt['train']['decay_epoch'])
    if mode == "piecewise_constant_lr":
        return nn.piecewise_constant_lr(opt['train']['milestone'], opt['train']['learning_rates'])
    raise ValueError("Unsupported lr_decay_mode {}".format(mode))


def train(opt):
    """Train a point-cloud classifier on ModelNet40.

    Args:
        opt (dict): options parsed from the YAML config (device, datasets,
            model name, optimizer/schedule, checkpointing).

    Raises:
        ValueError: on unsupported device target, model name, or lr decay mode.
    """
    # Device selection comes from the launcher environment.
    device_id = int(os.getenv('DEVICE_ID', '0'))
    device_num = int(os.getenv('RANK_SIZE', '1'))
    _configure_context(opt, device_id, device_num)

    # Data pipeline: augmentations are applied to the training split only.
    dataset_train = _build_dataset(opt, "train", use_aug=opt['train'].get('use_aug'))
    # NOTE(review): the validation pipeline shuffles too; harmless for accuracy
    # metrics but worth confirming it is intentional.
    dataset_val = _build_dataset(opt, "val")
    step_size = dataset_train.get_dataset_size()

    # Model creation and optional checkpoint restore.
    network = _build_network(opt)
    ckpt_file = opt['train'].get('ckpt_file')
    # Guarded with .get so a missing/None ckpt_file means "train from scratch"
    # instead of raising KeyError/AttributeError.
    if ckpt_file and ckpt_file.endswith('.ckpt'):
        print("Load checkpoint: %s" % ckpt_file)
        param_dict = load_checkpoint(ckpt_file)
        load_param_into_net(network, param_dict)

    # Optimizer, loss, metrics.
    lr = _build_lr(opt, step_size)
    network_opt = nn.Adam(network.trainable_params(), lr, opt['train']['momentum'],
                          weight_decay=float(opt['train'].get('weight_decay')))
    network_loss = _build_loss(opt["model_name"])
    model = Model(network, loss_fn=network_loss, optimizer=network_opt,
                  metrics={"Accuracy": nn.Accuracy()})

    # Checkpointing: save once per epoch, keep a bounded history.
    ckpt_config = CheckpointConfig(save_checkpoint_steps=step_size,
                                   keep_checkpoint_max=opt['train']['keep_checkpoint_max'])
    ckpt_callback = ModelCheckpoint(prefix=opt["model_name"],
                                    directory=opt['train']['ckpt_save_dir'],
                                    config=ckpt_config)

    # Begin to train; ValAccMonitor evaluates on the validation set each epoch.
    model.train(opt['train']['epoch_size'],
                dataset_train,
                callbacks=[ckpt_callback, ValAccMonitor(model, dataset_val, opt['train']['epoch_size'])],
                dataset_sink_mode=opt['train']['dataset_sink_mode'])


if __name__ == '__main__':
    # Parse only the options we know about; ignore extras (e.g. launcher args).
    parser = argparse.ArgumentParser(description='Train.')
    parser.add_argument(
        '-opt',
        default="/home/czy/HuaWei/final2023/mind3d/configs/pointnet2/pointnet2_modelnet40_cls.yaml",
        help='Path to option YAML file.')
    cli_args, _ = parser.parse_known_args()
    train(load_yaml(cli_args.opt))
