""" DGCNN segmentation training script."""

import argparse
import os
import sys

import mindspore
import mindspore.nn as nn
from mindspore import context,load_checkpoint,load_param_into_net
from mindspore.context import ParallelMode
from mindspore.common import set_seed
import mindspore.dataset as ds
from mindspore.communication.management import init
from mindspore.train import Model
from mindspore.train.callback import ModelCheckpoint,CheckpointConfig
from mindspore.train.callback import TimeMonitor

# Fix global RNG seeds up front so runs are reproducible.
set_seed(1)

# Make the repository root (two levels above this script) and its "models"
# directory importable, so the `mind3d` package imports below resolve no
# matter which working directory the script is launched from.
BASE_DIR=os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(os.path.dirname(BASE_DIR)))
ROOT_DIR=os.path.join(os.path.dirname(os.path.dirname(BASE_DIR)),"models")
sys.path.append(ROOT_DIR)

from mind3d.dataset.S3DIS import S3DISDataset
from mind3d.utils.callback.monitor import ValAccMonitor
from mind3d.models.losses.Lossfunction import CrossEntropySmooth_SEG,NLLLoss
from mind3d.utils.load_yaml import load_yaml
from mind3d.models.dgcnn import DGCNN_seg
from mind3d.dataset.scannet import create_scannet_dataset
from mind3d.models.pointnet2 import Pointnet2segModelSSG
from mind3d.utils.common import mIoU, CustomWithLossCell, ScannetWithEvalCell, CallbackSaveBymIoU, CheckLoss

def dgcnn_seg_train(opt, test_area):
    """Train DGCNN for semantic segmentation on S3DIS.

    Args:
        opt (dict): Parsed YAML configuration with ``device_target``,
            ``run_distribute``, ``datasets`` and ``train`` sections.
        test_area (int): S3DIS area held out for validation; the remaining
            areas are used for training.

    Raises:
        ValueError: If ``device_target`` or ``lr_decay_mode`` is unsupported.
    """
    # Device IDs come from the standard MindSpore launch environment.
    device_id = int(os.getenv('DEVICE_ID', '0'))
    device_num = int(os.getenv('RANK_SIZE', '1'))

    if opt['device_target'] not in ("Ascend", "GPU"):
        raise ValueError("Unsupported platform {}".format(opt['device_target']))

    if opt['device_target'] == "Ascend":
        context.set_context(mode=context.GRAPH_MODE,
                            device_target="Ascend",
                            save_graphs=False,
                            device_id=device_id)
        # DGCNN builds a deep graph; raise the call-depth limit accordingly.
        context.set_context(max_call_depth=2048)
    else:
        context.set_context(mode=context.GRAPH_MODE,
                            device_target="GPU",
                            save_graphs=False,
                            max_call_depth=2048)

    # Data-parallel setup when launched with RANK_SIZE > 1.
    if opt['run_distribute']:
        if opt['device_target'] == "Ascend":
            if device_num > 1:
                init()
                context.set_auto_parallel_context(
                    parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)
        else:
            if device_num > 1:
                # Shared-memory dataset queues can conflict with multi-GPU runs.
                mindspore.dataset.config.set_enable_shared_mem(False)
                context.set_auto_parallel_context(
                    parallel_mode=ParallelMode.DATA_PARALLEL,
                    gradients_mean=True,
                    device_num=device_num)
                set_seed(1234)
                init()
            else:
                context.set_context(device_id=device_id)

    # Data pipeline: train on all areas except `test_area`, validate on it.
    dataset = S3DISDataset(split="train",
                           num_points=opt["datasets"]['train'].get("resize"),
                           test_area=test_area)
    dataset_train = ds.GeneratorDataset(dataset, ["data", "label"], shuffle=True)
    dataset_train = dataset_train.batch(opt["datasets"]['train'].get("batch_size"),
                                        drop_remainder=True)

    dataset = S3DISDataset(split='test',
                           num_points=opt["datasets"]['val'].get("resize"),
                           test_area=test_area)
    dataset_val = ds.GeneratorDataset(dataset, ["data", "label"], shuffle=False)
    dataset_val = dataset_val.batch(opt["datasets"]['val'].get("batch_size"),
                                    drop_remainder=True)

    step_size = dataset_train.get_dataset_size()

    # Model loading.
    network = DGCNN_seg(opt, k=opt["train"].get("k"))

    # Optionally restore pretrained weights.
    if opt['train']['pretrained_ckpt'].endswith('.ckpt'):
        print("Load checkpoint: %s" % opt['train']['pretrained_ckpt'])
        param_dict = load_checkpoint(opt['train']['pretrained_ckpt'])
        load_param_into_net(network, param_dict)

    # Learning-rate schedule.
    if opt['train']['lr_decay_mode'] == "cosine_decay_lr":
        lr = nn.cosine_decay_lr(min_lr=opt['train']['min_lr'],
                                max_lr=opt['train']['max_lr'],
                                total_step=opt['train']['epoch_size'] * step_size,
                                step_per_epoch=step_size,
                                decay_epoch=opt['train']['decay_epoch'])
    elif opt['train']['lr_decay_mode'] == "piecewise_constant_lr":
        lr = nn.piecewise_constant_lr(opt['train']['milestone'],
                                      opt['train']['learning_rates'])
    else:
        # Fail fast with a clear message instead of hitting an
        # UnboundLocalError on `lr` at the optimizer below.
        raise ValueError(
            "Unsupported lr_decay_mode {}".format(opt['train']['lr_decay_mode']))

    # Optimizer.
    network_opt = nn.Adam(network.trainable_params(), lr, opt['train']['momentum'])

    # Loss: label-smoothed cross entropy over the 13 S3DIS classes.
    network_loss = CrossEntropySmooth_SEG(num_classes=13, reduction="mean", sparse=True)

    # Metrics.
    metrics = {"Accuracy": nn.Accuracy()}

    # Init the model.
    model = Model(network, loss_fn=network_loss, optimizer=network_opt, metrics=metrics)

    # Checkpointing: save once per epoch, named after the held-out area.
    ckpt_config = CheckpointConfig(save_checkpoint_steps=step_size,
                                   keep_checkpoint_max=opt['train']['keep_checkpoint_max'])
    prefix_name = "model_%s" % test_area
    ckpt_callback = ModelCheckpoint(prefix=prefix_name,
                                    directory=opt['train']['ckpt_save_dir'],
                                    config=ckpt_config)

    # Begin to train.
    print("============ Starting Training ================")
    model.train(opt['train']['epoch_size'],
                dataset_train,
                callbacks=[ckpt_callback,
                           ValAccMonitor(model, dataset_val, opt['train']['epoch_size'])],
                dataset_sink_mode=opt['train']['dataset_sink_mode'])
    print("============ End Training ====================")


def pointnet2_seg_train(opt):
    """Train PointNet++ (SSG) for semantic segmentation on ScanNet.

    Args:
        opt (dict): Parsed YAML configuration with ``device_target``,
            ``run_distribute``, ``datasets``, ``train`` and ``epoch`` entries.

    Raises:
        ValueError: If ``device_target`` or ``lr_decay_mode`` is unsupported.
    """
    # Device IDs come from the standard MindSpore launch environment.
    device_id = int(os.getenv('DEVICE_ID', '0'))
    device_num = int(os.getenv('RANK_SIZE', '1'))

    if opt['device_target'] not in ("Ascend", "GPU"):
        raise ValueError("Unsupported platform {}".format(opt['device_target']))

    if opt['device_target'] == "Ascend":
        context.set_context(mode=context.GRAPH_MODE,
                            device_target="Ascend",
                            save_graphs=False,
                            device_id=device_id)
        context.set_context(max_call_depth=2048)
    else:
        # NOTE(review): 204800 is 100x the Ascend/DGCNN limit above —
        # looks like it may be a typo for 2048; confirm before changing.
        context.set_context(mode=context.GRAPH_MODE,
                            device_target="GPU",
                            save_graphs=False,
                            max_call_depth=204800)

    # Data-parallel setup when launched with RANK_SIZE > 1.
    if opt['run_distribute']:
        if opt['device_target'] == "Ascend":
            if device_num > 1:
                init()
                context.set_auto_parallel_context(
                    parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)
        else:
            if device_num > 1:
                # Shared-memory dataset queues can conflict with multi-GPU runs.
                mindspore.dataset.config.set_enable_shared_mem(False)
                context.set_auto_parallel_context(
                    parallel_mode=ParallelMode.DATA_PARALLEL,
                    gradients_mean=True,
                    device_num=device_num)
                set_seed(1234)
                init()
            else:
                context.set_context(device_id=device_id)

    # Load datasets.
    train_dataset = create_scannet_dataset(opt, split="train")
    eval_dataset = create_scannet_dataset(opt, split="val")

    steps_per_epoch = train_dataset.get_dataset_size()
    test_steps_per_epoch = eval_dataset.get_dataset_size()
    step_size = steps_per_epoch

    # Model loading.
    network = Pointnet2segModelSSG(
        num_classes=opt['datasets']['train'].get('num_classes'),
        use_color=opt['datasets']['train'].get('use_color'),
        use_normal=opt['datasets']['train'].get('use_normal'))

    # Optionally restore pretrained weights.
    if opt['train']['pretrained_ckpt'].endswith('.ckpt'):
        print("Load checkpoint: %s" % opt['train']['pretrained_ckpt'])
        param_dict = load_checkpoint(opt['train']['pretrained_ckpt'])
        load_param_into_net(network, param_dict)

    # Learning-rate schedule.
    if opt['train']['lr_decay_mode'] == "cosine_decay_lr":
        lr = nn.cosine_decay_lr(min_lr=opt['train']['min_lr'],
                                max_lr=opt['train']['max_lr'],
                                total_step=opt['epoch'] * step_size,
                                step_per_epoch=step_size,
                                decay_epoch=opt['train']['decay_epoch'])
    elif opt['train']['lr_decay_mode'] == "piecewise_constant_lr":
        lr = nn.piecewise_constant_lr(opt['train']['milestone'],
                                      opt['train']['learning_rates'])
    else:
        # Fail fast with a clear message instead of hitting an
        # UnboundLocalError on `lr` at the optimizer below.
        raise ValueError(
            "Unsupported lr_decay_mode {}".format(opt['train']['lr_decay_mode']))

    # Optimizer.
    network_opt = nn.Adam(network.trainable_params(), lr, opt['train']['momentum'])

    # Callbacks: per-step loss check and epoch timing.
    loss_cb = CheckLoss()
    time_cb = TimeMonitor(step_size)

    # Loss wrapped with the network so Model.train can drive it directly.
    network_loss = NLLLoss(reduction="mean")
    net_with_criterion = CustomWithLossCell(network, network_loss)

    # Metric: mean IoU over the validation split.
    metrics = {'IoU': mIoU(test_steps_per_epoch,
                           opt['datasets']['val'].get('batch'),
                           opt['datasets']['train'].get('num_classes'))}

    # Init the model with a dedicated eval network for metric computation.
    eval_network = ScannetWithEvalCell(network, True)
    model = Model(net_with_criterion,
                  optimizer=network_opt,
                  eval_network=eval_network,
                  metrics=metrics)

    # Checkpoint callback keeps the best model by mIoU.
    ckpoint_cb = CallbackSaveBymIoU(model, eval_dataset,
                                    save_path=opt['train']['ckpt_save_dir'])
    print("============== Starting Training ==============")
    model.train(opt['epoch'], train_dataset,
                callbacks=[loss_cb, time_cb, ckpoint_cb],
                dataset_sink_mode=True)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train.')
    parser.add_argument('-opt', type=str,
                        default="/home/czy/HuaWei/final2023/mind3d/configs/pointnet2/pointnet2_scannet_seg.yaml",
                        help='Path to option YAML file.')
    # Use a real int default rather than relying on argparse coercing the
    # string "1" through `type=int`.
    parser.add_argument("-test_area", type=int, default=1,
                        help="S3DIS area held out for validation (DGCNN only).")
    args = parser.parse_known_args()[0]
    opt = load_yaml(args.opt)
    network_name = opt["model_name"]
    if network_name == "DGCNN_seg":
        dgcnn_seg_train(opt, args.test_area)
    elif network_name == "Pointnet2_seg":
        pointnet2_seg_train(opt)
    else:
        # Previously an unknown model name exited silently; fail loudly instead.
        raise ValueError("Unknown model_name {!r} in config {}".format(network_name, args.opt))