# Author: Xiangtai Li
# Email: lxtpku@pku.edu.cn
"""
    Distribute Training Code For Fast training.
"""
import argparse
import os
import os.path as osp
import timeit
import time
import numpy as np
from mindspore import dtype as mstype
from mindspore.train.callback import ModelCheckpoint
import mindspore
from mindspore import load_checkpoint, load_param_into_net
from mindspore import context, Model
from mindspore import dataset as ds
import mindspore.nn as nn
from mindspore.train.model import Model
from mindspore.context import ParallelMode
from mindspore.train.callback import LossMonitor, TimeMonitor, ModelCheckpoint, CheckpointConfig
from src.logger import Logger as Log
from src.tools import adjust_learning_rate, all_reduce_tensor
from src.cityscapes import Cityscapes, create_traindataset
from src.loss import CriterionOhemDSN, CriterionDSN
from src import loss
from src.DualGCNNet import DualSeg_res101,TrainOneStepCell, TrainOneStepWithLossScaleCell
from mindspore.context import ParallelMode
from mindspore.train.callback import SummaryCollector
from mindspore.train.summary import SummaryRecord
from mindspore.nn.wrap.cell_wrapper import WithLossCell
from mindspore.train.loss_scale_manager import DynamicLossScaleManager


def str2bool(v):
    """Convert a command-line string into a boolean.

    Accepts the usual truthy/falsy spellings case-insensitively.

    Args:
        v (str): value supplied on the command line.

    Returns:
        bool: True for yes/true/t/y/1, False for no/false/f/n/0.

    Raises:
        argparse.ArgumentTypeError: if *v* matches neither set.
    """
    lowered = v.lower()
    if lowered in {'yes', 'true', 't', 'y', '1'}:
        return True
    if lowered in {'no', 'false', 'f', 'n', '0'}:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')


def get_arguments():
    """Parse all command-line arguments for DGCNet-ResNet101 training.

    Returns:
        argparse.Namespace: the parsed arguments.

    Note:
        Boolean flags use ``str2bool`` rather than ``type=bool``: argparse
        with ``type=bool`` treats ANY non-empty string (including "False")
        as True, so ``--ft False`` would silently enable fine-tuning.
    """
    parser = argparse.ArgumentParser(description="DGCNet-ResNet101 Network")
    parser.add_argument("--batch_size_per_gpu", type=int, default=1,
                        help="Number of images sent to the network in one step.")
    parser.add_argument("--batch_size", type=int, default=8,
                        help="Number of images sent to the network in one step.")
    parser.add_argument('--gpu_num', type=int, default=8)
    parser.add_argument("--data_dir", type=str, default="./data",
                        help="Path to the directory containing the Cityscapes dataset.")
    parser.add_argument("--data_list", type=str, default="./data/cityscapes/train.txt",
                        help="Path to the file listing the images in the dataset.")
    parser.add_argument("--data_set", type=str, default="cityscapes", help="dataset to train")
    parser.add_argument("--ignore_label", type=int, default=255,
                        help="The index of the label to ignore during the training.")
    parser.add_argument("--input_size", type=int, default=832,
                        help="Comma-separated string with height and width of images.")
    parser.add_argument("--learning_rate", type=float, default=1e-2,
                        help="Base learning rate for training with polynomial decay.")
    parser.add_argument("--momentum", type=float, default=0.9,
                        help="Momentum component of the optimiser.")
    parser.add_argument("--num_classes", type=int, default=19,
                        help="Number of classes to predict (including background).")
    parser.add_argument("--num_steps", type=int, default=60000,
                        help="Number of training steps.")
    parser.add_argument("--power", type=float, default=0.9,
                        help="Decay parameter to compute the learning rate.")
    parser.add_argument("--weight_decay", type=float, default=5e-4,
                        help="Regularisation parameter for L2-loss.")
    parser.add_argument("--num_workers", type=int, default=1)
    parser.add_argument("--random_mirror", action="store_true", default=True,
                        help="Whether to randomly mirror the inputs during the training.")
    parser.add_argument("--random_scale", action="store_true", default=True,
                        help="Whether to randomly scale the inputs during the training.")
    parser.add_argument("--random_seed", type=int, default=1234,
                        help="Random seed to have reproducible results.")
    # BUG FIX: was type=bool, which maps "False" -> True; str2bool parses correctly.
    parser.add_argument('--run_distribute', type=str2bool, default=False, help='Run distribute')
    parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
    parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.")
    # ***** Params for save and load ******
    parser.add_argument("--restore_from", type=str, default=None,
                        help="Where restore models parameters from.")
    parser.add_argument("--save_checkpoint_steps", type=int, default=50000,
                        help="Save checkpoint every often.")
    parser.add_argument("--save_checkpoint_num", type=int, default=1,
                        help="Save checkpoint numbers, default is 1.")
    parser.add_argument("--save_dir", type=str, default=None,
                        help="Where to save snapshots of the models.")
    parser.add_argument("--save_start", type=int, default=5000)
    parser.add_argument("--gpu", type=str, default=None,
                        help="choose gpu device.")
    # BUG FIX: was type=bool (see docstring).
    parser.add_argument("--ft", type=str2bool, default=False,
                        help="fine-tune the models with large input size.")
    # **** Params for OHEM **** #
    parser.add_argument("--ohem", type=str2bool, default='False',
                        help="use hard negative mining")
    parser.add_argument("--ohem_thres", type=float, default=0.7,
                        help="choose the samples with correct probability underthe threshold.")
    parser.add_argument("--ohem_keep", type=int, default=100000,
                        help="choose the samples with correct probability underthe threshold.")
    # ***** Params for logging ***** #
    parser.add_argument('--log_level', default="info", type=str,
                        dest='log_level', help='To set the log level to files.')
    parser.add_argument('--log_file', default="./eval.log", type=str,
                        dest='log_file', help='The path of log files.')
    parser.add_argument("--log_format", default="%(asctime)s %(levelname)-7s %(message)s", type=str,
                        dest="log_format", help="format of log files")
    parser.add_argument('--stdout_level', default="info", type=str,
                        dest='stdout_level', help='To set the level to print to screen.')
    # BUG FIX: was type=bool (see docstring).
    parser.add_argument("--rewrite", default=False, type=str2bool,
                        dest="rewrite", help="whether write the file when using log")
    parser.add_argument("--rgb", type=str2bool, default='False')
    # ***** Params for Distributed Traning ***** #
    parser.add_argument('--apex', action='store_true', default=False,
                        help='Use Nvidia Apex Distributed Data Parallel')
    parser.add_argument("--local_rank", default=0, type=int, help="parameter used by apex library")
    return parser.parse_args()

# Module-level setup: runs at import time.
start = timeit.default_timer()  # wall-clock reference for total-training-time logging
args = get_arguments()  # NOTE: parses sys.argv at import time (module-level side effect)
device_id = args.device_id
device_num = args.device_num
target = 'GPU'  # this script targets the GPU backend only

class BuildTrainNetwork(nn.Cell):
    """Bundle a backbone network and a criterion into one loss-producing cell.

    The wrapped cell maps (input_data, label) directly to a scalar loss so it
    can be handed to a one-step training wrapper.
    """

    def __init__(self, network, criterion):
        super().__init__()
        self.network = network      # backbone producing predictions
        self.criterion = criterion  # loss applied to (predictions, label)

    def construct(self, input_data, label):
        """Forward pass: return criterion(network(input_data), label)."""
        predictions = self.network(input_data)
        return self.criterion(predictions, label)

def train():
    """Train DualSeg ResNet-101 (DGCNet) on Cityscapes with MindSpore.

    Reads configuration from the module-level ``args`` namespace, builds the
    dataset / loss network / optimizer, optionally restores pretrained
    weights, and runs ``Model.train`` with loss/time/checkpoint callbacks.
    The final parameters are saved under ``args.save_dir``.

    Bug fixes vs. the original:
      * the final ``save_checkpoint`` referenced the undefined name
        ``train_net`` (its definition was commented out) and the
        non-existent ``args.arch`` — both raised at the end of training;
      * ``CUDA_VISIBLE_DEVICES`` is now set before any device work;
      * removed the unused dict iterator and the dead commented-out
        manual training loop.
    """
    # Graph mode on the selected GPU device.
    context.set_context(mode=context.GRAPH_MODE, device_target=target)
    context.set_context(device_id=device_id)

    # Restrict visible CUDA devices when explicitly requested.
    if args.gpu is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    # Make sure the snapshot directory exists before logging into it.
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # Launch the logger (file + stdout).
    Log.init(
        log_level=args.log_level,
        log_file=osp.join(args.save_dir, args.log_file),
        log_format=args.log_format,
        rewrite=args.rewrite,
        stdout_level=args.stdout_level
    )

    # RGB input for ImageNet-pretrained models; BGR for Caffe-pretrained ones.
    input_size = (args.input_size, args.input_size)
    if args.rgb:
        IMG_MEAN = np.array((0.485, 0.456, 0.406), dtype=np.float32)
        IMG_VARS = np.array((0.229, 0.224, 0.225), dtype=np.float32)
    else:
        IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)
        IMG_VARS = np.array((1, 1, 1), dtype=np.float32)

    # Total dataset length: num_steps scaled by the per-GPU batch size
    # (gpu_num cancels out: max_iters == num_steps * batch_size_per_gpu).
    batch_size = args.gpu_num * args.batch_size_per_gpu
    max_iters = args.num_steps * batch_size / args.gpu_num

    # Data pipeline.
    train_ds = create_traindataset(args, crop_size=input_size, max_iters=max_iters,
                                   mean=IMG_MEAN, vars=IMG_VARS,
                                   scale=args.random_scale, mirror=args.random_mirror)
    Log.info("Create train dataset done!")

    # Network-with-loss cell (OHEM criterion wraps the backbone internally).
    net_with_loss = CriterionOhemDSN(args)
    Log.info("net with loss done!")

    # Polynomial LR decay + SGD, as in the original DGCNet recipe.
    polynomial_decay_lr = nn.PolynomialDecayLR(learning_rate=args.learning_rate,
                                               end_learning_rate=0.0,
                                               decay_steps=int(max_iters),
                                               power=args.power)
    optim = nn.SGD(params=net_with_loss.trainable_params(),
                   learning_rate=polynomial_decay_lr,
                   momentum=args.momentum,
                   weight_decay=args.weight_decay)

    # Optionally restore pretrained parameters, skipping the classifier
    # head ('fc.*') whose shape differs from the segmentation head.
    if args.restore_from is not None:
        saved_state_dict = mindspore.load_checkpoint(args.restore_from)
        new_params = net_with_loss.parameters_dict().copy()
        for name in saved_state_dict:
            parts = name.split('.')
            if parts[0] != 'fc':
                new_params['.'.join(parts)] = saved_state_dict[name]
        Log.info("load pretrained models")
        load_param_into_net(net_with_loss, new_params, strict_load=False)
    else:
        Log.info("train from scratch")

    # Dynamic loss scaling for numerically stable mixed-precision training.
    loss_scale_manager = DynamicLossScaleManager()

    model = Model(net_with_loss, loss_scale_manager=loss_scale_manager, optimizer=optim)
    config_ck = CheckpointConfig(save_checkpoint_steps=args.save_checkpoint_steps,
                                 keep_checkpoint_max=args.save_checkpoint_num)
    ckpoint_cb_train = ModelCheckpoint(prefix='checkpoint_dgcnet_resnet101',
                                       directory=args.save_dir, config=config_ck)
    time_cb_train = TimeMonitor(data_size=train_ds.get_dataset_size())
    loss_cb_train = LossMonitor()

    # Start training.
    Log.info("-------------------------------train begins-------------------------------")
    model.train(epoch=3, train_dataset=train_ds,
                callbacks=[loss_cb_train, time_cb_train, ckpoint_cb_train],
                dataset_sink_mode=True)

    end = timeit.default_timer()
    Log.info("Training cost: " + str(end - start) + 'seconds')

    # Save the trained loss network (which holds the backbone parameters).
    # getattr keeps compatibility should an --arch argument be reinstated.
    Log.info("Save final models")
    arch = getattr(args, 'arch', 'DGCNet')
    mindspore.save_checkpoint(net_with_loss,
                              osp.join(args.save_dir, str(arch) + '_final' + '.ckpt'))


# Script entry point: run training only when executed directly.
if __name__ == '__main__':
    train()
