# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""train scripts"""
import ast
import time
import argparse
import mindspore.common
from mindspore import load_checkpoint, load_param_into_net
import mindspore.nn as nn
from src.data import create_traindataset
from mindspore import context, Tensor
from mindspore import save_checkpoint
from mindspore.context import ParallelMode
from mindspore.communication.management import init
from src.networks import define_G
from src.loss import InvertibleRescalingNetLoss
from src.model import TrainOneStepInvertibleRescalingNet
from src.optim.warmup_cosine_annealing_lr import warmup_cosine_annealing_lr
from src.optim.warmup_multisteplr import warmup_step_lr

if __name__ == '__main__':
    # Command-line interface for IRN (Invertible Rescaling Network) training.
    parser = argparse.ArgumentParser(description='IIR train')

    # Hardware specifications
    parser.add_argument('--seed', type=int, default=10,
                        help='random seed')

    # Data specifications
    parser.add_argument('--scale', type=int, default=2, choices=(2, 4),
                        help='Super resolution upscale factor.')
    parser.add_argument('--dataset_GT', type=str, default=None,
                        help='Path to the folder where the intended GT dataset is stored.')
    parser.add_argument('--dataset_LQ', type=str, default=None,
                        help='Path to the folder where the intended LQ dataset is stored.')

    # Train specifications
    parser.add_argument('--resume_state', type=str, default=None,
                        help='Path to the checkpoint.')
    parser.add_argument('--device_target', type=str, default='GPU', choices=("GPU", "CPU"),
                        help="Device target, support GPU and CPU.")
    parser.add_argument('--device_num', type=int, default=1,
                        help='Device num.')
    parser.add_argument("--device_id", type=int, default=0,
                        help="Device id, default: 0.")
    parser.add_argument('--run_distribute', type=ast.literal_eval, default=False,
                        help='Run distribute, default: false.')
    parser.add_argument('--epochs', type=int, default=5000,
                        help='Number of epochs to train')
    parser.add_argument('--batch_size', type=int, default=4,
                        help='Input batch size for training')
    parser.add_argument('--start_epoch', type=int, default=0,
                        help='Epoch to continue training from when resuming a checkpoint.')

    # Network
    parser.add_argument("--subnet_type", type=str, default='DBNet',
                        help="The type of subnet, default: DBNet.")
    parser.add_argument("--in_nc", type=int, default=3,
                        help="The input channels, default: 3.")
    parser.add_argument("--out_nc", type=int, default=3,
                        help="The output channels, default: 3.")
    # nargs='+' makes the command-line value a real list of ints; the original
    # (type=int with a list default) broke for any value passed on the CLI.
    parser.add_argument("--block_num", type=int, nargs='+', default=[8, 8],
                        help="The block num, default: 8 8.")
    parser.add_argument("--G_scale", type=int, default=2,
                        help="Super resolution upscale factor of the network, default: 2.")
    parser.add_argument("--G_init", type=str, default='xavier',
                        help="The init method of net, default: xavier.")

    # Loss specifications
    parser.add_argument("--pixel_criterion_forw", type=str, default='l2',
                        help="The pixel criterion of forward, default: l2.")
    parser.add_argument("--pixel_criterion_back", type=str, default='l1',
                        help="The pixel criterion of backward, default: l1.")
    parser.add_argument("--lambda_fit_forw", type=float, default=16,
                        help="lambda_fit_forw.")
    parser.add_argument("--lambda_rec_back", type=int, default=1,
                        help="lambda_rec_back.")
    parser.add_argument("--lambda_ce_forw", type=int, default=1,
                        help="lambda_ce_forw.")
    parser.add_argument("--gradient_clipping", type=int, default=10,
                        help="The values of gradient_clipping.")
    parser.add_argument('--gaussian_scale', type=int, default=1,
                        help='The gaussian scale of net.')

    # Optimizer specifications
    parser.add_argument("--weight_decay_G", type=float, default=1e-5,
                        help="Weight decay.")
    # nargs=2/type=float: the original type=tuple turned a CLI value into a
    # tuple of characters; args.betas[0]/[1] works for both tuple and list.
    parser.add_argument('--betas', type=float, nargs=2, default=(0.9, 0.999),
                        help='ADAM beta.')
    parser.add_argument('--lr_G', type=float, default=2e-4,
                        help='Learning rate.')
    parser.add_argument('--lr_scheme', type=str, default="MultiStepLR",
                        help='The scheme of learning rate.')
    # nargs='+'/type=int: the original type=list split a CLI value into characters.
    parser.add_argument('--lr_steps', type=int, nargs='+', default=[100000, 200000, 300000, 400000],
                        help='The step of learning rate decay.')
    parser.add_argument('--lr_gamma', type=float, default=0.5,
                        help='The gamma of learning rate.')
    # Used only by the CosineAnnealingLR_Restart scheme. The original script
    # referenced args.restarts / args.eta_min without ever defining them, so
    # selecting that scheme raised AttributeError. Defaults are a best guess —
    # TODO confirm against warmup_cosine_annealing_lr's signature.
    parser.add_argument('--restarts', type=int, nargs='+', default=[100000, 200000, 300000, 400000],
                        help='Restart steps of CosineAnnealingLR_Restart.')
    parser.add_argument('--eta_min', type=float, default=1e-7,
                        help='Minimum learning rate of CosineAnnealingLR_Restart.')

    args = parser.parse_args()
    # Fix the global RNG seed for reproducibility. The original hard-coded 10
    # here, silently ignoring the --seed option.
    mindspore.common.set_seed(args.seed)

    # set context: graph mode on the selected device
    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, save_graphs=False)
    context.set_context(device_id=args.device_id)

    # distributed training: NCCL data parallelism with averaged gradients
    if args.run_distribute:
        init("nccl")
        context.reset_auto_parallel_context()
        context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
                                          gradients_mean=True,
                                          device_num=args.device_num)

    # the invertible network is deep — raise the graph call-depth limit
    context.set_context(max_call_depth=4030)

    # create train dataset
    train_ds = create_traindataset(dataset_path=args.dataset_GT, scale=args.scale,
                                   batch_size=args.batch_size, distribute=args.run_distribute)
    train_data_loader = train_ds.create_dict_iterator()

    step_size = train_ds.get_dataset_size()  # batches per epoch
    total_epochs = int(args.epochs)

    # learning-rate schedule: one entry per training step
    wd_G = args.weight_decay_G if args.weight_decay_G else 0
    if args.lr_scheme == 'MultiStepLR':
        # NOTE(review): base lr is doubled and 200 warmup epochs are
        # hard-coded — presumably tuned for this model; confirm before changing.
        lr = warmup_step_lr(args.lr_G * 2,
                            args.lr_steps,
                            step_size,
                            200,
                            total_epochs,
                            args.lr_gamma
                            )
    elif args.lr_scheme == 'CosineAnnealingLR_Restart':
        lr = warmup_cosine_annealing_lr(args.lr_G,
                                        args.lr_steps,
                                        0,
                                        total_epochs,
                                        args.restarts,
                                        args.eta_min)
    else:
        raise NotImplementedError(
            'MultiStepLR learning rate scheme is enough.')
    print("Total learning rate:{}".format(lr.shape))

    # define network
    net = define_G(args)

    # optionally resume network weights from a checkpoint
    if args.resume_state is not None:
        param_dict = load_checkpoint(args.resume_state)
        load_param_into_net(net, param_dict)

    # net with loss: wraps the network with the IRN forward/backward losses
    loss = InvertibleRescalingNetLoss(args, net)

    # Adam optimizer driven by the per-step learning-rate schedule
    optimizer = nn.Adam(net.trainable_params(), learning_rate=Tensor(lr),
                        beta1=args.betas[0], beta2=args.betas[1],
                        weight_decay=wd_G)

    # single training step with gradient clipping
    train_IRN = TrainOneStepInvertibleRescalingNet(loss, optimizer, args.gradient_clipping)
    train_IRN.set_train(True)

    # start training
    for epoch in range(int(args.start_epoch), total_epochs):
        print("training epoch:{:d}".format(epoch + 1))
        start_time = time.time()
        total_loss = 0
        for data in train_data_loader:
            # Renamed from hr/lr: the original rebound `lr`, shadowing the
            # learning-rate schedule computed during setup.
            hr_img = data['HR']
            lr_img = data['LR']
            step_loss = train_IRN(lr_img, hr_img)
            print("per Loss:{}".format(step_loss))
            total_loss += step_loss
        time_elapsed = time.time() - start_time
        # step_size was computed once before training; no need to query the
        # dataset again on every epoch.
        print("per step needs time:{:.0f}ms".format(time_elapsed / step_size * 1000))
        print("per epoch needs time:{:.8f}ms".format(time_elapsed * 1000))
        print("per epoch totalLoss:{:.8f}".format(total_loss.asnumpy() / step_size))

        # save checkpoint every 50 epochs; in distributed runs only device 0 writes
        if (epoch + 1) % 50 == 0:
            if not args.run_distribute or args.device_id == 0:
                save_checkpoint(train_IRN, './ckpt/InvertibleRescaleNet_%04d.ckpt' % (epoch + 1))

        print("{:d}/{:d} epoch finished".format(epoch + 1, args.epochs))