# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
PanguAlpha train script
"""

import os
import math
import time
from mindspore import context
from mindspore.train.model import Model
import mindspore.communication.management as D
from mindspore.context import ParallelMode
import mindspore.nn as nn
from mindspore.train.callback import TimeMonitor, Callback, SummaryCollector
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell
import mindspore.common.dtype as mstype
from mindspore.parallel import set_algo_parameters
from mindspore.parallel._cost_model_context import _set_multi_subgraphs
from mindspore.nn.wrap.cell_wrapper import _VirtualDatasetCell # PipelineCell,
from src.adam import AdamWeightDecayOp
from src.dataset import create_dataset
from src.pangu_alpha import PanguAlpha, PanguAlphaWithLoss, CrossEntropyLoss, EvalNet
from src.pangu_alpha_wrapcell import PanguAlphaTrainOneStepWithLossScaleCell, PanguAlphaTrainPipelineWithLossScaleCell
from src.pangu_alpha_config import PANGUALPHAConfig, set_parse
from src.utils import LearningRate, get_args, FP32StateAdamWeightDecay
from src.utils import download_data

from mycode.mytrain import find_latest_ckpt, MyF1, EvalAndSaveCallback

class LossCallBack(Callback):
    """
    Training-loss monitor callback.

    Logs loss, overflow status and loss scale after every step on one
    device per 8-card node, and terminates nothing itself — it only
    records and prints.  The (step, loss) history is kept in
    ``self.steps_loss`` for later plotting.
    """

    def __init__(self, dataset_size=-1, local_rank=0, has_trained_epoch=0, has_trained_step=0, micro_size=1):
        super(LossCallBack, self).__init__()
        self._dataset_size = dataset_size
        self.local_rank = local_rank
        self.has_trained_epoch = has_trained_epoch
        self.has_trained_step = has_trained_step
        self.micro_size = micro_size
        # Accumulated (step, loss) history used for the loss curve plot.
        self.steps_loss = {"step": [], "loss": []}
        print("load has trained epoch :{} and step: {}".format(has_trained_epoch, has_trained_step), flush=True)

    def step_end(self, run_context):
        """Record and print the loss after each training step."""
        cb_params = run_context.original_args()
        cur_step = cb_params.cur_step_num
        # Skip logging unless the dataset size is known and this is the
        # first device of an 8-card node.
        if self._dataset_size <= 0 or self.local_rank % 8 != 0:
            return
        fraction, epoch_num = math.modf(cur_step / self._dataset_size)
        if fraction == 0:
            # An exact multiple of dataset_size closes the previous epoch.
            epoch_num -= 1
        date = time.asctime(time.localtime(time.time()))
        # net_outputs[0] is the accumulated loss; divide by the number of
        # micro batches to report a per-micro-batch value.
        loss_value = cb_params.net_outputs[0].asnumpy() / self.micro_size
        self.steps_loss["loss"].append(loss_value)
        self.steps_loss["step"].append(cur_step)
        print("time: {} local_rank: {}, epoch: {}, step: {},\nloss is {}, overflow is {}, scale is {}".
              format(date, int(self.local_rank), int(epoch_num) + int(self.has_trained_epoch),
                     cur_step + int(self.has_trained_step), loss_value,
                     cb_params.net_outputs[1].asnumpy(), cb_params.net_outputs[2].asnumpy()))


# Absolute path of the project root: the parent of this file's directory.
project_root = os.path.abspath(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
print('project_root:', project_root)


def set_weight_decay(params):
    """
    Partition parameters into optimizer weight-decay groups.

    Bias and layernorm parameters receive no weight decay; every other
    parameter gets a decay coefficient of 1e-1.

    Args:
        params (list): trainable parameters; each element must expose a
            ``name`` attribute.

    Returns:
        list[dict]: optimizer group specification — a decay group, a
        no-decay group, and an ``order_params`` entry preserving the
        original parameter order.
    """
    def _needs_decay(param):
        # Normalization and bias parameters are conventionally excluded
        # from weight decay.
        lowered = param.name.lower()
        return 'layernorm' not in lowered and 'bias' not in lowered

    # Single pass over params instead of evaluating the predicate twice
    # per parameter via two filter() calls.
    decay_params = []
    other_params = []
    for param in params:
        (decay_params if _needs_decay(param) else other_params).append(param)
    return [
        {'params': decay_params, 'weight_decay': 1e-1},
        {'params': other_params, 'weight_decay': 0.0},
        {'order_params': params},
    ]


def run_train(args_opt):
    r"""
    The main training process.

    Builds the PanguAlpha network with its loss-scaled train-one-step
    wrapper, optionally restores the latest checkpoint, then runs
    ``model.train`` with loss / evaluation / checkpoint / summary
    callbacks.  After training the newest checkpoint (and, when
    ``args_opt.export`` is set, an exported MindIR) is uploaded to OBS
    via moxing, and loss/F1 curves are plotted.

    Args:
        args_opt: parsed command-line arguments (see ``src.utils.get_args``).
    """
    # Set execution mode
    context.set_context(mode=args_opt.set_context, device_target=args_opt.device_target)
    context.set_context(variable_memory_max_size="31GB") # only supports devices in ['Ascend']
    # Set parallel context
    if args_opt.distribute == "true":
        D.init()
        device_num = D.get_group_size()
        rank = D.get_rank()
        print("rank_id is {}, device_num is {}".format(rank, device_num))

        context.reset_auto_parallel_context()
        context.set_auto_parallel_context(
            parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL,
            gradients_mean=False,
            full_batch=bool(args_opt.full_batch),
            strategy_ckpt_load_file=args_opt.strategy_load_ckpt_path,
            enable_parallel_optimizer=bool(args_opt.optimizer_shard))
        set_algo_parameters(elementwise_op_strategy_follow=True)
        _set_multi_subgraphs()

    else:
        rank = 0
        device_num = 1
    context.set_context(save_graphs=False, save_graphs_path="./graphs_of_device_id_" + str(rank))
    # Copy data from the cloud to /cache/Data (unless running offline).
    cache_url = '/cache/Data/'
    cache_url_dev = '/cache/Data_dev/'
    if args_opt.offline:
        cache_url = args_opt.data_url
        cache_url_dev = args_opt.data_dev_url
    else:
        download_data(src_data_url=args_opt.data_url, tgt_data_path=cache_url, rank=rank)
    # Set model property: the global batch is per-device batch times the
    # data-parallel degree.
    model_parallel_num = args_opt.op_level_model_parallel_num
    data_parallel_num = int(device_num / model_parallel_num)
    batch_size = args_opt.per_batch_size * data_parallel_num
    config = PANGUALPHAConfig(
        data_parallel_num=data_parallel_num,
        model_parallel_num=model_parallel_num,
        batch_size=batch_size,
        seq_length=args_opt.seq_length,
        vocab_size=args_opt.vocab_size,
        embedding_size=args_opt.embedding_size,
        num_layers=args_opt.num_layers,
        num_heads=args_opt.num_heads,
        expand_ratio=4,
        post_layernorm_residual=False,
        dropout_rate=0.1, # TODO: how to shut down during evaluation
        compute_dtype=mstype.float16,
        use_past=False, # use_past,
        self_layernorm=True, # #
        forward_reduce_scatter=False, # #
        stage_num=args_opt.stage_num,
        micro_size=args_opt.micro_size,
        eod_reset=bool(args_opt.eod_reset),
        load_ckpt_path=args_opt.load_ckpt_path,
        param_init_type=mstype.float32 if args_opt.param_init_type == 'fp32' else mstype.float16,
        word_emb_dp=bool(args_opt.word_emb_dp))
    print("===config is: ", config, flush=True)

    # Define network
    pangu_alpha = PanguAlpha(config)
    loss = CrossEntropyLoss(config)
    pangu_alpha_with_loss = PanguAlphaWithLoss(config, pangu_alpha, loss)
    # pangu_alpha_with_loss = _VirtualDatasetCell(pangu_alpha_with_loss)
    # Define eval network (shares the backbone weights with the train net).
    from src.tokenization_jieba import JIEBATokenizer
    tokenizer = JIEBATokenizer(os.path.join(args_opt.tokenizer_path, 'vocab.vocab'),
                               os.path.join(args_opt.tokenizer_path, 'vocab.model'))
    eval_net = EvalNet(pangu_alpha)
    eval_net.set_train(False)

    print("=====args_opt is: ", args_opt, flush=True)

    # Warm-up and cosine decay learning rate
    lr = LearningRate(learning_rate=args_opt.start_lr,
                      end_learning_rate=args_opt.end_lr,
                      warmup_steps=args_opt.warmup_step,
                      decay_steps=200000)

    params = pangu_alpha.trainable_params()
    group_params = set_weight_decay(params)
    if args_opt.optimizer == "lamb":
        optimizer = nn.Lamb(group_params, learning_rate=lr)
    elif args_opt.opt_offload:
        optimizer = AdamWeightDecayOp(group_params, learning_rate=lr, eps=1e-8, beta1=0.9, beta2=0.95)
    # elif args_opt.set_context:
    #     optimizer = nn.Momentum(group_params, learning_rate=lr, momentum=0.9, weight_decay=0.0)
    else:
        optimizer = FP32StateAdamWeightDecay(group_params, learning_rate=lr, eps=1e-8, beta1=0.9, beta2=0.95)
    # Initial scaling sens
    loss_scale_value = math.pow(2, 32)
    epoch_num = args_opt.epoch_size
    # Dataset loading mindrecord files: two dev datasets (evaluation during
    # training / standalone generation eval) and the train dataset.
    ds_dev = create_dataset(1, data_path=cache_url_dev,
                        data_start_index=0, eod_reset=config.eod_reset, full_batch=bool(args_opt.full_batch),
                        eod_id=args_opt.eod_id, device_num=device_num, rank=rank,
                        column_name=args_opt.data_column_name, epoch=epoch_num) # create_dataset_dev
    ds_dev2 = create_dataset(1, data_path=cache_url_dev,
                        data_start_index=0, eod_reset=config.eod_reset, full_batch=bool(args_opt.full_batch),
                        eod_id=args_opt.eod_id, device_num=device_num, rank=rank,
                        column_name=args_opt.data_column_name, epoch=epoch_num) # create_dataset_dev
    ds_dev = ds_dev.shuffle(buffer_size=1000)
    ds_dev2 = ds_dev2.shuffle(buffer_size=1000)
    ds = create_dataset(config.batch_size, data_path=cache_url,
                        data_start_index=0, eod_reset=config.eod_reset, full_batch=bool(args_opt.full_batch),
                        eod_id=args_opt.eod_id, device_num=device_num, rank=rank,
                        column_name=args_opt.data_column_name, epoch=epoch_num)
    step_per_epoch = ds.get_dataset_size()
    callback_size = args_opt.sink_size
    actual_epoch_num = int(epoch_num * step_per_epoch / callback_size)
    pretrain_ckpt_path, has_trained_epoch, has_trained_step = find_latest_ckpt(
        data_path=os.path.join(args_opt.train_url, 'checkpoint/'),
        has_trained=False
    ) # TODO
    # Convert (epoch, step-within-epoch) to an absolute step count.
    has_trained_step = (has_trained_epoch-1)*callback_size + has_trained_step
    update_cell = DynamicLossScaleUpdateCell(loss_scale_value=loss_scale_value, scale_factor=2, scale_window=1000)
    pangu_alpha_with_grads = PanguAlphaTrainOneStepWithLossScaleCell(
        pangu_alpha_with_loss, optimizer=optimizer, scale_update_cell=update_cell, enable_global_norm=False,
        config=config) # default is enable_global_norm=True
    myf1 = MyF1()
    # myem = MyEm()
    model = Model(
        pangu_alpha_with_grads,
        # eval_network=eval_net,
        # eval_indexes=[0,1,2], # positions of loss value, predicted value and label of eval_net
        # metrics={
        #     # "Em": myem,
        #     "F1": myf1
        # }
    )
    model_predict = Model(eval_net)
    if pretrain_ckpt_path:
        print(f"Loading from path {pretrain_ckpt_path}", flush=True)
        from mindspore.train.serialization import load_checkpoint, load_param_into_net
        param_dict = load_checkpoint(pretrain_ckpt_path)
        load_param_into_net(model.train_network, param_dict) # load to eval net due to mechanism of Parameter
        # Recover progress counters stored inside the checkpoint itself.
        has_trained_step = param_dict['current_iterator_step'].asnumpy().reshape(-1,)[0] - 1
        has_trained_epoch = has_trained_step / callback_size
        print('#### Load ckpt success!!! ####')
        time.sleep(2)
    if args_opt.incremental_training:
        from mindspore.train.serialization import load_distributed_checkpoint
        strategy = model.infer_train_layout(train_dataset=ds, sink_size=callback_size)
        print("======start load_distributed checkpoint", flush=True)
        # For 2.6B and 13B models, the number of ckpt files is 512.
        ckpt_name = 'filerted'
        ckpt_file_list = [os.path.join(args_opt.load_ckpt_path, f"{ckpt_name}_{ckpt_rank}.ckpt") for ckpt_rank in
                          range(0, 512)]
        print(f"Loading from path {ckpt_file_list[0]}", flush=True)
        # Load checkpoint files
        load_distributed_checkpoint(model.train_network, ckpt_file_list, strategy)
    print("Dataset size: {}, actual_epoch_num: {}".format(ds.get_dataset_size(), actual_epoch_num), flush=True)
    call_loss = LossCallBack(callback_size, rank, int(has_trained_epoch), int(has_trained_step))
    call_eval = EvalAndSaveCallback(736, model_predict, ds_dev,
                            os.path.join(args_opt.train_url, 'checkpoint/'),
                            int(has_trained_epoch), int(has_trained_step),
                            # myem=myem,
                            myf1=myf1,
                            args_opt=args_opt,
                            tokenizer=tokenizer) # TODO: 18 steps per evaluation
    config_ck = CheckpointConfig(save_checkpoint_steps=46, keep_checkpoint_max=3)
    callback = [
        TimeMonitor(callback_size), # para: step_per_actual_epoch=2
        call_loss,
        call_eval,
        ModelCheckpoint(prefix='pangu_cmrc2018', directory=os.path.join(args_opt.train_url, 'checkpoint/'), config=config_ck),
        SummaryCollector(summary_dir=os.path.join(args_opt.train_url, 'summary_dir/'))
    ]
    # from mycode.mytrain import eval_gener
    # out = eval_gener(ds_dev2,
    #                  model_predict,
    #                  args_opt,
    #                  tokenizer,
    #                  myf1)
    # print("=====" * 5 + "generation eval() initialized: {}".format(out))
    model.train(actual_epoch_num, ds, callbacks=callback, sink_size=callback_size, dataset_sink_mode=True)
    print("model training over!")
    # Only the import failure maps to "Please install moxing"; any error in
    # checkpoint lookup or the OBS copy propagates with its real traceback
    # (previously a bare `except:` masked everything with this message).
    try:
        import moxing as mox
    except ImportError as err:
        raise ValueError("Please install moxing.") from err
    pretrain_ckpt_path, _, _ = find_latest_ckpt(
        data_path=os.path.join(args_opt.train_url, 'checkpoint/'),
        has_trained=True
    )
    if pretrain_ckpt_path:
        mox.file.copy(src_url=pretrain_ckpt_path,
                      dst_url="obs://mytest2333/ckpt/"+os.path.basename(pretrain_ckpt_path))
        # After the upload finishes, create a marker file to signal success.
        with open("%s/save_ckpt.txt" % (project_root), 'w'):
            pass
        # Block other processes until the upload marker exists.
        while not os.path.exists("%s/save_ckpt.txt" % (project_root)):
            time.sleep(1)
        print("Save best ckpt to path: {}".
                format("obs://mytest2333/ckpt/"+os.path.basename(pretrain_ckpt_path)))
    if args_opt.export:
        try:
            from predict import export_mindir
            data_path = os.path.join(args_opt.train_url, 'mindir')
            prefix = os.path.join(data_path,'pangu_alpha')
            os.system('rm -f {}/*'.format(data_path))
            config.batch_size = 1
            export_mindir(model_predict, config, prefix)
            files = os.listdir(data_path)
            data = [
                os.path.join(data_path, name) for name in files
                # if name.endswith(".ckpt")
            ]
            for item in data:
                mox.file.copy(src_url=item,
                              dst_url="obs://mytest2333/mindir/"+os.path.basename(item))
            # After the upload finishes, create a marker file to signal success.
            with open("%s/save_mindir.txt" % (project_root), 'w'):
                pass
            # Block other processes until the upload marker exists.
            while not os.path.exists("%s/save_mindir.txt" % (project_root)):
                time.sleep(1)
            print("Save mindir to path: {}".format("obs://mytest2333/mindir/"))
        except Exception as err:
            # Chain the original error instead of discarding it via a bare
            # `except:` (which also caught KeyboardInterrupt/SystemExit).
            raise ValueError("export mindir failed.") from err
        # NOTE(review): the plots below only run when args_opt.export is
        # set — confirm that is intended and not an indentation slip.
        import matplotlib.pyplot as plt
        steps_loss = call_loss.steps_loss
        plt.plot(steps_loss["step"], steps_loss["loss"], "red")
        plt.xlabel("step number")
        plt.ylabel("Model loss")
        plt.title("Model loss variation chart")
        plt.show()

        steps_eval = call_eval.steps_eval
        plt.plot(steps_eval["step"], steps_eval["F1"], "red")
        plt.xlabel("step number")
        plt.ylabel("Model F1")
        plt.title("Model F1 variation chart")
        plt.show()

def run_train_pipeline(args_opt):
    r"""
    The main training process in pipeline.

    Builds the PanguAlpha network wrapped in ``PipelineCell`` for
    pipeline-parallel training across ``args_opt.stage_num`` stages and
    runs ``model.train`` with time/loss monitoring callbacks.

    Args:
        args_opt: parsed command-line arguments (see ``src.utils.get_args``).
    """
    context.set_context(save_graphs=False, mode=context.GRAPH_MODE, device_target=args_opt.device_target)
    context.set_context(variable_memory_max_size="31GB")
    if args_opt.distribute == "true":
        D.init()
        device_num = D.get_group_size()
        rank_id = D.get_rank()
        print("rank_id is {}, device_num is {}".format(rank_id, device_num))
        context.reset_auto_parallel_context()
        context.set_auto_parallel_context(
            parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL,
            gradients_mean=False,
            full_batch=bool(args_opt.full_batch),
            loss_repeated_mean=True,
            device_num=device_num,
            enable_parallel_optimizer=bool(args_opt.optimizer_shard),
            pipeline_stages=args_opt.stage_num)
        set_algo_parameters(elementwise_op_strategy_follow=True)
        _set_multi_subgraphs()
    else:
        # Default to rank 0 when RANK_ID is not exported; previously this
        # crashed with TypeError on int(None).
        rank_id = int(os.getenv("RANK_ID", "0"))
        device_num = 1
    # copy data from the cloud to the /cache/Data
    cache_url = '/cache/Data/'
    if args_opt.offline:
        cache_url = args_opt.data_url
    else:
        download_data(src_data_url=args_opt.data_url, tgt_data_path=cache_url, rank=rank_id)
    # Per-stage device layout: each pipeline stage owns
    # device_num / stage_num devices, split between model and data parallel.
    model_parallel_num = args_opt.op_level_model_parallel_num
    stage_device_num = int(device_num / args_opt.stage_num)
    data_parallel_num = int(stage_device_num / model_parallel_num)
    per_batch_size = args_opt.per_batch_size
    batch_size = per_batch_size * data_parallel_num * args_opt.micro_size
    config = PANGUALPHAConfig(
        data_parallel_num=data_parallel_num,
        model_parallel_num=model_parallel_num,
        batch_size=batch_size,
        seq_length=args_opt.seq_length,
        vocab_size=args_opt.vocab_size,
        embedding_size=args_opt.embedding_size,
        num_layers=args_opt.num_layers,
        num_heads=args_opt.num_heads,
        expand_ratio=4,
        post_layernorm_residual=False,
        dropout_rate=0.1,
        compute_dtype=mstype.float16,
        use_past=False,
        stage_num=args_opt.stage_num,
        micro_size=args_opt.micro_size,
        word_emb_dp=bool(args_opt.word_emb_dp))
    print("===config is: ", config, flush=True)
    pangu_alpha = PanguAlpha(config)
    loss = CrossEntropyLoss(config)
    # The top-of-file import of PipelineCell is commented out, which made
    # the next line fail with NameError; import it locally instead.
    # NOTE(review): confirm this import path matches the installed
    # MindSpore version.
    from mindspore.nn.wrap.cell_wrapper import PipelineCell
    pangu_alpha_with_loss = PipelineCell(PanguAlphaWithLoss(config, pangu_alpha, loss), config.micro_size)
    pangu_alpha_with_loss = _VirtualDatasetCell(pangu_alpha_with_loss)
    print("=====args_opt is: ", args_opt, flush=True)
    lr = LearningRate(learning_rate=args_opt.start_lr, end_learning_rate=args_opt.end_lr,
                      warmup_steps=args_opt.warmup_step, decay_steps=args_opt.decay_steps)
    params = pangu_alpha.infer_param_pipeline_stage()
    group_params = set_weight_decay(params)
    if args_opt.optimizer == "lamb":
        optimizer = nn.Lamb(group_params, learning_rate=lr)
    elif args_opt.opt_offload:
        optimizer = AdamWeightDecayOp(group_params, learning_rate=lr, eps=1e-8, beta1=0.9, beta2=0.95)
    else:
        optimizer = nn.AdamWeightDecay(group_params, learning_rate=lr, beta1=0.9, beta2=0.95, eps=1e-8)

    ds = create_dataset(config.batch_size, data_path=cache_url, device_num=stage_device_num,
                        rank=rank_id % stage_device_num, eod_reset=True, data_start_index=0,
                        full_batch=context.get_auto_parallel_context("full_batch"),
                        column_name=args_opt.data_column_name)
    epoch_num = args_opt.epoch_size
    step_per_epoch = ds.get_dataset_size()
    callback_size = args_opt.sink_size
    actual_epoch_num = int(epoch_num * step_per_epoch / callback_size)
    callback = [TimeMonitor(callback_size), LossCallBack(callback_size, rank_id, micro_size=config.micro_size)]
    loss_scale_value = math.pow(2, 32)
    update_cell = DynamicLossScaleUpdateCell(loss_scale_value=loss_scale_value, scale_factor=2, scale_window=1000)
    pangu_alpha_with_grads = PanguAlphaTrainPipelineWithLossScaleCell(
        pangu_alpha_with_loss, optimizer=optimizer, config=config, scale_update_cell=update_cell)
    model = Model(pangu_alpha_with_grads)
    model.train(actual_epoch_num, ds, callbacks=callback,
                sink_size=callback_size, dataset_sink_mode=True)


if __name__ == "__main__":
    opt = get_args(inference=True)
    basepath = os.path.dirname(__file__)
    opt.data_url = os.path.join(basepath, opt.data_url) # for train
    opt.train_url = os.path.join(basepath, opt.train_url)
    opt.tokenizer_path = os.path.join(basepath, opt.tokenizer_path)
    opt.data_dev_url = os.path.join(basepath, opt.data_dev_url) # for evaluation
    del basepath
    set_parse(opt)
    if opt.per_batch_size == 0:
        raise ValueError("The per_batch_size has not been configured.")
    if opt.stage_num > 1:
        run_train_pipeline(opt)
    else:
        run_train(opt)
