# time: 2021/7/15:16:44
import os
import time
import random
import mindspore
import numpy as np
import moxing as mox
import mindspore.nn as nn
import mindspore.dataset as ds
from mindspore import context, Model
from mindspore.common import set_seed
from mindspore.profiler import Profiler
from mindspore.context import ParallelMode
from mindspore.nn import ExponentialDecayLR
from mindspore.train.callback import Callback
from mindspore.communication.management import init
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
from mindspore.train.loss_scale_manager import FixedLossScaleManager, DynamicLossScaleManager

from models.net import DAM_Net, DAMNetWithLoss, DAMTrainOneStepCell, PredictWithNet
from utils.callback import LossCallback, TimeMonitor, EvalCallBack
from utils.metric import EvalMetric
import utils.config

# Route `os` file APIs through moxing so OBS (s3-like) paths are handled
# transparently by plain os calls.
mox.file.shift('os', 'mox')


# Distributed-training topology from the ModelArts/Ascend environment.
# Fall back to single-device values when the variables are unset so the
# script no longer dies with `int(None)` in a standalone run; behavior is
# unchanged whenever RANK_SIZE / DEVICE_ID are exported by the platform.
device_num = int(os.getenv('RANK_SIZE', '1'))
device_id = int(os.getenv('DEVICE_ID', '0'))
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class CopyCallBack(Callback):
    """Training callback that mirrors local artifacts back to OBS after each epoch."""

    def __init__(self, local_train_url, train_url):
        """local_train_url: source directory on the job's local disk;
        train_url: destination OBS path."""
        super().__init__()
        self.local_train_url = local_train_url
        self.train_url = train_url

    def epoch_end(self, run_context):
        """Copy the whole local training directory to OBS at epoch end."""
        print("copy to obs")
        mox.file.copy_parallel(src_url=self.local_train_url,
                               dst_url=self.train_url)


def prepare_seed(seed):
    """Seed every RNG source in use (Python, NumPy, MindSpore) for reproducibility."""
    print("seed: ", seed)
    # Python / NumPy generators first, then the framework-level ones.
    random.seed(seed)
    np.random.seed(seed)
    mindspore.set_seed(seed)
    mindspore.dataset.config.set_seed(seed)


def train(args):
    """Train the DAM network on the Ubuntu or Douban corpus.

    Downloads the MindRecord dataset from OBS to the job's local disk,
    builds the dataset pipeline, network, optimizer and callbacks, then
    runs ``model.train``. Checkpoints and loss/eval logs are written under
    the local train path; in the distributed case device 0 copies them
    back to OBS after each epoch via :class:`CopyCallBack`.

    Args:
        args: parsed command-line config (see ``utils.config``). Must
            expose ``data_url``, ``train_url``, ``time_``, ``model_name``,
            ``batch_size``, ``eval_batch_size``, ``learning_rate``,
            ``decay_rate``, ``decay_steps``, ``epoch_size`` and
            ``do_eval``; ``emb_init`` is filled in here.

    Raises:
        RuntimeError: if ``args.model_name`` is neither ``DAM_ubuntu``
            nor ``DAM_douban``.
    """
    print("device_num: ", device_num)
    print("device_id: ", device_id)
    config = args
    prepare_seed(1)  # fixed seed so runs are reproducible
    print('\n', config)

    # ---- path init ----
    root = "/cache/"
    obs_data_path = config.data_url
    obs_train_path = os.path.join(config.train_url, str(config.time_))
    if config.model_name == "DAM_ubuntu":
        local_data_path = os.path.join(root, "ubuntu_data")  # local (in-cloud) data path
        local_train_path = os.path.join(root, 'dam/save_checkpoints/ubuntu')
    elif config.model_name == "DAM_douban":
        local_data_path = os.path.join(root, "douban_data")
        local_train_path = os.path.join(root, 'dam/save_checkpoints/douban')
    else:
        raise RuntimeError('{} does not exist'.format(config.model_name))

    # ---- context init: data-parallel when more than one device ----
    if device_num > 1:
        context.set_auto_parallel_context(device_num=device_num,
                                          parallel_mode=ParallelMode.DATA_PARALLEL,
                                          parameter_broadcast=True,
                                          gradients_mean=True)
        init()
        # each device keeps its own private copy of the data
        local_data_path = os.path.join(local_data_path, str(device_id))

    local_train_path = os.path.join(local_train_path, str(config.time_))
    mox.file.make_dirs(local_train_path)
    print("############## Downloading data from OBS ##############")
    # copy the dataset from the OBS bucket to the job's local disk
    mox.file.copy_parallel(src_url=obs_data_path, dst_url=local_data_path)
    print("############### Downloading is completed ##############")

    local_loss_file_name = os.path.join(local_train_path, "loss.log")
    local_eval_file_name = os.path.join(local_train_path, "eval.log")

    # ---- training data load ----
    repeat_num = 1
    train_data_path = os.path.join(local_data_path, "data_train.mindrecord")
    print("\nStart loading train data: ", train_data_path)
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    train_dataset = ds.MindDataset(train_data_path,
                                   columns_list=["turns", "turn_len", "response", "response_len", "label"],
                                   shuffle=True, num_shards=device_num, shard_id=device_id)
    # split the global batch across devices in the distributed case
    batch_size = int(config.batch_size // device_num) if device_num > 1 else config.batch_size
    train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
    train_dataset = train_dataset.repeat(repeat_num)
    batch_num = train_dataset.get_dataset_size()
    print("dataset: ", train_dataset)
    print("dataset_size: ", batch_num)
    print("dataset_len: ", batch_size * batch_num)
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    print("*************Finish loading data**************")

    # ---- model init ----
    print("\n************model define************")
    config.emb_init = os.path.join(local_data_path, "word_embedding.pkl")
    dam_net = DAM_Net(config)
    print(dam_net.trainable_params())

    lr_schedule = ExponentialDecayLR(config.learning_rate, config.decay_rate, config.decay_steps, is_stair=True)
    optimizer = nn.Adam(params=dam_net.trainable_params(), learning_rate=lr_schedule)

    train_net = DAMNetWithLoss(dam_net)
    train_net = DAMTrainOneStepCell(train_net, optimizer)
    eval_net = PredictWithNet(dam_net)
    metric = EvalMetric(config.model_name)
    model = Model(train_net, eval_network=eval_net, metrics={"Accuracy": metric})

    # ---- callbacks ----
    time_cb = TimeMonitor(data_size=batch_num)
    loss_cb = LossCallback(loss_file_path=local_loss_file_name)
    cbs = [time_cb, loss_cb]

    # checkpoint saving: roughly 10 checkpoints per epoch
    save_step = int(max(1, (batch_num / repeat_num) / 10))
    config_ck = CheckpointConfig(save_checkpoint_steps=save_step, keep_checkpoint_max=80)
    save_checkpoint_path = os.path.join(local_train_path, str(device_id))
    ckpoint_cb = ModelCheckpoint(prefix="DAM", directory=save_checkpoint_path, config=config_ck)
    cbs.append(ckpoint_cb)

    if config.do_eval:
        eval_data_path = os.path.join(local_data_path, "data_val.mindrecord")
        print('\nStart loading eval data: ', eval_data_path)
        eval_dataset = ds.MindDataset(eval_data_path,
                                      columns_list=["turns", "turn_len", "response", "response_len", "label"],
                                      shuffle=False, num_shards=None, shard_id=None)
        eval_dataset = eval_dataset.batch(config.eval_batch_size, drop_remainder=True)
        eval_dataset = eval_dataset.repeat(1)
        print("eval_dataset.size: ", eval_dataset.get_dataset_size())
        print("eval_dataset.len: ", eval_dataset.get_dataset_size() * config.eval_batch_size)
        # NOTE: the original if/else assigned the same value on both
        # branches; both corpora evaluate once per checkpoint interval.
        eval_per_steps = save_step
        print("eval_per_steps: ", eval_per_steps)
        eval_callback = EvalCallBack(model, eval_dataset, eval_per_steps=eval_per_steps,
                                     eval_file_path=local_eval_file_name)
        cbs.append(eval_callback)

    # copy results back to the OBS bucket at each epoch end (device 0 only,
    # so the distributed workers do not race on the upload)
    if device_num > 1 and device_id == 0:
        copy_callback = CopyCallBack(local_train_path, obs_train_path)
        cbs.append(copy_callback)

    # dump the full config into the eval log for later inspection
    argsDict = config.__dict__
    with open(local_eval_file_name, 'a+') as out_file:
        out_file.write("--------------- start ---------------\n")
        for eachArg, value in argsDict.items():
            out_file.write(eachArg + ' : ' + str(value) + '\n')
        out_file.write("---------------- end ----------------\n\n")

    print("\n**************Starting training****************")
    print('batch_size: ', batch_size)
    print('lr: ', config.learning_rate)
    print('decay_rate: ', config.decay_rate)
    print('decay_step: ', config.decay_steps)
    print('save_step: ', save_step)
    model.train(epoch=config.epoch_size, train_dataset=train_dataset, callbacks=cbs, dataset_sink_mode=False)
    print("final learning rate: ", optimizer.get_lr())
    print("***************Finish training*****************")


if __name__ == '__main__':
    # Entry point: pick the dataset-specific argument parser.
    # Swap the two lines below to train on the Ubuntu corpus instead.
    # train(utils.config.ubuntu_parse_args())
    train(utils.config.douban_parse_args())

