# time: 2021/7/15:16:44
import os
import time
import random
import argparse
import mindspore
import numpy as np
import moxing as mox
import mindspore.nn as nn
import mindspore.dataset as ds
from mindspore.common import set_seed
from mindspore.profiler import Profiler
import mindspore.common.dtype as mstype
from mindspore.context import ParallelMode
from mindspore.train.callback import Callback
from mindspore import context, Model, save_checkpoint, Tensor
from mindspore.communication.management import init, get_rank, get_group_size
from mindspore.train.serialization import load_param_into_net, load_checkpoint
from mindspore.train.loss_scale_manager import FixedLossScaleManager, DynamicLossScaleManager
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor

from models.net import DAM_Net, ModelBuilder, DAMNetWithLoss, DAMTrainOneStepCell, PredictWithSigmoid
from utils.callback import LossCallback, UbuntuTestCallBack, TimeMonitor, EvalCallBack
from utils.metric import UbuntuTestMetric, DoubanTestMetric, EvalMetric
import utils.douban_evaluation as db_eval
import utils.evaluation as ub_eval
import utils.config

# Route os.* file operations through MoXing so plain file APIs also work on OBS paths.
mox.file.shift('os', 'mox')

# set_seed(1)

# RANK_SIZE / DEVICE_ID are injected by the distributed launch environment.
# NOTE(review): int() raises if these env vars are unset — assumes a managed
# ModelArts/Ascend launch; confirm before running locally.
device_num = int(os.getenv('RANK_SIZE'))
device_id = int(os.getenv('DEVICE_ID'))
# Graph mode on Ascend for the whole process.
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


def prepare_seed(seed):
    """Seed every RNG backend (Python, NumPy, MindSpore) for reproducibility."""
    print("seed: ", seed)
    # Python and NumPy generators.
    random.seed(seed)
    np.random.seed(seed)
    # MindSpore global generator and the dataset-pipeline generator.
    mindspore.set_seed(seed)
    mindspore.dataset.config.set_seed(seed)


def train(args):
    """Train the DAM network on Ascend, evaluating periodically.

    Workflow:
      1. Copy the mindrecord data from OBS into the local container cache.
      2. Build the train/eval data pipelines and the train/eval networks.
      3. Run the epoch loop; every ``save_step`` steps score the validation
         set, log the metrics, and checkpoint the model whenever
         ``result[1] + result[2]`` improves.
      4. Upload all artifacts (logs, score files, checkpoints) back to OBS.

    Args:
        args: parsed command-line namespace (see utils.config), used as the
            run/model configuration.

    Raises:
        RuntimeError: if ``args.model_name`` is not a known model.
    """
    print("device_num: ", device_num)
    print("device_id: ", device_id)
    config = args
    prepare_seed(1)
    print('\n', config)

    # Download the data stored in OBS into the execution container.
    root = "/cache/"
    obs_data_path = config.data_url
    obs_train_path = os.path.join(config.train_url, str(config.time_))
    if config.model_name == "DAM_ubuntu":
        local_data_path = os.path.join(root, "ubuntu_data")  # in-container data path
        local_train_path = os.path.join(root, 'dam/save_checkpoints/ubuntu')
    elif config.model_name == "DAM_douban":
        local_data_path = os.path.join(root, "douban_data")
        local_train_path = os.path.join(root, 'dam/save_checkpoints/douban')
    else:
        raise RuntimeError('{} does not exist'.format(config.model_name))

    # Distributed setup: data-parallel over all devices, one data shard each.
    if device_num > 1:
        context.set_auto_parallel_context(device_num=device_num,
                                          parallel_mode=ParallelMode.DATA_PARALLEL,
                                          parameter_broadcast=True,
                                          gradients_mean=True)
        init()
        local_data_path = os.path.join(local_data_path, str(device_id))

    local_train_path = os.path.join(local_train_path, str(config.time_))
    mox.file.make_dirs(local_train_path)
    print("############## Downloading data from OBS ##############")
    mox.file.copy_parallel(src_url=obs_data_path, dst_url=local_data_path)
    print("############### Downloading is completed ##############")

    local_loss_file_name = os.path.join(local_train_path, "loss.log")
    local_eval_file_name = os.path.join(local_train_path, "eval.log")

    # Training data pipeline.
    train_data_path = os.path.join(local_data_path, "data_train.mindrecord")
    print("\nStart loading train data: ", train_data_path)
    train_dataset = ds.MindDataset(train_data_path,
                                   columns_list=["turns", "turn_len", "response", "response_len", "label"],
                                   shuffle=False, num_shards=device_num, shard_id=device_id)
    batch_size = int(config.batch_size // device_num) if device_num > 1 else config.batch_size
    batch_num = int(train_dataset.get_dataset_size() / batch_size)
    save_step = int(max(1, batch_num / 10))
    print("*************Finish loading data**************")
    # BUGFIX: build the shuffle/batch pipeline exactly once. The original code
    # re-applied .shuffle()/.batch() inside the epoch loop, which double-batches
    # the data from the second epoch on; each fresh iteration of the pipeline
    # already re-runs the shuffle. (The original also subscripted the dataset
    # object, which MindDataset does not support — removed.)
    train_dataset = train_dataset.shuffle(256)
    train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
    batch_num = train_dataset.get_dataset_size()
    print("dataset: ", train_dataset)
    print("dataset_size: ", batch_num)
    print("dataset_len: ", batch_size * batch_num)

    # Model definition: shared backbone, wrapped for loss/step/inference.
    print("\n************model define************")
    config.emb_init = os.path.join(local_data_path, "word_embedding.pkl")
    dam_net = DAM_Net(config)
    print(dam_net.trainable_params())

    train_net = DAMNetWithLoss(dam_net)
    train_net = DAMTrainOneStepCell(train_net, config)
    eval_net = PredictWithSigmoid(dam_net)

    # Per-device output locations for checkpoints and score files.
    print('batch_size: ', batch_size)
    print('lr: ', config.learning_rate)
    print('decay_rate: ', config.decay_rate)
    print('decay_step: ', config.decay_steps)
    save_checkpoint_path = os.path.join(local_train_path, str(device_id))
    mox.file.make_dirs(save_checkpoint_path)

    score_dir = os.path.join(local_train_path, "score", str(device_id))
    mox.file.make_dirs(score_dir)

    # Validation data pipeline (not sharded: every device scores the full set).
    eval_data_path = os.path.join(local_data_path, "data_val.mindrecord")
    print('\nStart loading eval data: ', eval_data_path)
    eval_dataset = ds.MindDataset(eval_data_path,
                                  columns_list=["turns", "turn_len", "response", "response_len", "label"],
                                  shuffle=False, num_shards=None, shard_id=None)
    eval_dataset = eval_dataset.batch(config.eval_batch_size, drop_remainder=True)
    print("eval_dataset.size: ", eval_dataset.get_dataset_size())
    print("eval_dataset.len: ", eval_dataset.get_dataset_size() * config.eval_batch_size)

    # Record the full run configuration once (rank 0 of a distributed run).
    argsDict = config.__dict__
    if device_id == 0 and device_num > 1:
        with open(local_eval_file_name, 'a+') as out_file:
            out_file.write("--------------- start ---------------\n")
            for eachArg, value in argsDict.items():
                out_file.write(eachArg + ' : ' + str(value) + '\n')
            out_file.write("---------------- end ----------------\n\n")

    print("\n**************Starting training****************")
    step = 0
    best_result = [0, 0, 0, 0]
    for epoch in range(config.epoch_size):
        train_iterator = train_dataset.create_dict_iterator()
        for i, data in enumerate(train_iterator):
            turns = Tensor(data["turns"], mstype.int32)
            every_turn_len = Tensor(data["turn_len"], mstype.int32)
            response = Tensor(data["response"], mstype.int32)
            response_len = Tensor(data["response_len"], mstype.int32)
            labels = Tensor(data["label"], mstype.int32)

            loss = train_net(turns, every_turn_len, response, response_len, labels)

            step += 1
            # BUGFIX: the original format string had no placeholder for `loss`,
            # so the loss value was never printed.
            print("epoch: {}, step: {}, global_step: {}, loss: {}".format(epoch, i, step, loss))
            with open(local_loss_file_name, "a+") as loss_file:
                loss_file.write("{}\t{}\n".format(step, loss))

            # Periodic validation.
            if (step % save_step == 0 and step > 0) or step % batch_num == 0:
                # BUGFIX: keep `score_dir` fixed and derive a fresh file path for
                # every evaluation; the original reassigned score_file_path, so
                # each later score file was nested under the previous one.
                cur_score_path = os.path.join(score_dir, "score." + str(step))

                print("**************Starting test****************")
                print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
                # Fresh iterator per pass so the eval set is always traversed
                # from the start; `with` guarantees the file is closed.
                with open(cur_score_path, 'w') as score_file:
                    for it, data_t in enumerate(eval_dataset.create_dict_iterator(), start=1):
                        turns = Tensor(data_t["turns"], mstype.int32)
                        every_turn_len = Tensor(data_t["turn_len"], mstype.int32)
                        response = Tensor(data_t["response"], mstype.int32)
                        response_len = Tensor(data_t["response_len"], mstype.int32)
                        labels = Tensor(data_t["label"], mstype.int32)

                        scores, labels = eval_net(turns, every_turn_len, response, response_len, labels)
                        scores = scores.reshape([-1])
                        labels = labels.reshape([-1])

                        print('write scores ', it, "iter")
                        for index in range(config.eval_batch_size):
                            score_file.write(str(scores[index]) + '\t' + str(labels[index]) + '\n')
                print("***************Finish test*****************")
                print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))

                if config.model_name == 'DAM_ubuntu':
                    result = ub_eval.evaluate(cur_score_path)
                elif config.model_name == 'DAM_douban':
                    result = db_eval.evaluate(cur_score_path)
                else:
                    result = None

                if result:
                    step_str = "Epoch:{}, step:{}, global_step:{}".format(epoch, i, step)
                    with open(local_eval_file_name, 'a+') as out_file:
                        out_file.write(step_str + '\n')
                        for p_at in result:
                            out_file.write(str(p_at) + '\n')
                else:
                    print("Error! result is None.")
                print('finish evaluation')
                print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))

                # BUGFIX: guard against `result` being None before indexing it
                # (the branch above only logged the error and fell through).
                if result and result[1] + result[2] > best_result[1] + best_result[2]:
                    best_result = result
                    save_checkpoint(dam_net, os.path.join(save_checkpoint_path, "best.ckpt"))
                    print("success saving model.")

    print("***************Finish training*****************")

    # Upload logs, score files and checkpoints back to OBS.
    mox.file.copy_parallel(src_url=local_train_path, dst_url=obs_train_path)


if __name__ == '__main__':
    # Entry point: pick the dataset-specific CLI parser (swap the comment
    # below to train on the Ubuntu corpus instead of Douban).
    # train(utils.config.ubuntu_parse_args())
    cli_config = utils.config.douban_parse_args()
    train(cli_config)
