# time: 2021/7/15:16:44
import os
import time
import random
import mindspore
import numpy as np
import mindspore.nn as nn
import mindspore.dataset as ds
from mindspore import context, Model
from mindspore.common import set_seed
from mindspore.nn import ExponentialDecayLR
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
from mindspore.train.loss_scale_manager import FixedLossScaleManager, DynamicLossScaleManager

from models.net import DAM_Net, DAMNetWithLoss, DAMTrainOneStepCell, PredictWithNet
from utils.callback import LossCallback, TimeMonitor, EvalCallBack
from utils.metric import EvalMetric
import utils.config


def prepare_seed(seed):
    """Seed every RNG source used here: MindSpore core, the MindSpore
    dataset pipeline, Python's ``random`` and NumPy."""
    print("seed: ", seed)
    set_seed(seed)            # mindspore.common.set_seed (same fn as mindspore.set_seed)
    ds.config.set_seed(seed)  # dataset shuffling/sampling seed
    random.seed(seed)
    np.random.seed(seed)


def train(args):
    """Build the DAM network and run training, optionally with periodic eval.

    Args:
        args: parsed config namespace (see ``utils.config``); must provide the
            dataset paths, batch sizes, learning-rate schedule fields
            (``learning_rate``, ``decay_rate``, ``decay_steps``), ``epoch_size``,
            ``model_name``, ``save_path`` and the loss/eval log file prefixes.
    """
    config = args
    prepare_seed(1)
    print('\n', config)

    # Execution context: graph mode.  device_target / device_id fall back to
    # the historical hard-coded values ("Ascend", 7) when the config does not
    # supply them, so existing callers are unaffected.
    context.set_context(mode=context.GRAPH_MODE,
                        device_target=getattr(config, "device_target", "Ascend"),
                        device_id=getattr(config, "device_id", 7))

    # Log-file names share a run suffix so loss and eval logs pair up.
    times_name = "_17.log"
    loss_file_path = config.loss_file_name + times_name
    eval_file_name = config.eval_file_name + times_name

    # Data loading.
    print("\nStarting loading data: ", config.train_data_path)
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    train_dataset = ds.MindDataset(config.train_data_path,
                                   columns_list=["turns", "turn_len", "response", "response_len", "label"],
                                   shuffle=True)
    # drop_remainder keeps every step at a full batch (required by graph mode).
    train_dataset = train_dataset.batch(config.batch_size, drop_remainder=True)
    train_dataset = train_dataset.repeat(1)
    batch_num = train_dataset.get_dataset_size()
    print("dataset: ", train_dataset)
    print("dataset_len: ", batch_num * config.batch_size)
    print("dataset_size: ", batch_num)
    # NOTE(review): the original printed this timestamp twice in a row; the
    # duplicate line was removed.
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    print("*************Finish loading data**************")

    # Model definition: backbone, LR schedule, optimizer, train/eval wrappers.
    print("\n************model define************")
    dam_net = DAM_Net(config)
    print('trainable_params: ', dam_net.trainable_params())

    lr_schedule = ExponentialDecayLR(config.learning_rate, config.decay_rate, config.decay_steps, is_stair=True)
    optimizer = nn.Adam(params=dam_net.trainable_params(), learning_rate=lr_schedule)

    train_net = DAMNetWithLoss(dam_net)
    train_net = DAMTrainOneStepCell(train_net, optimizer)
    eval_net = PredictWithNet(dam_net)
    metric = EvalMetric(config.model_name)
    model = Model(train_net, eval_network=eval_net, metrics={"Accuracy": metric})

    # Callbacks: per-epoch timing and loss logging.
    time_cb = TimeMonitor(data_size=batch_num)
    loss_cb = LossCallback(loss_file_path=loss_file_path)
    cbs = [time_cb, loss_cb]

    # Checkpointing: roughly ten saves per epoch, at least one step apart.
    save_step = int(max(1, batch_num / 10))
    config_ck = CheckpointConfig(save_checkpoint_steps=save_step,
                                 keep_checkpoint_max=30)
    ckpoint_cb = ModelCheckpoint(prefix=config.model_name, directory=config.save_path, config=config_ck)
    cbs.append(ckpoint_cb)

    # Optional periodic evaluation on a held-out set, synced to the
    # checkpoint cadence so every saved model has a matching eval entry.
    if config.do_eval:
        print('\nloading eval data: ', config.eval_data_path)
        eval_dataset = ds.MindDataset(config.eval_data_path,
                                      columns_list=["turns", "turn_len", "response", "response_len", "label"],
                                      shuffle=False)
        eval_dataset = eval_dataset.batch(config.eval_batch_size, drop_remainder=True)
        eval_dataset = eval_dataset.repeat(1)
        print('eval_dataset_size: ', eval_dataset.get_dataset_size())
        eval_callback = EvalCallBack(model, eval_dataset, eval_per_steps=save_step, eval_file_path=eval_file_name)
        cbs.append(eval_callback)

    print("\n**************Starting training****************")
    print('batch_size: ', config.batch_size)
    print('lr: ', config.learning_rate)
    print('decay_rate: ', config.decay_rate)
    print('decay_step: ', config.decay_steps)
    print('save_step: ', save_step)
    model.train(epoch=config.epoch_size, train_dataset=train_dataset, callbacks=cbs, dataset_sink_mode=False)
    print("final learning rate: ", optimizer.get_lr())
    print("***************Finish training*****************")


if __name__ == '__main__':
    # Parse the Douban-corpus CLI arguments, then kick off training.
    cli_args = utils.config.douban_parse_args()
    train(cli_args)
