# Created: 2021-07-15 16:44
import time
import mindspore.dataset as ds
from mindspore import context, Model
from mindspore.profiler import Profiler
import mindspore.common.dtype as mstype
from mindspore.common import set_seed
# from mindspore.train.model import Model
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig

from models.net import DAM_Net, DAMNetWithLoss, DAMTrainOneStepCell, PredictWithSigmoid
from utils.callback import LossCallback, UbuntuTestCallBack, TimeMonitor
from utils.metric import UbuntuTestMetric
import utils.config


def train(args):
    """Train the DAM network on a retrieval-based dialogue dataset (Ubuntu/Douban).

    Args:
        args: parsed configuration object. Required attributes:
            train_data_path, batch_size, learning_rate, decay_rate,
            decay_steps, loss_file_name, model_name, save_path, epoch_size.
            Optional attributes (fall back to the historical hard-coded
            defaults when absent): device_id (2), save_step (117),
            keep_checkpoint_max (70), sink_size (4), do_eval (False),
            plus eval_data_path / eval_file_name when do_eval is True.

    Side effects: configures the global MindSpore context, reads MindRecord
    data from disk, writes checkpoints under args.save_path and prints
    progress to stdout.
    """
    config = args
    print('\n', config)

    # Environment: graph mode on an Ascend device. The device id used to be
    # hard-coded to 2; keep that as the default but allow the config to
    # override it.
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",
                        device_id=getattr(config, "device_id", 2))

    # ---- data loading ----
    print("\nStart loading train data: ", config.train_data_path)
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    train_dataset = ds.MindDataset(config.train_data_path,
                                   columns_list=["turns", "turn_len", "response", "response_len", "label"],
                                   shuffle=True)
    train_dataset = train_dataset.batch(config.batch_size, drop_remainder=True)
    train_dataset = train_dataset.repeat(1)
    # Samples consumed per epoch; the trailing partial batch is dropped above.
    dataset_len = config.batch_size * train_dataset.get_dataset_size()
    print("dataset: ", train_dataset)
    print("dataset_len: ", dataset_len)
    print("dataset_size: ", train_dataset.get_dataset_size())
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    print("*************Finish loading data**************")

    # ---- model construction ----
    dam_net = DAM_Net(config)

    print("\n*************model define************")
    print(dam_net.trainable_params())

    # Wrap the backbone with the loss and a one-step training cell cast to
    # fp16 for Ascend; a sigmoid head is used for evaluation/prediction.
    train_net = DAMNetWithLoss(dam_net)
    train_net = DAMTrainOneStepCell(train_net, config).to_float(mstype.float16)
    eval_net = PredictWithSigmoid(dam_net)
    metric = UbuntuTestMetric()
    model = Model(train_net, eval_network=eval_net, metrics={"Ubuntu": metric})

    # ---- callbacks ----
    time_cb = TimeMonitor(data_size=dataset_len)
    loss_cb = LossCallback(config.loss_file_name)
    cbs = [time_cb, loss_cb]

    # Checkpointing: save_step and keep_checkpoint_max were hard-coded
    # (117 / 70); keep those as backward-compatible defaults.
    save_step = getattr(config, "save_step", 117)
    print('save_step: ', save_step)
    print('lr: ', config.learning_rate)
    print('decay_rate: ', config.decay_rate)
    print('decay_step: ', config.decay_steps)
    config_ck = CheckpointConfig(save_checkpoint_steps=save_step,
                                 keep_checkpoint_max=getattr(config, "keep_checkpoint_max", 70))
    ckpoint_cb = ModelCheckpoint(prefix=config.model_name, directory=config.save_path, config=config_ck)
    cbs.append(ckpoint_cb)

    # Optional in-training evaluation. Off by default, matching the original
    # hard-coded `do_eval = False` flag.
    if getattr(config, "do_eval", False):
        print('\nload eval data: ',  config.eval_data_path)
        eval_dataset = ds.MindDataset(config.eval_data_path,
                                      columns_list=["turns", "turn_len", "response", "response_len", "label"],
                                      shuffle=False)
        eval_dataset = eval_dataset.batch(config.batch_size, drop_remainder=True)
        eval_dataset = eval_dataset.repeat(1)
        eval_callback = UbuntuTestCallBack(model, eval_dataset, eval_file_path=config.eval_file_name)
        cbs.append(eval_callback)

    print("\n**************Starting training****************")
    # sink_size was hard-coded to 4; NOTE(review): with data sinking enabled,
    # each "epoch" reported by MindSpore covers only sink_size steps — confirm
    # this matches the intended epoch_size accounting.
    model.train(epoch=config.epoch_size, train_dataset=train_dataset, callbacks=cbs,
                dataset_sink_mode=True, sink_size=getattr(config, "sink_size", 4))
    print("***************Finish training*****************")


if __name__ == '__main__':
    # Fix the global RNG seed so runs are reproducible, then launch training
    # on the Ubuntu corpus configuration.
    set_seed(1)
    cli_args = utils.config.ubuntu_parse_args()
    train(cli_args)
    # train(utils.config.douban_parse_args())  # Douban corpus alternative
