from tqdm import tqdm
import numpy as np
from mindspore import load_checkpoint, load_param_into_net, save_checkpoint
import mindspore.nn as nn
import data_util_2
import mindspore.dataset as ds
import os
import model_tiny
import mindspore as ms
#from mindvision.engine.callback import ValAccMonitor

def train_one_epoch(model, train_dataset, epoch=0):
    """Run one training epoch over `train_dataset` with a tqdm progress bar.

    `model` is expected to be a TrainOneStepCell-like callable that performs
    the forward/backward pass on a batch and returns the loss tensor.
    """
    model.set_train()
    num_steps = train_dataset.get_dataset_size()
    running_loss = 0.0
    steps_done = 0
    with tqdm(total=num_steps) as bar:
        bar.set_description('Epoch %i' % epoch)
        for batch in train_dataset.create_tuple_iterator():
            loss = model(*batch)
            running_loss += loss.asnumpy()
            steps_done += 1
            # Show the running mean loss rather than the per-step loss.
            bar.set_postfix(loss=running_loss / steps_done)
            bar.update(1)


def binary_accuracy(preds, y):
    """Compute the fraction of correct binary predictions in a batch.

    Predictions are rounded to the nearest integer (0/1) and compared
    element-wise against the labels.

    Args:
        preds: array-like of predicted scores/probabilities.
        y: array-like of ground-truth labels, same shape as `preds`.

    Returns:
        Accuracy in [0, 1] as a float; 0.0 for an empty batch.
    """
    preds = np.asarray(preds)
    y = np.asarray(y)
    if preds.size == 0:
        # Original code raised ZeroDivisionError on an empty batch.
        return 0.0
    # Round predictions to the nearest class label.
    rounded_preds = np.around(preds)
    correct = (rounded_preds == y).astype(np.float32)
    # mean() divides by the total element count; the original
    # sum()/len() divided by the first axis only, which is wrong
    # for (batch, 1)-shaped model outputs.
    return correct.mean()


def evaluate(model, test_dataset, criterion, epoch=0):
    """Evaluate `model` on `test_dataset` and return the mean loss per step.

    Accuracy is computed with `binary_accuracy` and shown on the progress
    bar, but only the mean loss is returned to the caller.
    """
    num_steps = test_dataset.get_dataset_size()
    loss_sum = 0
    acc_sum = 0
    steps_done = 0
    # Switch off training-time behavior (e.g. dropout) for evaluation.
    model.set_train(False)

    with tqdm(total=num_steps) as bar:
        bar.set_description('Epoch %i' % epoch)
        for batch in test_dataset.create_tuple_iterator():
            features, labels = batch[0], batch[1]
            predictions = model(features)
            loss_sum += criterion(predictions, labels).asnumpy()

            acc_sum += binary_accuracy(predictions.asnumpy(), labels.asnumpy())

            steps_done += 1
            bar.set_postfix(loss=loss_sum / steps_done, acc=acc_sum / steps_done)
            bar.update(1)

    return loss_sum / num_steps

def training_amazon(net, loss, imdb_train, imdb_valid, ckpt_file_name, lr,epochs):
    """Train `net` with Adam, saving a checkpoint whenever validation loss improves.

    Only the best-so-far model (lowest validation loss) is kept on disk at
    `ckpt_file_name`.
    """
    optimizer = nn.Adam(net.trainable_params(), learning_rate=lr)
    train_step = nn.TrainOneStepCell(nn.WithLossCell(net, loss), optimizer)

    best_valid_loss = float('inf')
    for epoch in range(epochs):
        train_one_epoch(train_step, imdb_train, epoch)
        valid_loss = evaluate(net, imdb_valid, loss, epoch)

        # Checkpoint only when validation loss sets a new best.
        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            save_checkpoint(net, ckpt_file_name)

def traning_process(args_opt):
    """End-to-end training pipeline: load data, preprocess, split, build the
    RNN, and train with best-validation-loss checkpointing.

    NOTE(review): the function name keeps its original misspelling
    ("traning") because external callers may reference it.
    """
    # Load the raw training data.
    raw = data_util_2.load_dataset(args_opt.train_data_path)
    print('load data successfully')

    # Build vocab/embeddings from GloVe and turn raw text into batches.
    vocab, embeddings = data_util_2.load_glove(args_opt.glove_path, args_opt.glove_dimention)
    dataset = data_util_2.data_preprocess(raw, vocab, embeddings, args_opt.batch_size)
    print('pre-process data successfully')

    # No separate validation set is available, so split the training data.
    ds.config.set_seed(58)  # fixed seed keeps the split reproducible
    train_ratio = args_opt.train_ratio
    train_dataset, valid_dataset = dataset.split([train_ratio, 1 - train_ratio])
    print('split the dataset successfully')

    # Hyper parameters.
    hidden_size = args_opt.hidden_size
    output_size = args_opt.class_num
    num_layers = args_opt.num_layers
    bidirectional = True
    dropout = args_opt.dropout
    pad_idx = vocab.tokens_to_ids('<pad>')
    ckpt_file_name = os.path.join(args_opt.out_path, 'amazon_lstm.ckpt')

    # The original if/else selected the same loss in both branches, so the
    # args_opt.loss check was dead code; only one loss is supported for now.
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=False)

    # Construct the model.
    net = model_tiny.RNN(embeddings, hidden_size, output_size, num_layers,
                         bidirectional, dropout, pad_idx)
    print('load model successully')

    # Train on the amazon_review split and save the best checkpoint.
    training_amazon(net, loss, train_dataset, valid_dataset, ckpt_file_name,
                    args_opt.lr, args_opt.epochs)

def train(args_opt):
    """Train the RNN with the MindSpore Model API, checkpointing each epoch.

    Unlike `traning_process`, this variant uses the high-level `ms.Model`
    training loop (no validation split) and returns the trained network so
    callers can reuse its parameters.
    """
    # Load the raw training data.
    raw = data_util_2.load_dataset(args_opt.train_data_path)
    print('load data successfully')

    # Build vocab/embeddings from GloVe and turn raw text into batches.
    vocab, embeddings = data_util_2.load_glove(args_opt.glove_path, args_opt.glove_dimention)
    train_dataset = data_util_2.data_preprocess(raw, vocab, embeddings, args_opt.batch_size)
    print('pre-process data successfully')

    # Hyper parameters.
    hidden_size = args_opt.hidden_size
    output_size = args_opt.class_num
    num_layers = args_opt.num_layers
    bidirectional = True
    dropout = args_opt.dropout
    pad_idx = vocab.tokens_to_ids('<pad>')

    # The original if/else selected the same loss in both branches, so the
    # args_opt.loss check was dead code; only one loss is supported for now.
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=False)

    # Construct the model.
    net = model_tiny.RNN(embeddings, hidden_size, output_size, num_layers,
                         bidirectional, dropout, pad_idx)
    print('load model successully')

    # Optimizer. NOTE(review): this reads args_opt.learning_rate while
    # traning_process() reads args_opt.lr — confirm both attributes exist
    # on the argument object.
    network_opt = nn.Adam(params=net.trainable_params(), learning_rate=args_opt.learning_rate)

    # Metric reported by the Model API.
    metrics = {"Accuracy": nn.Accuracy()}

    # High-level training wrapper.
    model = ms.Model(net, loss_fn=loss, optimizer=network_opt, metrics=metrics)

    # Hoisted: the original called get_dataset_size() three times.
    steps_per_epoch = train_dataset.get_dataset_size()

    # Checkpoint once per epoch, keeping at most the last 10 checkpoints.
    config_ck = ms.CheckpointConfig(
        save_checkpoint_steps=steps_per_epoch,
        keep_checkpoint_max=10)
    ckpoint_cb = ms.ModelCheckpoint(prefix="epoch",
                                    directory=args_opt.out_path + "/",
                                    config=config_ck)
    print("============== Starting Training ==============")
    print("one epoch: %i steps" % steps_per_epoch)
    # Train; progress/timing are reported by the LossMonitor/TimeMonitor
    # callbacks and checkpoints are written by ckpoint_cb.
    model.train(args_opt.epochs,
                train_dataset,
                callbacks=[ms.TimeMonitor(data_size=steps_per_epoch),
                           ckpoint_cb,
                           ms.LossMonitor()])
    print('训练完成,返回网络参数')
    return net