from tqdm import tqdm
import numpy as np
import mindspore.nn as nn
from mindspore import load_checkpoint, load_param_into_net, save_checkpoint
from model import MyWithLossCell,classifier
import mindspore.dataset as ds
import mindspore as ms
import data_util
import os
from mindspore import Callback
import mindspore.ops as ops

class EvalCallback(Callback):
    """
    Evaluate the network at the end of each epoch and keep the best checkpoint.

    Args:
        model (nn.Cell): network used for inference; called as
            model(input_ids, input_mask, token_type_ids).
        eval_ds (Dataset): evaluation dataset yielding
            (input_ids, input_mask, token_type_ids, one_hot_label) tuples.
        begin_eval_epoch (int): first epoch at which evaluation runs. Default: 1.
        save_path (str): path prefix where "best_acc.ckpt" is written. Default: "./".
        num_classes (int): number of classes used to one-hot encode the argmax
            predictions. Default: 5 (matches the original hard-coded value).
    """
    def __init__(self, model, eval_ds, begin_eval_epoch=1, save_path="./", num_classes=5):
        self.model = model
        self.eval_ds = eval_ds
        self.begin_eval_epoch = begin_eval_epoch
        self.best_acc = 0
        self.save_path = save_path
        # 'multilabel' mode compares the full one-hot vectors element-wise.
        self.metric = nn.Accuracy('multilabel')
        self.total = eval_ds.get_dataset_size()
        self.argmax_ops = ops.Argmax()
        self.onehot = ops.OneHot()
        self.on_value = ms.Tensor(1.0, ms.float32)
        self.off_value = ms.Tensor(0.0, ms.float32)
        self.depth = num_classes

    def epoch_end(self, run_context):
        """Evaluate at epoch end; save the checkpoint with the best accuracy."""
        self.model.set_train(False)
        cb_params = run_context.original_args()
        cur_epoch = cb_params.cur_epoch_num
        if cur_epoch >= self.begin_eval_epoch:

            with tqdm(total=self.total) as t:
                for i in self.eval_ds.create_tuple_iterator():
                    t.set_description('evaling')
                    predictions = self.model(i[0], i[1], i[2])

                    result = self.argmax_ops(predictions)
                    # Bug fix: use the configured depth (was a hard-coded 5
                    # that ignored self.depth and broke for other class counts).
                    y_pred = self.onehot(result, self.depth, self.on_value, self.off_value)
                    y = i[3]
                    self.metric.update(y_pred, y)
                    t.update(1)

            acc = float(self.metric.eval())
            print("acc of epoch{}:{}".format(cur_epoch, acc))
            self.metric.clear()
            if acc > self.best_acc:
                self.best_acc = acc
                ms.save_checkpoint(cb_params.train_network, self.save_path + "best_acc.ckpt")
                print("the best epoch is", cur_epoch, "best acc is", self.best_acc)
        # Restore training mode for the next epoch.
        self.model.set_train(True)

# Train for one epoch
def train_one_epoch(model, train_dataset, epoch=0):
    """
    Run a single training epoch and report the running mean loss.

    Args:
        model (nn.Cell): a TrainOneStepCell-style network; calling it with
            (input_ids, input_mask, token_type_ids, label) performs one
            optimization step and returns the loss tensor.
        train_dataset (Dataset): training dataset yielding 4-tuples.
        epoch (int): epoch index, used only for the progress-bar caption.

    Returns:
        float: mean training loss over the epoch (0.0 for an empty dataset).
            Previously the accumulated loss was discarded; returning it is
            backward compatible (callers ignoring the result are unaffected).
    """
    model.set_train()
    total = train_dataset.get_dataset_size()
    loss_total = 0
    step_total = 0
    with tqdm(total=total) as t:
        t.set_description('Epoch %i' % epoch)
        for i in train_dataset.create_tuple_iterator():
            # NOTE(review): the original flagged a problem here ("这里存在一点问题");
            # this call assumes the dataset yields exactly
            # (input_ids, input_mask, token_type_ids, label) — confirm upstream.
            loss = model(i[0], i[1], i[2], i[3])
            loss_total += loss.asnumpy()
            step_total += 1
            t.set_postfix(loss=loss_total / step_total)
            t.update(1)
    # Guard against an empty dataset (step_total == 0) to avoid ZeroDivisionError.
    return loss_total / step_total if step_total else 0.0

# Compute the accuracy of a single batch
def binary_accuracy(preds, y):
    """
    Compute the fraction of correct predictions in one batch.

    Predictions are rounded to the nearest integer (numpy round-half-to-even)
    and compared element-wise against the labels.

    Args:
        preds (np.ndarray): raw prediction values.
        y (np.ndarray): ground-truth labels, broadcastable against preds.

    Returns:
        float: accuracy in [0, 1]; 0.0 for an empty batch.
    """
    # Round predictions to the nearest integer class.
    rounded_preds = np.around(preds)
    correct = (rounded_preds == y).astype(np.float32)
    # Guard the empty batch (original raised ZeroDivisionError / returned nan).
    if correct.size == 0:
        return 0.0
    # Mean over ALL elements: the original divided by len(correct), which is
    # only the first dimension and over-counts (acc > 1) for 2-D inputs.
    return float(correct.mean())

def evaluate(model, test_dataset, criterion, epoch=0):
    """
    Run one evaluation pass over *test_dataset*.

    The network is switched to inference mode, per-batch loss and rounded
    accuracy are accumulated and shown on a progress bar, and the mean loss
    over the whole dataset is returned.
    """
    num_batches = test_dataset.get_dataset_size()
    running_loss = 0
    running_acc = 0
    steps_done = 0
    model.set_train(False)

    with tqdm(total=num_batches) as bar:
        bar.set_description('Epoch %i' % epoch)
        for batch in test_dataset.create_tuple_iterator():
            predictions = model(batch[0], batch[1], batch[2])
            loss = criterion(predictions, batch[3])
            running_loss += loss.asnumpy()

            batch_acc = binary_accuracy(predictions.asnumpy(), batch[3].asnumpy())
            running_acc += batch_acc

            steps_done += 1
            bar.set_postfix(loss=running_loss / steps_done, acc=running_acc / steps_done)
            bar.update(1)

    return running_loss / num_batches

def training_amazon(net, loss, amazon_train, amazon_valid, ckpt_file_name, lr, epochs):
    """
    Fine-tune *net* on the Amazon-review data, checkpointing whenever the
    validation loss improves.
    """
    loss_net = MyWithLossCell(net, loss)
    opt = nn.Adam(net.trainable_params(), learning_rate=lr)
    step_net = nn.TrainOneStepCell(loss_net, opt)

    best_valid_loss = float('inf')
    for epoch in range(epochs):
        train_one_epoch(step_net, amazon_train, epoch)
        valid_loss = evaluate(net, amazon_valid, loss, epoch)

        # Keep only the checkpoint with the lowest validation loss so far.
        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            save_checkpoint(net, ckpt_file_name)




def traning_process(args_opt, bert_config):
    """
    End-to-end training on the Amazon-review corpus: load, preprocess,
    split into train/validation sets, build the classifier and train it
    with `training_amazon`.

    NOTE(review): the function keeps its original (misspelled) name
    "traning_process" because external callers may depend on it.
    """
    # load data
    raw = data_util.load_dataset(args_opt.train_data_path)
    print('load data successfully')

    # data preprocessing
    dataset = data_util.pre_process(raw, args_opt.vocab_path, args_opt.max_seq_len, args_opt.batch_size)
    print('pre-process data successfully')

    # dataset split because we got no validation dataset
    ds.config.set_seed(58)
    train_ratio = args_opt.train_ratio
    train_dataset, valid_dataset = dataset.split([train_ratio, 1 - train_ratio])
    print('split the dataset successfully')

    # setting some hyper parameters
    ckpt_file_name = os.path.join(args_opt.out_path, 'amazon.ckpt')
    # Both branches of the original if/else on args_opt.loss constructed the
    # identical loss, so the branch was dead code and has been collapsed.
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=False)

    # construct model
    net = classifier(bert_config, False, args_opt.fc_dim1, args_opt.class_num, args_opt.bert_weight)
    print('load model successully')

    # use amazon_review for training and save model
    training_amazon(net, loss, train_dataset, valid_dataset, ckpt_file_name, args_opt.learning_rate, args_opt.epochs)

def train(args_opt, bert_config):
    """
    Train the classifier with MindSpore's Model API.

    When args_opt.split_or_not == 1 the raw data is split into train/dev
    sets and an EvalCallback tracks the best-accuracy checkpoint; otherwise
    the whole file is used for training only.

    Returns:
        The trained network (the classifier Cell).
    """
    # Idiom fix: collapse the manual 1/0 -> True/False mapping.
    split = args_opt.split_or_not == 1

    # load data and preprocess
    if not split:
        raw = data_util.load_dataset(args_opt.train_data_path, args_opt.split_ration, split)
        train_dataset = data_util.pre_process(raw, args_opt.vocab_path, args_opt.max_seq_len, args_opt.batch_size)
        print('pre-process data successfully')
    else:
        raw_train, raw_dev = data_util.load_dataset(args_opt.train_data_path, args_opt.split_ration, split)
        train_dataset = data_util.pre_process(raw_train, args_opt.vocab_path, args_opt.max_seq_len, args_opt.batch_size)
        dev_dataset = data_util.pre_process(raw_dev, args_opt.vocab_path, args_opt.max_seq_len, args_opt.batch_size)
        print('pre-process data successfully')

    # construct model
    net = classifier(bert_config, False, args_opt.fc_dim1, args_opt.class_num, args_opt.bert_weight)
    print('load model successully')

    # optimizer
    network_opt = nn.Momentum(params=net.trainable_params(), learning_rate=args_opt.learning_rate, momentum=0.9)

    # loss function (labels are expected one-hot since sparse=False)
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=False)

    # net with loss
    net_with_loss = MyWithLossCell(net, loss)

    # model API
    model = ms.Model(net_with_loss, optimizer=network_opt)

    # model save config: one checkpoint per epoch, keep at most 10
    config_ck = ms.CheckpointConfig(
        save_checkpoint_steps=train_dataset.get_dataset_size(),
        keep_checkpoint_max=10)
    ckpoint_cb = ms.ModelCheckpoint(prefix="finetune",
                                    directory=args_opt.out_path + "/",
                                    config=config_ck)

    # Build the callback list once instead of duplicating model.train calls.
    callbacks = [ckpoint_cb, ms.LossMonitor()]
    if split:
        # Per-epoch evaluation on the held-out dev set; saves best_acc.ckpt.
        callbacks.append(EvalCallback(net, dev_dataset, 1, args_opt.out_path + "/"))

    print("============== Starting Training ==============")
    print("one epoch: %i steps" % train_dataset.get_dataset_size())
    # Train (and, when a dev split exists, validate); the best-accuracy
    # checkpoint (best_acc.ckpt) is written to the output directory.
    model.train(args_opt.epochs,
                train_dataset,
                callbacks=callbacks)

    return net