import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from sklearn.metrics import accuracy_score
from torch.optim import Adam
from Dataloader.dataloader_utils import RandomSampler
from pytorch_transformers.modeling_bert import BertOnlyMLMHead
from .sent2vec_utils import TokenizerBasedModel, SentenceModel
from .sent2vec_utils import grad_reverse





class SentenceTrainer(TokenizerBasedModel):
    """Minimal trainer wrapper exposing a SentenceModel as an nn.Module."""

    def __init__(self, sentence_model: SentenceModel):
        super(SentenceTrainer, self).__init__()
        # the underlying encoder that maps raw sentences to vectors
        self.sent2vec = sentence_model

    def forward(self, sents):
        """Encode a batch of raw sentences; delegates to the wrapped model."""
        vectors = self.sent2vec(sents)
        return vectors

class LMTrainer(TokenizerBasedModel):
    """Masked-language-model (MLM) trainer built on a sentence encoder.

    A ``BertOnlyMLMHead`` is stacked on top of the encoder's token-level
    hidden states; :meth:`LMTrain` optimizes the masked-token
    cross-entropy while tracking validation / test accuracy.
    """

    # Fraction of input tokens that sents2mlm_ids masks for the MLM task.
    MLM_PROB = 0.2

    def __init__(self, sentence_model, config):
        super(LMTrainer, self).__init__()
        self.sent2vec = sentence_model
        self.config = config
        # Unmasked positions carry label -1 and are excluded from the loss.
        self.LM_loss_fn = nn.CrossEntropyLoss(ignore_index=-1)
        self.lm_cls = BertOnlyMLMHead(config).to(device=self.device)

    def _mlm_scores(self, sents):
        """Mask ``sents``, encode them, and score every vocabulary token.

        Returns ``(prediction_scores, masked_lm_labels)``; labels are -1
        at positions that were not masked.
        """
        input_ids, att_masks, masked_lm_labels = self.sent2vec.sents2mlm_ids(
            sents, mlm_probs=self.MLM_PROB)
        hiddens, _ = self.sent2vec.tokens2vecs(input_ids, att_masks)
        return self.lm_cls(hiddens), masked_lm_labels

    def forward(self, sents):
        """Return token hidden states for a randomly masked batch."""
        input_ids, att_masks, _ = self.sent2vec.sents2mlm_ids(
            sents, mlm_probs=self.MLM_PROB)
        hiddens, _ = self.sent2vec.tokens2vecs(input_ids, att_masks)
        return hiddens

    def LMLoss(self, sents):
        """Masked-LM cross-entropy loss and accuracy for one batch."""
        prediction_scores, masked_lm_labels = self._mlm_scores(sents)
        masked_lm_loss = self.LM_loss_fn(
            prediction_scores.view(-1, self.config.vocab_size),
            masked_lm_labels.view(-1))
        # Accuracy is evaluated only on masked positions (label >= 0).
        d_flag = masked_lm_labels >= 0
        y_label = masked_lm_labels[d_flag].view(-1)
        pred = prediction_scores[d_flag].view(-1, self.config.vocab_size)
        masked_lm_acc = accuracy_score(y_label.cpu().numpy(),
                                       pred.cpu().argmax(dim=1).numpy())
        return masked_lm_loss, masked_lm_acc

    def LMValid(self, data_loader):
        """Evaluate masked-LM (accuracy, loss) over an entire loader."""
        self.eval()
        with torch.no_grad():
            preds = []
            labels = []
            for sents in data_loader:
                prediction_scores, masked_lm_labels = self._mlm_scores(sents)
                d_flag = masked_lm_labels >= 0
                labels.append(masked_lm_labels[d_flag].view(-1))
                preds.append(
                    prediction_scores[d_flag].view(-1, self.config.vocab_size))
                torch.cuda.empty_cache()
            pred_tensor = torch.cat(preds).cpu()
            label_tensor = torch.cat(labels).cpu()
            val_acc = accuracy_score(label_tensor.numpy(),
                                     pred_tensor.argmax(dim=1).numpy())
            val_loss = self.LM_loss_fn(pred_tensor, label_tensor)
        self.train()
        return val_acc, val_loss

    def LMTrain(self,
                sent_loader:DataLoader,
                dev_loader:DataLoader,
                test_loader:DataLoader,
                max_epoch=10,
                print_every=10,
                valid_every=1000,
                learning_rate=2e-3,
                model_file = "../saved/lm_model.pkl"
                ):
        """Train the MLM head (and, more slowly, the encoder).

        Prints running loss/acc every ``print_every`` steps and runs dev /
        test validation every ``valid_every`` steps, saving a checkpoint
        after each validation.
        """
        # The pretrained encoder is fine-tuned 100x slower than the
        # freshly initialised MLM head.
        optim = Adam([
            {'params': self.sent2vec.parameters(), 'lr': 0.01*learning_rate},
            {'params': self.lm_cls.parameters(), 'lr': learning_rate}
        ])
        sum_loss = 0.0
        sum_acc = 0.0
        best_valid_acc = 0.0
        best_test_acc = 0.0
        best_valid_test_acc = 0.0
        self.train()
        for epoch in range(max_epoch):
            for step, sents in enumerate(sent_loader):
                loss, acc = self.LMLoss(sents)
                optim.zero_grad()
                loss.backward()
                optim.step()
                torch.cuda.empty_cache()
                # Accumulate as a python float: keeping the loss tensor
                # would retain each step's autograd graph (memory leak).
                sum_loss += float(loss)
                sum_acc += acc
                if (step+1) % print_every == 0:
                    print("#LM Loss# %3d|%3d [%3d, %3d] loss:%6.7f  acc:%6.7f" % (
                        step, len(sent_loader),
                        epoch, max_epoch,
                        sum_loss * 1.0 / print_every, sum_acc * 1.0 / print_every))
                    sum_loss = 0.0
                    sum_acc = 0.0

                if (step+1) % valid_every == 0:
                    val_acc, val_loss = self.LMValid(dev_loader)
                    test_acc, test_loss = self.LMValid(test_loader)
                    print(
                        '##### %6d | %5d [%3d, %3d], val_loss|val_acc = %6.8f/%6.7f, test_loss|test_acc = %6.8f/%6.7f, best_valid_acc/best_valid_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                            step, len(sent_loader),
                            epoch, max_epoch,
                            val_loss, val_acc,
                            test_loss, test_acc,
                            best_valid_acc, best_valid_test_acc,
                            best_test_acc
                        )
                    )
                    best_test_acc = test_acc if test_acc > best_test_acc else best_test_acc
                    if val_acc > best_valid_acc:
                        best_valid_acc = val_acc
                        best_valid_test_acc = test_acc
                    # NOTE: checkpoint saved after every validation, not
                    # only on improvement (matches original behavior).
                    self.save_model(model_file)

    def save_model(self, model_file):
        """Persist encoder and MLM-head state dicts to ``model_file``."""
        torch.save(
            {
                "sent2vec" : self.sent2vec.state_dict(),
                "lm_cls" : self.lm_cls.state_dict()
             },
            model_file
        )

    def load_model(self, pretrained_file):
        """Restore encoder and MLM-head states saved by :meth:`save_model`."""
        ch = torch.load(pretrained_file)
        self.sent2vec.load_state_dict(ch['sent2vec'])
        self.lm_cls.load_state_dict(ch['lm_cls'])

class Distill_LM_Trainer(LMTrainer):
    """Distills a teacher encoder's token representations into a student.

    The student (``self.sent2vec``) learns to match the teacher's token
    hidden states, either alone (:meth:`DistillTrain`) or jointly with
    the masked-LM objective (:meth:`DistillLM_Train`).
    """

    def __init__(self, teacher_sent2vec, student_sent2vec, config):
        super(Distill_LM_Trainer, self).__init__(student_sent2vec, config)
        # Teacher is only ever run under no_grad; it is never updated.
        self.t_s2v = teacher_sent2vec

    def SentVec_and_distillLoss(self, sents, vecs):
        """Student sentence vectors and their L2 distance to target ``vecs``.

        NOTE(review): this calls ``self.sents2ids`` / ``self.tokens2vecs``
        on the trainer itself, unlike the other methods which go through
        ``self.sent2vec`` — presumably inherited from TokenizerBasedModel;
        confirm this is intended.
        """
        ipt_ids, attn_masks = self.sents2ids(sents)
        outs, hiddens, cells = self.tokens2vecs(ipt_ids, attn_masks, False)
        # Sentence vector = [CLS] state + max-pool over remaining tokens.
        sent_vecs = outs[:, 0, :] + outs[:, 1:, :].max(dim=1)[0]
        distill_loss = (sent_vecs - vecs).norm(2)
        return sent_vecs, distill_loss

    def TokenVec_and_distillLoss(self, input_ids, att_masks):
        """Student token states plus MSE gap to the teacher's token states.

        Padding positions are zeroed via the attention mask before the
        squared error is averaged.
        """
        with torch.no_grad():
            t_hiddens, _ = self.t_s2v.tokens2vecs(input_ids, att_masks)
        val_masks = att_masks.unsqueeze(-1)
        s_hiddens, _ = self.sent2vec.tokens2vecs(input_ids, att_masks)
        distill_loss = torch.pow((s_hiddens - t_hiddens)*val_masks, 2).mean()
        return s_hiddens, distill_loss

    def DistillLMLoss(self, sents, distill_weight=0.5):
        """Joint masked-LM + distillation loss for one batch.

        Returns ``(final_loss, masked_lm_loss, distill_loss, masked_lm_acc)``
        with ``final_loss = masked_lm_loss + distill_weight * distill_loss``.
        The default ``distill_weight`` preserves the original 0.5 blend.
        """
        input_ids, att_masks, masked_lm_labels = self.sent2vec.sents2mlm_ids(sents, mlm_probs=0.2)
        hiddens, distill_loss = self.TokenVec_and_distillLoss(input_ids, att_masks)
        prediction_scores = self.lm_cls(hiddens)
        masked_lm_loss = self.LM_loss_fn(prediction_scores.view(-1, self.config.vocab_size),
                                         masked_lm_labels.view(-1))
        # Accuracy only over the masked positions (label >= 0).
        d_flag = masked_lm_labels >= 0
        y_label = masked_lm_labels[d_flag].view(-1)
        pred = prediction_scores[d_flag].view(-1, self.config.vocab_size)
        masked_lm_acc = accuracy_score(y_label.cpu().numpy(),
                                       pred.cpu().argmax(dim=1).numpy())
        final_loss = masked_lm_loss + distill_weight * distill_loss
        return final_loss, masked_lm_loss, distill_loss, masked_lm_acc

    def DistillTrain(self, sent_loader, max_epochs=10, print_every=10,
                     learning_rate=2e-3, model_file="../saved/Distill_LSTM_LM.pkl"):
        """Train the student on the distillation objective only."""
        optim = Adam([
            {'params': self.sent2vec.parameters(), 'lr': learning_rate}
        ])
        sum_loss = 0.0
        self.train()
        for epoch in range(max_epochs):
            for step, sents in enumerate(sent_loader):
                input_ids, att_masks, _ = self.sent2vec.sents2mlm_ids(sents, mlm_probs=0.2)
                _, distill_loss = self.TokenVec_and_distillLoss(input_ids, att_masks)
                optim.zero_grad()
                distill_loss.backward()
                optim.step()
                torch.cuda.empty_cache()
                # float() frees each step's autograd graph; accumulating the
                # tensor itself would leak memory across the epoch.
                sum_loss += float(distill_loss)
                if (step + 1) % print_every == 0:
                    print(
                        "#Distill Loss# %3d|%3d [%3d | %3d] distill_loss:%6.7f" % (
                            step, len(sent_loader),
                            epoch, max_epochs,
                            sum_loss * 1.0 / print_every))
                    sum_loss = 0.0
            self.save_model(model_file)

    def DistillLM_Train(self, sent_loader, dev_loader, test_loader, max_epochs=10, print_every=10,
                        valid_every=1000, learning_rate=2e-3, model_file="../saved/Distill_LSTM_LM.pkl"):
        """Jointly train the student on masked-LM + distillation losses."""
        optim = Adam([
            {'params': self.sent2vec.parameters(), 'lr': learning_rate},
            {'params': self.lm_cls.parameters(), 'lr': learning_rate}
        ])
        sum_loss1, sum_loss2, sum_loss3, sum_acc = 0.0, 0.0, 0.0, 0.0
        best_valid_acc, best_test_acc, best_valid_test_acc = 0.0, 0.0, 0.0
        self.train()
        for epoch in range(max_epochs):
            for step, sents in enumerate(sent_loader):
                final_loss, masked_lm_loss, distill_loss, masked_lm_acc = self.DistillLMLoss(sents)
                optim.zero_grad()
                final_loss.backward()
                optim.step()
                torch.cuda.empty_cache()
                # Accumulate python floats, not tensors (see DistillTrain).
                sum_loss1 += float(final_loss)
                sum_loss2 += float(masked_lm_loss)
                sum_loss3 += float(distill_loss)
                sum_acc += masked_lm_acc
                if (step + 1) % print_every == 0:
                    print(
                        "#FineTune Loss# %3d|%3d [%3d | %3d] final_loss:%6.7f, distill_loss:%6.7f, loss:%6.7f, acc:%6.7f" % (
                            step, len(sent_loader),
                            epoch, max_epochs,
                            sum_loss1 * 1.0 / print_every, sum_loss3 * 1.0 / print_every,
                            sum_loss2 * 1.0 / print_every, sum_acc * 1.0 / print_every))
                    sum_loss1, sum_loss2, sum_loss3, sum_acc = 0.0, 0.0, 0.0, 0.0

                if (step + 1) % valid_every == 0:
                    val_acc, val_loss = self.LMValid(dev_loader)
                    test_acc, test_loss = self.LMValid(test_loader)
                    print(
                        '##### %6d | %5d [%3d | %3d], val_loss|val_acc = %6.8f/%6.7f, test_loss|test_acc = %6.8f/%6.7f, best_valid_acc/best_valid_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                            step, len(sent_loader),
                            epoch, max_epochs,
                            val_loss, val_acc,
                            test_loss, test_acc,
                            best_valid_acc, best_valid_test_acc,
                            best_test_acc
                        )
                    )
                    best_test_acc = test_acc if test_acc > best_test_acc else best_test_acc
                    if val_acc > best_valid_acc:
                        best_valid_acc = val_acc
                        best_valid_test_acc = test_acc
        self.save_model(model_file)

class SentimentTrainer(SentenceTrainer):
    """Sentence-level sentiment classifier trainer.

    A linear (or caller-supplied) head over the sentence vector is
    trained with NLL loss on softmax probabilities.
    """

    def __init__(self, sentence_model, senti_cls=None, senti_label_num=2):
        super(SentimentTrainer, self).__init__(sentence_model)
        self.senti_loss_fn = nn.NLLLoss()
        if senti_cls is None:
            self.senti_cls = nn.Linear(sentence_model.sent_hidden_size, senti_label_num).to(device=self.device)
        else:
            self.senti_cls = senti_cls.to(self.device)
        # Encoder fine-tuned 100x slower than the classification head.
        self.optim = Adam([
            {'params': self.sent2vec.parameters(), 'lr': 2e-5, "weight_decay": 0.1},
            {'params': self.senti_cls.parameters(), 'lr': 2e-3, "weight_decay": 0.1},
        ])

    def SentimenTrain(self, tr_loader, dev_loader, te_loader, max_epoches=20,
                      print_every=10, valid_every=100, model_file=""):
        """Train the sentiment head; checkpoint on dev-accuracy improvement.

        (Method name kept as-is — 'SentimenTrain' — for caller compatibility.)
        """
        sum_loss = 0.0
        sum_acc = 0.0
        best_valid_acc = 0.0
        best_test_acc = 0.0
        best_valid_test_acc = 0.0
        self.train()
        for epoch in range(max_epoches):
            for step, batch in enumerate(tr_loader):
                loss, acc = self.SentimentLoss(batch[0], batch[1])
                self.optim.zero_grad()
                loss.backward()
                self.optim.step()
                torch.cuda.empty_cache()
                # float() frees the step's autograd graph; accumulating the
                # loss tensor itself would leak memory over the epoch.
                sum_loss += float(loss)
                sum_acc += acc
                if (step+1) % print_every == 0:
                    mean_loss = sum_loss * 1.0 / print_every
                    mean_acc = sum_acc * 1.0 / print_every
                    print("#Sentiment Training# %3d|%3d [%3d | %3d] loss/acc: %6.7f / %6.7f" % (
                        step, len(tr_loader),
                        epoch, max_epoches,
                        mean_loss, mean_acc))
                    sum_loss = 0.0
                    sum_acc = 0.0
                if (step+1) % valid_every == 0:
                    val_acc, val_loss = self.SentiValid(dev_loader)
                    test_acc, test_loss = self.SentiValid(te_loader)
                    print(
                        '##### %6d | %5d [%3d | %3d] val_loss|val_acc = %6.8f/%6.7f, test_loss|test_acc = %6.8f/%6.7f, best_valid_acc/best_valid_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                            step, len(tr_loader),
                            epoch, max_epoches,
                            val_loss, val_acc,
                            test_loss, test_acc,
                            best_valid_acc, best_valid_test_acc,
                            best_test_acc
                        )
                    )
                    self.train()
                    best_test_acc = test_acc if test_acc > best_test_acc else best_test_acc
                    if val_acc > best_valid_acc:
                        best_valid_acc = val_acc
                        best_valid_test_acc = test_acc
                        self.save_model(model_file)

    def SentimentLoss(self, sents, labels):
        """NLL loss and accuracy on one (sentences, labels) batch."""
        batch_y = labels.to(self.device)
        preds = self.SentimentScore(sents)
        loss = self.senti_loss_fn(preds.log(), batch_y)
        acc = accuracy_score(batch_y.cpu(), preds.cpu().argmax(dim=1))
        return loss, acc

    def SentimentScore(self, sents):
        """Return per-class softmax probabilities for a batch of sentences."""
        sent_vecs = self.sent2vec(sents)
        preds = self.senti_cls(sent_vecs).softmax(dim=1)
        return preds

    def SentiValid(self, data_set, pretrained_file=None):
        """Evaluate (accuracy, loss) over a loader; optionally load weights."""
        self.eval()
        if pretrained_file is not None:
            self.load_model(pretrained_file=pretrained_file)
        with torch.no_grad():
            preds = []
            labels = []
            for batch in data_set:
                pred = self.SentimentScore(batch[0])
                y_label = batch[1].to(self.device)
                labels.append(y_label)
                preds.append(pred)
                torch.cuda.empty_cache()
            pred_tensor = torch.cat(preds).cpu()
            label_tensor = torch.cat(labels).cpu()
            val_acc = accuracy_score(label_tensor.numpy(),
                                     pred_tensor.argmax(dim=1).numpy())
            val_loss = self.senti_loss_fn(pred_tensor.log(), label_tensor).mean()
        self.train()
        return val_acc, val_loss

    def save_model(self, model_file):
        """Persist encoder and sentiment-head state dicts."""
        torch.save(
            {
                "sent2vec" : self.sent2vec.state_dict(),
                "senti_cls" : self.senti_cls.state_dict()
             },
            model_file
        )

    def load_model(self, pretrained_file):
        """Restore states saved by :meth:`save_model`."""
        ch = torch.load(pretrained_file)
        self.sent2vec.load_state_dict(ch['sent2vec'])
        self.senti_cls.load_state_dict(ch['senti_cls'])

class TopicTrainer(SentenceTrainer):
    """Topic classifier trainer with gradient accumulation.

    Effective batch size is ``batch_size * grad_accum_cnt``: gradients
    from every mini-batch are accumulated and the optimizer steps once
    per ``grad_accum_cnt`` batches.
    """

    def __init__(self, sentence_model, topic_label_num, sent_hidden_size, topic_cls=None, batch_size=8, grad_accum_cnt=4):
        super(TopicTrainer, self).__init__(sentence_model)
        self.topic_loss_fn = nn.NLLLoss()
        if topic_cls is None:
            self.topic_cls = nn.Linear(sent_hidden_size, topic_label_num).to(self.device)
        else:
            self.topic_cls = topic_cls.to(self.device)
        self.batch_size = batch_size
        self.grad_accum_cnt = grad_accum_cnt

    def TopicLoss(self, sents, labels):
        """NLL loss and accuracy on one (sentences, labels) batch."""
        preds = self.TopicScore(sents)
        loss = self.topic_loss_fn(preds.log(), labels.to(self.device))
        acc = accuracy_score(labels.cpu(), preds.cpu().argmax(dim=1))
        return loss, acc

    def TopicScore(self, sents):
        """Return per-topic softmax probabilities for a batch of sentences."""
        sent_vecs = self.sent2vec(sents)
        preds = self.topic_cls(sent_vecs).softmax(dim=1)
        return preds

    def TopicValid(self, data_loader, pretrained_file=None):
        """Evaluate (accuracy, loss) over a loader; optionally load weights."""
        self.eval()
        if pretrained_file is not None:
            self.load_model(pretrained_file=pretrained_file)
        with torch.no_grad():
            preds = []
            labels = []
            for batch in data_loader:
                pred = self.TopicScore(batch[0])
                y_label = batch[1].to(self.device)
                labels.append(y_label)
                preds.append(pred)
                torch.cuda.empty_cache()
        pred_tensor = torch.cat(preds).cpu()
        label_tensor = torch.cat(labels).cpu()
        val_acc = accuracy_score(label_tensor.numpy(),
                                 pred_tensor.argmax(dim=1).numpy())
        val_loss = self.topic_loss_fn(pred_tensor.log(), label_tensor).mean()
        self.train()
        return val_acc, val_loss

    def TopicTrain(self, tr, dev, te, max_epoches=20,
                   print_every=10, valid_every=100, model_file="../saved/TopicModel.pkl"):
        """Train the topic classifier with gradient accumulation.

        Checkpoints whenever dev accuracy improves.
        """
        tr_loader = DataLoader(tr, batch_size=self.batch_size, shuffle=True, collate_fn=tr.collate_raw_batch)
        dev_loader = DataLoader(dev, batch_size=self.batch_size, shuffle=False, collate_fn=dev.collate_raw_batch)
        te_loader = DataLoader(te, batch_size=self.batch_size, shuffle=False, collate_fn=te.collate_raw_batch)

        optim = Adam([
            {'params': self.sent2vec.parameters(), 'lr': 2e-5, "weight_decay": 0.1},
            {'params': self.topic_cls.parameters(), 'lr': 2e-5, "weight_decay": 0.1}
        ])

        sum_loss = 0.0
        sum_acc = 0.0
        best_valid_acc = 0.0
        best_test_acc = 0.0
        best_valid_test_acc = 0.0

        self.train()
        optim.zero_grad()
        for epoch in range(max_epoches):
            for step, batch in enumerate(tr_loader):
                loss, acc = self.TopicLoss(batch[0], batch[1])
                # BUGFIX: backward() must run on EVERY batch; the original
                # only ran it on accumulation boundaries, so gradients from
                # (grad_accum_cnt - 1)/grad_accum_cnt of the data were
                # silently discarded. Scale so the accumulated gradient
                # equals the mean over the accumulation window.
                (loss / self.grad_accum_cnt).backward()
                if (step+1) % self.grad_accum_cnt == 0:
                    optim.step()
                    optim.zero_grad()
                    torch.cuda.empty_cache()
                sum_loss += float(loss)
                sum_acc += float(acc)

                if (step + 1) % (print_every*self.grad_accum_cnt) == 0:
                    mean_loss = sum_loss * 1.0 / (print_every*self.grad_accum_cnt)
                    mean_acc = sum_acc * 1.0 / (print_every*self.grad_accum_cnt)
                    print("#Topic Training# %3d | %3d [%3d | %3d] loss/acc: %6.7f / %6.7f" % (
                        step, len(tr_loader),
                        epoch, max_epoches,
                        mean_loss, mean_acc))
                    sum_loss = 0.0
                    sum_acc = 0.0

                if (step + 1) % (valid_every * self.grad_accum_cnt) == 0:
                    val_acc, val_loss = self.TopicValid(dev_loader)
                    test_acc, test_loss = self.TopicValid(te_loader)
                    print(
                        '##### %6d | %5d [%3d | %3d] val_loss|val_acc = %6.8f/%6.7f, test_loss|test_acc = %6.8f/%6.7f, best_valid_acc/best_valid_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                            step, len(tr_loader),
                            epoch, max_epoches,
                            val_loss, val_acc,
                            test_loss, test_acc,
                            best_valid_acc, best_valid_test_acc,
                            best_test_acc
                        )
                    )
                    self.train()
                    best_test_acc = test_acc if test_acc > best_test_acc else best_test_acc
                    if val_acc > best_valid_acc:
                        best_valid_acc = val_acc
                        best_valid_test_acc = test_acc
                        self.save_model(model_file)

    def save_model(self, model_file):
        """Persist encoder and topic-head state dicts."""
        torch.save(
            {
                "sent2vec": self.sent2vec.state_dict(),
                "topic_cls": self.topic_cls.state_dict()
            },
            model_file
        )

    def load_model(self, pretrained_file):
        """Restore states saved by :meth:`save_model`."""
        ch = torch.load(pretrained_file)
        self.sent2vec.load_state_dict(ch['sent2vec'])
        self.topic_cls.load_state_dict(ch['topic_cls'])

class TopicGANTrainer(TopicTrainer):
    """Adversarial topic trainer.

    The discriminator (``topic_cls``) learns to predict the topic from
    sentence vectors, while the encoder is pushed to remove topic
    information (optionally via a gradient-reversal layer).
    """

    def __init__(self, sentence_model, topic_label_num, sent_hidden_size, grl=True):
        super(TopicGANTrainer, self).__init__(sentence_model, topic_label_num, sent_hidden_size)
        # Deeper 2-layer discriminator replaces the base linear head.
        self.topic_cls = nn.Sequential(
            nn.Linear(sent_hidden_size, sent_hidden_size * 2),
            nn.ReLU(),
            nn.Linear(sent_hidden_size * 2, topic_label_num)
        ).to(self.device)
        self.grl = grl

    def TopicScore(self, sents):
        """Topic probabilities; with grl=True gradients to the encoder
        are sign-flipped by the gradient-reversal layer."""
        sent_vecs = self.sent2vec(sents)
        if self.grl:
            sent_vecs = grad_reverse(sent_vecs)
        preds = self.topic_cls(sent_vecs).softmax(dim=1)
        return preds

    def TopicGANTrain(self, tr, dev, te,
                         max_train_iters=100, min_step=10, max_step=1,
                         valid_every=100, learning_rate=2e-3,
                         model_file="../saved/topic_bert.pkl"):
        """Alternate discriminator (Minimum) and encoder (Maximum) updates."""
        dev_loader = DataLoader(dev, batch_size=self.batch_size, shuffle=False, collate_fn=dev.collate_raw_batch)
        te_loader = DataLoader(te, batch_size=self.batch_size, shuffle=False, collate_fn=te.collate_raw_batch)
        tr_sampler = RandomSampler(tr, tr.collate_raw_batch)
        # NOTE(review): the negative encoder LR on top of grad_reverse
        # flips the gradient sign twice when grl=True — confirm whether
        # one of the two inversions is redundant.
        G_optim = Adam([
            {'params': self.sent2vec.parameters(), 'lr': -0.01*learning_rate, "weight_decay": 0.1},
        ])
        D_optim = Adam([
            {'params': self.topic_cls.parameters(), 'lr': learning_rate, "weight_decay": 0.1},
        ])

        def Maximum(step_num):
            """Encoder (generator) phase: update sent2vec against the topic loss."""
            sum_loss = 0.
            sum_acc = 0.
            for step in range(step_num*self.grad_accum_cnt):
                batch = tr_sampler.sample(self.batch_size)
                loss, acc = self.TopicLoss(batch[0], batch[1])
                loss.backward()
                torch.cuda.empty_cache()
                sum_loss += float(loss)
                sum_acc += float(acc)
                # BUGFIX: `% cnt` is truthy on NON-boundary steps; the
                # original stepped on every step except the accumulation
                # boundary. Step exactly once per grad_accum_cnt batches.
                if (step+1) % self.grad_accum_cnt == 0:
                    G_optim.step()
                    D_optim.zero_grad()
                    G_optim.zero_grad()
            return sum_acc/(step_num*self.grad_accum_cnt), sum_loss/(step_num*self.grad_accum_cnt)

        def Minimum(step_num):
            """Discriminator phase: update topic_cls on frozen sentence vectors."""
            sum_loss = 0.
            sum_acc = 0.
            for step in range(step_num * self.grad_accum_cnt):
                batch = tr_sampler.sample(self.batch_size)
                # Encoder is frozen here: vectors computed under no_grad.
                with torch.no_grad():
                    sent_vecs = self.sent2vec(batch[0])
                preds = self.topic_cls(sent_vecs).softmax(dim=1)
                loss = self.topic_loss_fn(preds.log(), batch[1].to(self.device))
                acc = accuracy_score(batch[1].cpu(), preds.cpu().argmax(dim=1))
                loss.backward()
                torch.cuda.empty_cache()
                sum_loss += float(loss)
                sum_acc += float(acc)
                # BUGFIX: same inverted accumulation condition as Maximum.
                if (step + 1) % self.grad_accum_cnt == 0:
                    D_optim.step()
                    D_optim.zero_grad()
                    G_optim.zero_grad()
            return sum_acc/(step_num*self.grad_accum_cnt), sum_loss/(step_num*self.grad_accum_cnt)

        best_valid_acc = 0.0
        best_test_acc = 0.0
        best_valid_test_acc = 0.0
        self.train()
        for step in range(max_train_iters):
            min_acc, min_loss = Minimum(min_step)
            max_acc, max_loss = Maximum(max_step)
            print("#Advarsarial Topic Train# %3d|%3d  Minimum loss/acc: %6.7f / %6.7f, Maximum loss/acc: %6.7f / %6.7f" % (
                step, max_train_iters, min_loss, min_acc, max_loss, max_acc))
            if step % valid_every == 0:
                val_acc, val_loss = self.TopicValid(dev_loader)
                test_acc, test_loss = self.TopicValid(te_loader)
                print(
                    '##### %6d | %5d val_loss|val_acc = %6.8f/%6.7f, test_loss|test_acc = %6.8f/%6.7f, best_valid_acc/best_valid_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                        step, max_train_iters,
                        val_loss, val_acc,
                        test_loss, test_acc,
                        best_valid_acc, best_valid_test_acc,
                        best_test_acc
                    )
                )
                self.train()
                best_test_acc = test_acc if test_acc > best_test_acc else best_test_acc
                # BUGFIX: original used `<`, which never fires with
                # best_valid_acc starting at 0.0 — the best checkpoint was
                # never saved. Use `>` like every other trainer here.
                if val_acc > best_valid_acc:
                    best_valid_acc = val_acc
                    best_valid_test_acc = test_acc
                    self.save_model(model_file)