import fitlog
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import os
import sys
from torch.utils.data import DataLoader
from Dataloader.dataloader_utils import RandomSampler
from .model_utils import grad_reverse
from .BaseRumorFramework import RumorDetection
from PropModel.PropgationTrainer import SequenceClassificationTrainer, AdverSequenceClassificationTrainer
import copy


class SentAdverRumorDetection(RumorDetection):
    """Rumor detection with a topic-adversarial head on the sentence encoder.

    The topic classifier is fed the source (first) sentence vector through a
    gradient-reversal layer (``grad_reverse``), so minimizing the joint loss
    trains the rumor path normally while pushing the shared sentence encoder
    toward topic-invariant representations.
    """

    def __init__(self, sent2vec, propagation, classifier, topic_label_num, batch_size=5, grad_accum_cnt=4):
        super(SentAdverRumorDetection, self).__init__(sent2vec, propagation, classifier,
                                                       batch_size=batch_size, grad_accum_cnt=grad_accum_cnt)
        sent_hidden_size = self.sent2vec.sent_hidden_size
        # Topic discriminator applied to the (gradient-reversed) source sentence vector.
        self.topic_cls = nn.Sequential(
            nn.Linear(sent_hidden_size, sent_hidden_size * 2),
            nn.LeakyReLU(),
            nn.Linear(sent_hidden_size * 2, topic_label_num)
        ).to(self.device)
        self.topic_loss_fn = nn.NLLLoss()
        # Learning rates are pre-divided by grad_accum_cnt because gradients of
        # grad_accum_cnt micro-batches are summed (not averaged) before step().
        self.optim = torch.optim.Adam([
            {'params': self.sent2vec.parameters(), 'lr': 2e-5 * 1.0 / self.grad_accum_cnt},
            {'params': self.topic_cls.parameters(), 'lr': 1e-3 * 1.0 / self.grad_accum_cnt},
            {'params': self.prop_model.parameters(), 'lr': 1e-3 * 1.0 / self.grad_accum_cnt},
            {'params': self.rdm_cls.parameters(), 'lr': 1e-3 * 1.0 / self.grad_accum_cnt}
        ])

    def Adver_Loss(self, seqs, rdm_labels, topic_labels):
        """Compute joint rumor + adversarial topic loss for a batch.

        Returns a 5-tuple ``(adver_loss, loss, acc, topic_loss, topic_acc)``
        where ``adver_loss = loss + topic_loss``; the gradient reversal on the
        sentence vectors makes the topic term adversarial w.r.t. sent2vec.
        """
        sents, seq_len = self.seq2sents(seqs)
        sent_vecs = self.sent2vec(sents)
        # Regroup the flat per-sentence tensor back into one tensor per sequence.
        seq_tensors = [sent_vecs[sum(seq_len[:idx]):sum(seq_len[:idx]) + seq_len[idx]] for idx, s_len in
                       enumerate(seq_len)]
        seq_outs = self.prop_model(seq_tensors)
        preds = self.rdm_cls(seq_outs).softmax(dim=1)
        loss = self.nll_loss_fn(preds.log(), rdm_labels)
        acc = accuracy_score(rdm_labels.cpu(), preds.cpu().argmax(dim=1))

        # Topic head sees only the source (first) sentence vector of each
        # sequence, behind the gradient-reversal layer.
        sent_vecs = grad_reverse(torch.stack([seq[0] for seq in seq_tensors]))
        topic_pred = self.topic_cls(sent_vecs).softmax(dim=1)
        topic_loss = self.topic_loss_fn(topic_pred.log(), topic_labels)
        topic_acc = accuracy_score(topic_labels.cpu(), topic_pred.cpu().argmax(dim=1))
        return loss + topic_loss, loss, acc, topic_loss, topic_acc

    def TopicLoss(self, sents, topic_labels):
        """Plain (non-adversarial) topic-classification loss on raw sentences.

        Used by the discriminator-only phase of GANTrainIters. Returns
        ``(topic_acc, topic_loss)`` — note the accuracy-first order.
        """
        sent_vecs = self.sent2vec(sents)
        topic_pred = self.topic_cls(sent_vecs).softmax(dim=1)
        topic_loss = self.topic_loss_fn(topic_pred.log(), topic_labels)
        topic_acc = accuracy_score(topic_labels.cpu(), topic_pred.cpu().argmax(dim=1))
        return topic_acc, topic_loss

    def GANTrainIters(self, train_set, dev_set, test_set,
                      max_train_iters=100, min_step=10, max_step=1,
                      valid_every=100, lr_discount=1.0,
                      model_file=""):
        """GAN-style alternating training.

        Alternates `Maximum` (updates encoder/propagation/rumor head on the
        joint adversarial loss) with `Minimum` (updates only the topic
        discriminator), after an initial 200-step discriminator warm-up.
        """
        tr_sampler = RandomSampler(train_set, collate_fn=train_set.collate_raw_batch)
        dev_loader = DataLoader(dev_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                                collate_fn=dev_set.collate_raw_batch)
        te_loader = DataLoader(test_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                               collate_fn=test_set.collate_raw_batch)

        # "Generator" side: everything except the topic discriminator.
        G_optim = torch.optim.Adam([
            {'params': self.sent2vec.parameters(), 'lr': 2e-5 * lr_discount / self.grad_accum_cnt},
            {'params': self.prop_model.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt},
            {'params': self.rdm_cls.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt}
        ])

        # "Discriminator" side: the topic classifier only.
        D_optim = torch.optim.Adam([
            {'params': self.topic_cls.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt},
        ])

        best_valid_acc = 0.0
        best_test_acc = 0.0
        best_valid_test_acc = 0.0

        def Maximum(step):
            # G-phase: accumulate grad_accum_cnt micro-batches per optimizer step.
            sum_adver_loss, sum_loss, sum_topic_loss = 0.0, 0.0, 0.0
            sum_acc, sum_topic_acc = 0.0, 0.0
            for i in range(step * self.grad_accum_cnt):
                batch = tr_sampler.sample(self.batch_size)
                adver_loss, loss, acc, topic_loss, topic_acc = self.Adver_Loss(batch[0], batch[2].to(self.device),
                                                                               batch[3].to(self.device))
                adver_loss.backward()
                torch.cuda.empty_cache()
                # Accumulate python floats so autograd graphs can be freed.
                sum_adver_loss += float(adver_loss)
                sum_loss += float(loss)
                sum_acc += float(acc)
                sum_topic_loss += float(topic_loss)
                sum_topic_acc += float(topic_acc)
                if (i + 1) % self.grad_accum_cnt == 0:
                    G_optim.step()
                    G_optim.zero_grad()
                    print(
                        'Maximum(%3d | %3d) adver_loss=%6.7f, loss/acc = %6.8f/%6.7f, topic_loss/topic_acc:%6.7f / %6.7f, best_valid_acc:%6.7f ' % (
                            i, step,
                            sum_adver_loss / self.grad_accum_cnt, sum_loss / self.grad_accum_cnt,
                            sum_acc / self.grad_accum_cnt,
                            sum_topic_loss / self.grad_accum_cnt, sum_topic_acc / self.grad_accum_cnt,
                            best_valid_acc
                        )
                    )
                    # Reset the window sums so each printed average covers only
                    # the last grad_accum_cnt micro-batches (previously the
                    # sums grew across windows and the "averages" drifted up).
                    sum_adver_loss, sum_loss, sum_topic_loss = 0.0, 0.0, 0.0
                    sum_acc, sum_topic_acc = 0.0, 0.0
            D_optim.zero_grad()
            G_optim.zero_grad()

        def Minimum(step):
            # D-phase: train the topic discriminator on source sentences only.
            for i in range(step):
                batch = tr_sampler.sample(self.batch_size * self.grad_accum_cnt)
                sents = [seq[0] for seq in batch[0]]
                topic_acc, topic_loss = self.TopicLoss(sents, batch[3].to(self.device))
                topic_loss.backward()
                D_optim.step()
                D_optim.zero_grad()
                print(
                    'Minimum(%3d | %3d) topic_loss/topic_acc:%6.7f / %6.7f, best_valid_acc:%6.7f ' % (
                        i, step,
                        topic_loss, topic_acc,
                        best_valid_acc
                    )
                )
            D_optim.zero_grad()
            G_optim.zero_grad()

        print("======init minimum====")
        Minimum(200)
        for train_iter in range(max_train_iters):  # renamed from `iter` (shadowed the builtin)
            print("=======Iter %3d: Maxmimum ======" % train_iter)
            Maximum(max_step)
            print("=======Iter %3d: Minimum ======" % train_iter)
            Minimum(min_step)
            # NOTE(review): with the defaults (max_train_iters=100,
            # valid_every=100, grad_accum_cnt=4) this condition never fires, so
            # no checkpoint is saved — confirm the intended validation cadence.
            if (train_iter + 1) % (valid_every * self.grad_accum_cnt) == 0:
                val_acc, val_loss = self.valid(dev_loader)
                test_acc, test_loss = self.valid(te_loader)
                self.train()
                best_test_acc = test_acc if best_test_acc < test_acc else best_test_acc
                print(
                    '##### %6d | %6d, val_loss|val_acc = %6.8f/%6.7f, te_loss|te_acc = %6.8f/%6.7f, best_valid_acc/related_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                        train_iter, max_train_iters,
                        val_loss, val_acc,
                        test_loss, test_acc,
                        best_valid_acc, best_valid_test_acc,
                        best_test_acc
                    )
                )
                if val_acc > best_valid_acc:
                    best_valid_acc = val_acc
                    best_valid_test_acc = test_acc
                    self.save_model(model_file)

    def AdverTrainIters(self, train_set, dev_set, test_set,
                        valid_every=100, max_epochs=10,
                        log_dir="../logs/", log_suffix="_RumorDetection", model_file="", RenameModel=True):
        """Epoch-based adversarial training with gradient accumulation.

        Saves the model whenever validation accuracy improves; on completion
        writes best metrics to fitlog and optionally renames the checkpoint
        files to embed the best validation accuracy.
        """
        fitlog.set_log_dir("%s/" % log_dir, new_log=True)
        train_loader = DataLoader(train_set, batch_size=self.batch_size,
                                  collate_fn=train_set.collate_raw_batch)
        dev_loader = DataLoader(dev_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                                collate_fn=dev_set.collate_raw_batch)
        te_loader = DataLoader(test_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                               collate_fn=test_set.collate_raw_batch)

        best_valid_acc = 0.0
        best_test_acc = 0.0
        best_valid_test_acc = 0.0
        counter = 0
        self.optim.zero_grad()
        self.train()
        sum_adver_loss = 0.0
        sum_loss, sum_topic_loss = 0.0, 0.0
        sum_acc, sum_topic_acc = 0.0, 0.0
        for epoch in range(max_epochs):
            for step, batch in enumerate(train_loader):
                adver_loss, loss, acc, topic_loss, topic_acc = self.Adver_Loss(batch[0], batch[2].to(self.device),
                                                                               batch[3].to(self.device))
                adver_loss.backward()
                # Step only after a full accumulation window; the previous
                # `step % cnt == 0` stepped after the very first micro-batch.
                if (step + 1) % self.grad_accum_cnt == 0:
                    self.optim.step()
                    self.optim.zero_grad()
                torch.cuda.empty_cache()
                # Accumulate python floats: `sum += tensor` would keep every
                # batch's autograd graph alive and leak GPU memory.
                sum_adver_loss += float(adver_loss)
                sum_loss += float(loss)
                sum_acc += float(acc)
                sum_topic_loss += float(topic_loss)
                sum_topic_acc += float(topic_acc)
                if (step + 1) % self.grad_accum_cnt == 0:
                    print(
                        '%6d | %6d  [%3d | %3d], adver_loss=%6.7f, loss/acc = %6.8f/%6.7f, topic_loss/topic_acc:%6.7f / %6.7f, best_valid_acc:%6.7f ' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            sum_adver_loss / self.grad_accum_cnt, sum_loss / self.grad_accum_cnt,
                            sum_acc / self.grad_accum_cnt,
                            sum_topic_loss / self.grad_accum_cnt, sum_topic_acc / self.grad_accum_cnt,
                            best_valid_acc
                        )
                    )
                    # Log the window averages BEFORE resetting the sums.
                    # (Previously the sums were zeroed first, so fitlog always
                    # recorded zeros for these metrics.)
                    fitlog.add_metric({
                                        "train":
                                        {
                                            "adver_loss": sum_adver_loss / self.grad_accum_cnt,
                                            "loss": sum_loss / self.grad_accum_cnt,
                                            "acc": sum_acc / self.grad_accum_cnt,
                                            "topic_loss": sum_topic_loss / self.grad_accum_cnt,
                                            "topic_acc": sum_topic_acc / self.grad_accum_cnt
                                        }
                                    },
                                    step=step + epoch * len(train_loader))
                    sum_adver_loss = 0.0
                    sum_loss, sum_topic_loss = 0.0, 0.0
                    sum_acc, sum_topic_acc = 0.0, 0.0

                fitlog.add_metric(
                    {"train": {"acc": acc, "loss": float(loss)}},
                    step=counter
                )
                counter += 1
                if (step + 1) % (valid_every * self.grad_accum_cnt) == 0:
                    val_acc, val_loss = self.valid(dev_loader)
                    test_acc, test_loss = self.valid(te_loader)
                    self.train()
                    best_test_acc = test_acc if best_test_acc < test_acc else best_test_acc
                    print(
                        '##### %6d | %6d, [%3d | %3d], val_loss|val_acc = %6.8f/%6.7f, te_loss|te_acc = %6.8f/%6.7f, '
                        'best_valid_acc/related_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            val_loss, val_acc,
                            test_loss, test_acc,
                            best_valid_acc, best_valid_test_acc,
                            best_test_acc
                        )
                    )
                    fitlog.add_metric(
                        {"valid": {"acc": val_acc, "loss": val_loss}},
                        step=counter
                    )
                    if val_acc > best_valid_acc:
                        best_valid_acc = val_acc
                        best_valid_test_acc = test_acc
                        self.save_model(model_file)
        fitlog.add_best_metric({"%s" % log_suffix: {"best_valid_acc": best_valid_acc,
                                                         "best_valid_test_acc": best_valid_test_acc,
                                                         "best_test_acc": best_test_acc}})
        fitlog.finish()
        if RenameModel:
            self.RenameModelFile(model_file, best_valid_acc)

    def RenameModelFile(self, model_file, best_valid_acc):
        """Rename the checkpoint files to embed the best validation accuracy.

        Uses os.replace instead of `os.system("mv ...")` (shell-free, handles
        spaces in paths) and guards against missing files so the best-effort
        semantics of the old shell call are preserved.
        """
        # str.rstrip(".pkl") strips a *character set* ('.', 'p', 'k', 'l') and
        # can corrupt stems such as "backup.pkl" -> "backu"; strip the suffix.
        stem = model_file[:-len(".pkl")] if model_file.endswith(".pkl") else model_file
        new_model_file = "%s_%2.2f.pkl" % (stem, best_valid_acc)
        sent_model_file = "%s_sent.pkl" % stem
        new_sent_model_file = "%s_%2.2f_sent.pkl" % (stem, best_valid_acc)
        for src, dst in ((model_file, new_model_file), (sent_model_file, new_sent_model_file)):
            if os.path.exists(src):
                os.replace(src, dst)

    def save_model(self, model_file):
        """Save the sentence encoder (its own file) and the remaining modules."""
        stem = model_file[:-len(".pkl")] if model_file.endswith(".pkl") else model_file
        sent_model_file = "%s_sent.pkl" % stem
        self.sent2vec.save_model(sent_model_file)
        torch.save(
            {
                "prop_model": self.prop_model.state_dict(),
                "rdm_cls": self.rdm_cls.state_dict(),
                "topic_cls": self.topic_cls.state_dict()
            },
            model_file
        )

    def load_model(self, model_file):
        """Load a checkpoint written by save_model; exit if it is missing."""
        if os.path.exists(model_file):
            stem = model_file[:-len(".pkl")] if model_file.endswith(".pkl") else model_file
            sent_model_file = "%s_sent.pkl" % stem
            self.sent2vec.load_model(sent_model_file)
            checkpoint = torch.load(model_file)
            self.rdm_cls.load_state_dict(checkpoint["rdm_cls"])
            self.prop_model.load_state_dict(checkpoint['prop_model'])
            self.topic_cls.load_state_dict(checkpoint['topic_cls'])
        else:
            print("Error: pretrained file %s is not existed!" % model_file)
            sys.exit()

class EnhancedSentAdver(SentAdverRumorDetection):
    """Shared/private variant of the sentence-adversarial model.

    Keeps the inherited ``sent2vec`` as the topic-invariant *shared* encoder
    (it alone feeds the adversarial topic head) and adds ``sent_private``, a
    deep copy trained only through the rumor loss; the rumor path consumes the
    sum of both encoders' outputs.
    """

    def __init__(self, sent_share, propagation, classifier, topic_label_num,
                 lr_discount=1.0, batch_size=5, grad_accum_cnt=4):
        super(EnhancedSentAdver, self).__init__(sent_share, propagation, classifier, topic_label_num,
                                                batch_size=batch_size, grad_accum_cnt=grad_accum_cnt)
        # Private encoder: same architecture/initial weights as the shared one.
        self.sent_private = copy.deepcopy(self.sent2vec).to(self.device)
        # Rebuild the optimizer so the private encoder's parameters are included.
        self.optim = torch.optim.Adam([
            {'params': self.sent2vec.parameters(), 'lr': 2e-5 * lr_discount / self.grad_accum_cnt},
            {'params': self.sent_private.parameters(), 'lr': 2e-5 * lr_discount / self.grad_accum_cnt},
            {'params': self.topic_cls.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt},
            {'params': self.prop_model.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt},
            {'params': self.rdm_cls.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt}
        ])

    def forward(self, batch):
        """Rumor-class probabilities from the summed shared+private encodings."""
        seqs = batch[0]
        sents, seq_len = self.seq2sents(seqs)
        sent_vecs = self.sent2vec(sents) + self.sent_private(sents)
        seq_tensors = [sent_vecs[sum(seq_len[:idx]):sum(seq_len[:idx]) + seq_len[idx]] for idx, s_len in
                       enumerate(seq_len)]
        seq_outs = self.prop_model(seq_tensors)
        preds = self.rdm_cls(seq_outs).softmax(dim=1)
        return preds

    def Adver_Loss(self, seqs, rdm_labels, topic_labels):
        """Rumor loss on combined encodings + adversarial topic loss on the
        *shared* encodings only.

        Returns ``(adver_loss, loss, acc, topic_loss, topic_acc)`` with
        ``adver_loss = loss + topic_loss``.
        """
        sents, seq_len = self.seq2sents(seqs)
        sent_vecs_share = self.sent2vec(sents)
        sent_vecs_private = self.sent_private(sents)
        sent_vecs = sent_vecs_private + sent_vecs_share
        seq_tensors = [sent_vecs[sum(seq_len[:idx]):sum(seq_len[:idx]) + seq_len[idx]] for idx, s_len in
                       enumerate(seq_len)]
        seq_outs = self.prop_model(seq_tensors)
        preds = self.rdm_cls(seq_outs).softmax(dim=1)
        loss = self.nll_loss_fn(preds.log(), rdm_labels)
        acc = accuracy_score(rdm_labels.cpu(), preds.cpu().argmax(dim=1))

        # FIX: group the *shared* vectors here. The original regrouped the
        # combined `sent_vecs`, so the gradient-reversed topic loss also drove
        # the private encoder toward topic invariance, defeating the
        # shared/private split the variable names describe.
        seq_tensors_share = [sent_vecs_share[sum(seq_len[:idx]):sum(seq_len[:idx]) + seq_len[idx]]
                             for idx, s_len in enumerate(seq_len)]
        src_share = grad_reverse(torch.stack([seq[0] for seq in seq_tensors_share]))
        topic_pred = self.topic_cls(src_share).softmax(dim=1)
        topic_loss = self.topic_loss_fn(topic_pred.log(), topic_labels)
        topic_acc = accuracy_score(topic_labels.cpu(), topic_pred.cpu().argmax(dim=1))
        return loss + topic_loss, loss, acc, topic_loss, topic_acc

    def TopicLoss(self, sents, topic_labels):
        """Topic loss on the shared encoder only (discriminator training)."""
        sent_vecs = self.sent2vec(sents)
        topic_pred = self.topic_cls(sent_vecs).softmax(dim=1)
        topic_loss = self.topic_loss_fn(topic_pred.log(), topic_labels)
        topic_acc = accuracy_score(topic_labels.cpu(), topic_pred.cpu().argmax(dim=1))
        return topic_acc, topic_loss

    def RenameModelFile(self, model_file, best_valid_acc):
        """Rename all three checkpoint files to embed the validation accuracy.

        Uses suffix-stripping (not str.rstrip, which strips a character set)
        and shell-free os.replace, skipping files that were never written.
        """
        stem = model_file[:-len(".pkl")] if model_file.endswith(".pkl") else model_file
        renames = (
            (model_file, "%s_%2.2f.pkl" % (stem, best_valid_acc)),
            ("%s_sent.pkl" % stem, "%s_%2.2f_sent.pkl" % (stem, best_valid_acc)),
            ("%s_SentPrivate.pkl" % stem, "%s_%2.2f_SentPrivate.pkl" % (stem, best_valid_acc)),
        )
        for src, dst in renames:
            if os.path.exists(src):
                os.replace(src, dst)

    def save_model(self, model_file):
        """Save shared encoder, private encoder, and the remaining modules."""
        stem = model_file[:-len(".pkl")] if model_file.endswith(".pkl") else model_file
        self.sent2vec.save_model("%s_sent.pkl" % stem)
        self.sent_private.save_model("%s_SentPrivate.pkl" % stem)
        torch.save(
            {
                "prop_model": self.prop_model.state_dict(),
                "rdm_cls": self.rdm_cls.state_dict(),
                "topic_cls": self.topic_cls.state_dict()
            },
            model_file
        )

    def load_model(self, model_file):
        """Load a checkpoint written by save_model; exit if it is missing."""
        if os.path.exists(model_file):
            stem = model_file[:-len(".pkl")] if model_file.endswith(".pkl") else model_file
            self.sent_private.load_model("%s_SentPrivate.pkl" % stem)
            self.sent2vec.load_model("%s_sent.pkl" % stem)
            checkpoint = torch.load(model_file)
            self.rdm_cls.load_state_dict(checkpoint["rdm_cls"])
            self.prop_model.load_state_dict(checkpoint['prop_model'])
            self.topic_cls.load_state_dict(checkpoint['topic_cls'])
        else:
            print("Error: pretrained file %s is not existed!" % model_file)
            sys.exit()

class PropAdverRumorDetection(RumorDetection):
    """Rumor detection with a topic-adversarial head on the propagation output.

    Unlike SentAdverRumorDetection there is no gradient-reversal layer here:
    the adversarial effect comes from subtracting the topic loss in
    ``Adver_Loss`` combined with a negative learning rate on ``topic_cls``
    (see the NOTE in AdverTrainIters).
    """

    def __init__(self, sent2vec, propagation, classifier, topic_label_num):
        super(PropAdverRumorDetection, self).__init__(sent2vec, propagation, classifier)
        prop_hidden_size = self.prop_model.prop_hidden_size
        # Topic discriminator over the propagation-model output.
        self.topic_cls = nn.Sequential(
            nn.Linear(prop_hidden_size, prop_hidden_size * 2),
            nn.ReLU(),
            nn.Linear(prop_hidden_size * 2, topic_label_num)
        ).to(self.device)
        self.topic_loss_fn = nn.NLLLoss()

    def Adver_Loss(self, seqs, rdm_labels, topic_labels):
        """Rumor loss minus (weighted) topic loss over the propagation output.

        Returns ``(adver_loss, loss, acc, topic_loss, topic_acc)`` with
        ``adver_loss = loss - 0.9 * topic_loss`` (the subtraction makes the
        shared features topic-confusing when the topic head itself is trained
        the other way).
        """
        sents, seq_len = self.seq2sents(seqs)
        sent_vecs = self.sent2vec(sents)
        # Regroup the flat per-sentence tensor back into one tensor per sequence.
        seq_tensors = [sent_vecs[sum(seq_len[:idx]):sum(seq_len[:idx]) + seq_len[idx]] for idx, s_len in
                       enumerate(seq_len)]
        seq_outs = self.prop_model(seq_tensors)
        preds = self.rdm_cls(seq_outs).softmax(dim=1)
        loss = self.nll_loss_fn(preds.log(), rdm_labels)
        acc = accuracy_score(rdm_labels.cpu(), preds.cpu().argmax(dim=1))

        topic_pred = self.topic_cls(seq_outs).softmax(dim=1)
        topic_loss = self.topic_loss_fn(topic_pred.log(), topic_labels)
        topic_acc = accuracy_score(topic_labels.cpu(), topic_pred.cpu().argmax(dim=1))
        return loss - 0.9 * topic_loss, loss, acc, topic_loss, topic_acc

    def AdverTrainIters(self, train_set, dev_set, test_set,
                        valid_every=100, max_epochs=10, lr_discount=1.0,
                        log_dir="../logs/", log_suffix="_RumorDetection", model_file="", RenameModel=True):
        """Epoch-based adversarial training with local batch/accumulation sizes."""
        fitlog.set_log_dir("%s/" % log_dir, new_log=True)
        batch_size = 5
        train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True,
                                  collate_fn=train_set.collate_raw_batch)
        grad_accum_cnt = 4

        dev_loader = DataLoader(dev_set, batch_size=batch_size * grad_accum_cnt, shuffle=True,
                                collate_fn=dev_set.collate_raw_batch)

        te_loader = DataLoader(test_set, batch_size=batch_size * grad_accum_cnt, shuffle=True,
                               collate_fn=test_set.collate_raw_batch)

        # NOTE(review): topic_cls deliberately gets a *negative* learning rate,
        # which makes Adam ascend its gradient. Combined with the
        # `loss - 0.9 * topic_loss` objective this trains topic_cls to
        # *minimize* the topic loss while the shared network maximizes it — a
        # grad_reverse substitute. Looks intentional; confirm before changing.
        optim = torch.optim.Adam([
            {'params': self.sent2vec.parameters(), 'lr': 2e-6 * lr_discount / grad_accum_cnt},
            {'params': self.topic_cls.parameters(), 'lr': -1e-5 * lr_discount / grad_accum_cnt},
            {'params': self.prop_model.parameters(), 'lr': 1e-5 * lr_discount / grad_accum_cnt},
            {'params': self.rdm_cls.parameters(), 'lr': 1e-5 * lr_discount / grad_accum_cnt}
        ])
        best_valid_acc = 0.0
        best_test_acc = 0.0
        best_valid_test_acc = 0.0
        counter = 0
        optim.zero_grad()
        self.train()
        sum_adver_loss = 0.0
        sum_loss, sum_topic_loss = 0.0, 0.0
        sum_acc, sum_topic_acc = 0.0, 0.0
        for epoch in range(max_epochs):
            for step, batch in enumerate(train_loader):
                adver_loss, loss, acc, topic_loss, topic_acc = self.Adver_Loss(batch[0], batch[2].to(self.device),
                                                                               batch[3].to(self.device))
                adver_loss.backward()
                # Step only after a full accumulation window; the previous
                # `step % cnt == 0` stepped after the very first micro-batch.
                if (step + 1) % grad_accum_cnt == 0:
                    optim.step()
                    optim.zero_grad()
                torch.cuda.empty_cache()
                # Accumulate python floats: `sum += tensor` would keep every
                # batch's autograd graph alive and leak GPU memory.
                sum_adver_loss += float(adver_loss)
                sum_loss += float(loss)
                sum_acc += float(acc)
                sum_topic_loss += float(topic_loss)
                sum_topic_acc += float(topic_acc)
                if (step + 1) % grad_accum_cnt == 0:
                    print(
                        '%6d | %6d  [%3d | %3d], adver_loss=%6.7f, loss/acc = %6.8f/%6.7f, topic_loss/topic_acc:%6.7f / %6.7f, best_valid_acc:%6.7f ' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            sum_adver_loss / grad_accum_cnt, sum_loss / grad_accum_cnt, sum_acc / grad_accum_cnt,
                            sum_topic_loss / grad_accum_cnt, sum_topic_acc / grad_accum_cnt,
                            best_valid_acc
                        )
                    )
                    sum_adver_loss = 0.0
                    sum_loss, sum_topic_loss = 0.0, 0.0
                    sum_acc, sum_topic_acc = 0.0, 0.0
                fitlog.add_metric(
                    {"train": {"acc": acc, "loss": float(loss)}},
                    step=counter
                )
                counter += 1
                if (step + 1) % (valid_every * grad_accum_cnt) == 0:
                    val_acc, val_loss = self.valid(dev_loader)
                    test_acc, test_loss = self.valid(te_loader)
                    self.train()
                    best_test_acc = test_acc if best_test_acc < test_acc else best_test_acc
                    print(
                        '##### %6d | %6d, [%3d | %3d], val_loss|val_acc = %6.8f/%6.7f, te_loss|te_acc = %6.8f/%6.7f, best_valid_acc/related_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            val_loss, val_acc,
                            test_loss, test_acc,
                            best_valid_acc, best_valid_test_acc,
                            best_test_acc
                        )
                    )
                    fitlog.add_metric(
                        {"valid": {"acc": val_acc, "loss": val_loss}},
                        step=counter)
                    if val_acc > best_valid_acc:
                        best_valid_acc = val_acc
                        best_valid_test_acc = test_acc
                        self.save_model(model_file)
        fitlog.add_best_metric({"%s" % log_suffix: {"best_valid_acc": best_valid_acc,
                                                         "best_valid_test_acc": best_valid_test_acc,
                                                         "best_test_acc": best_test_acc}})
        fitlog.finish()
        if RenameModel:
            self.RenameModelFile(model_file, best_valid_acc)

    def RenameModelFile(self, model_file, best_valid_acc):
        """Rename the checkpoint files to embed the best validation accuracy.

        Strips the '.pkl' *suffix* (str.rstrip(".pkl") strips a character set
        and can corrupt stems) and uses shell-free os.replace, skipping files
        that were never written.
        """
        stem = model_file[:-len(".pkl")] if model_file.endswith(".pkl") else model_file
        renames = (
            (model_file, "%s_%2.2f.pkl" % (stem, best_valid_acc)),
            ("%s_sent.pkl" % stem, "%s_%2.2f_sent.pkl" % (stem, best_valid_acc)),
        )
        for src, dst in renames:
            if os.path.exists(src):
                os.replace(src, dst)

    def save_model(self, model_file):
        """Save the sentence encoder (its own file) and the remaining modules."""
        stem = model_file[:-len(".pkl")] if model_file.endswith(".pkl") else model_file
        self.sent2vec.save_model("%s_sent.pkl" % stem)
        torch.save(
            {
                "prop_model": self.prop_model.state_dict(),
                "rdm_cls": self.rdm_cls.state_dict(),
                "topic_cls": self.topic_cls.state_dict()
            },
            model_file
        )

    def load_model(self, model_file):
        """Load a checkpoint written by save_model; exit if it is missing."""
        if os.path.exists(model_file):
            stem = model_file[:-len(".pkl")] if model_file.endswith(".pkl") else model_file
            self.sent2vec.load_model("%s_sent.pkl" % stem)
            checkpoint = torch.load(model_file)
            self.rdm_cls.load_state_dict(checkpoint["rdm_cls"])
            self.prop_model.load_state_dict(checkpoint['prop_model'])
            self.topic_cls.load_state_dict(checkpoint['topic_cls'])
        else:
            print("Error: pretrained file %s is not existed!" % model_file)
            sys.exit()

class EnhancedPropAdver(RumorDetection):
    """Rumor detector trained adversarially against a topic classifier.

    Two propagation encoders consume the same sentence vectors:

    * ``prop_model`` -- the topic-*specific* encoder inherited from
      :class:`RumorDetection`;
    * ``prop_share`` -- a topic-*shared* encoder, initialised as a deep copy
      of ``prop_model``.

    During ``AdverTrain`` the rumor classifier reads the sum of both
    encoders' outputs, while ``topic_cls`` predicts topics from both paths;
    the shared path passes through ``grad_reverse`` so ``prop_share`` is
    pushed towards topic-invariant features.
    """

    def __init__(self, sent2vec, prop_specific, classifier,
                 topic_label_num, prop_specific_only=True, batch_size=5, grad_accum_cnt=4):
        super(EnhancedPropAdver, self).__init__(sent2vec, prop_specific, classifier, batch_size, grad_accum_cnt)
        prop_hidden_size = self.prop_model.prop_hidden_size
        # The shared encoder starts from the same weights as the specific one.
        self.prop_share = copy.deepcopy(self.prop_model).to(self.device)
        self.topic_label_num = topic_label_num
        # Topic discriminator over propagation-level representations.
        self.topic_cls = nn.Sequential(
            nn.Linear(prop_hidden_size, prop_hidden_size * 2),
            nn.ReLU(),
            nn.Linear(prop_hidden_size * 2, topic_label_num)
        ).to(self.device)
        self.topic_loss_fn = nn.NLLLoss()
        # When True, forward() ignores the shared encoder (pretraining mode).
        self.prop_specific_only = prop_specific_only

    def forward(self, batch):
        """Return rumor-class probabilities for the sequences in ``batch[0]``."""
        seqs = batch[0]
        sents, seq_len = self.seq2sents(seqs)
        sent_vecs = self.sent2vec(sents)
        # Split the flat sentence-vector matrix back into per-sequence chunks.
        seq_tensors = [sent_vecs[sum(seq_len[:idx]):sum(seq_len[:idx]) + seq_len[idx]] for idx, s_len in
                       enumerate(seq_len)]
        seq_outs_specific = self.prop_model(seq_tensors)
        if self.prop_specific_only:
            seq_outs = seq_outs_specific
        else:
            seq_outs_share = self.prop_share(seq_tensors)
            seq_outs = seq_outs_share + seq_outs_specific
        preds = self.rdm_cls(seq_outs).softmax(dim=1)
        return preds

    def DetachTopicLoss(self, seqs, topic_labels):
        """Topic loss/accuracy on both encoder paths (no rumor objective).

        Returns ``(loss_share, acc_share, loss_specific, acc_specific)`` --
        each loss is immediately followed by its accuracy.
        """
        sents, seq_len = self.seq2sents(seqs)
        sent_vecs = self.sent2vec(sents)
        seq_tensors = [sent_vecs[sum(seq_len[:idx]):sum(seq_len[:idx]) + seq_len[idx]] for idx, s_len in
                       enumerate(seq_len)]
        seq_outs_share = self.prop_share(seq_tensors)
        seq_outs_specific = self.prop_model(seq_tensors)
        # Gradient reversal: the shared encoder is trained to fool topic_cls.
        seq_outs_share = grad_reverse(seq_outs_share)
        topic_pred_share = self.topic_cls(seq_outs_share).softmax(dim=1)
        topic_loss_share = self.topic_loss_fn(topic_pred_share.log(), topic_labels)
        topic_acc_share = accuracy_score(topic_labels.cpu(), topic_pred_share.cpu().argmax(dim=1))
        topic_pred_specific = self.topic_cls(seq_outs_specific).softmax(dim=1)
        topic_loss_specific = self.topic_loss_fn(topic_pred_specific.log(), topic_labels)
        topic_acc_specific = accuracy_score(topic_labels.cpu(), topic_pred_specific.cpu().argmax(dim=1))
        return topic_loss_share, topic_acc_share, topic_loss_specific, topic_acc_specific

    def Adver_Loss(self, seqs, rdm_labels, topic_labels, lambda_1=0.9, lambda_2=0.9):
        """Combined objective for one batch.

        Returns ``(total, rdm_loss, rdm_acc, loss_share, acc_share,
        loss_specific, acc_specific)`` where
        ``total = rdm_loss + lambda_1 * loss_share + lambda_2 * loss_specific``.
        """
        sents, seq_len = self.seq2sents(seqs)
        sent_vecs = self.sent2vec(sents)
        seq_tensors = [sent_vecs[sum(seq_len[:idx]):sum(seq_len[:idx]) + seq_len[idx]] for idx, s_len in
                       enumerate(seq_len)]
        seq_outs_share = self.prop_share(seq_tensors)
        seq_outs_specific = self.prop_model(seq_tensors)
        seq_outs = seq_outs_share + seq_outs_specific
        preds = self.rdm_cls(seq_outs).softmax(dim=1)
        loss = self.nll_loss_fn(preds.log(), rdm_labels)
        acc = accuracy_score(rdm_labels.cpu(), preds.cpu().argmax(dim=1))

        # Shared path is gradient-reversed (topic-adversarial); the specific
        # path is not.
        seq_outs_share = grad_reverse(seq_outs_share)
        topic_pred_share = self.topic_cls(seq_outs_share).softmax(dim=1)
        topic_loss_share = self.topic_loss_fn(topic_pred_share.log(), topic_labels)
        topic_acc_share = accuracy_score(topic_labels.cpu(), topic_pred_share.cpu().argmax(dim=1))

        topic_pred_specific = self.topic_cls(seq_outs_specific).softmax(dim=1)
        topic_loss_specific = self.topic_loss_fn(topic_pred_specific.log(), topic_labels)
        topic_acc_specific = accuracy_score(topic_labels.cpu(), topic_pred_specific.cpu().argmax(dim=1))
        return loss + lambda_1 * topic_loss_share + lambda_2 * topic_loss_specific, loss, \
               acc, topic_loss_share, topic_acc_share, topic_loss_specific, topic_acc_specific

    def TopicPretrain(self, train_set, dev_set, test_set):
        """Pretrain the topic classifier: first jointly with the specific
        encoder, then adversarially with the shared encoder. Temporary
        checkpoint files are cleaned up afterwards.
        """
        def collate_senti_batch(batch):
            # Keep only (sequence, topic_label) pairs; item[3] is the topic id.
            seqs = [item[0] for item in batch]
            topic_labels = [item[3] for item in batch]
            return seqs, torch.tensor(topic_labels)

        train_loader = DataLoader(train_set, batch_size=self.batch_size, shuffle=True,
                                  collate_fn=collate_senti_batch)

        dev_loader = DataLoader(dev_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                                collate_fn=collate_senti_batch)

        te_loader = DataLoader(test_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                               collate_fn=collate_senti_batch)
        topic_trainer = SequenceClassificationTrainer(self.sent2vec, self.prop_model, classifier=self.topic_cls,
                                                      label_cnt=self.topic_label_num, batch_size=self.batch_size,
                                                      grad_accum_cnt=self.grad_accum_cnt)
        topic_trainer.train_iters(train_loader, dev_loader, te_loader,
                                  valid_every=100, max_epochs=1, lr_discount=1.0,
                                  best_valid_acc=0.0, best_test_acc=0.0, best_valid_test_acc=0.0,
                                  log_dir="../logs/", log_suffix="_TopicTrainer", model_file="topic_pretrain.pkl")
        topic_trainer.load_model("topic_pretrain.pkl")

        adver_topic_trainer = AdverSequenceClassificationTrainer(self.sent2vec, self.prop_share,
                                                                 classifier=self.topic_cls,
                                                                 label_cnt=self.topic_label_num,
                                                                 batch_size=self.batch_size,
                                                                 grad_accum_cnt=self.grad_accum_cnt)
        adver_topic_trainer.train_iters(train_loader, dev_loader, te_loader,
                                        valid_every=100, max_epochs=1, lr_discount=1.0,
                                        best_valid_acc=0.0, best_test_acc=0.0, best_valid_test_acc=0.0,
                                        log_dir="../logs/", log_suffix="_TopicTrainer",
                                        model_file="adver_topic_pretrain.pkl")
        adver_topic_trainer.load_model("adver_topic_pretrain.pkl")
        # os.remove instead of os.system("rm ..."): portable, no shell; the
        # files were just loaded above so they exist at this point.
        os.remove("adver_topic_pretrain.pkl")
        os.remove("topic_pretrain.pkl")

    def AdverTrain(self, train_set, dev_set, test_set,
                   Unseen_every=-1, lambda_1=0.9, lambda_2=0.9,
                   valid_every=100, max_epochs=10, lr_discount=1.0,
                   log_dir="../logs/", log_suffix="_RumorDetection", model_file="", RenameModel=True):
        """Adversarial joint training loop.

        One optimizer step is taken every ``grad_accum_cnt`` micro-batches;
        if ``Unseen_every > 0``, extra topic-only updates are run on batches
        drawn from ``test_set`` every ``Unseen_every`` steps; validation runs
        every ``valid_every`` steps and the best checkpoint is saved to
        ``model_file`` (renamed with the best accuracy if ``RenameModel``).
        """
        fitlog.set_log_dir("%s/" % log_dir, new_log=True)
        # From here on, forward() uses both encoders.
        self.prop_specific_only = False

        train_loader = DataLoader(train_set, batch_size=self.batch_size, shuffle=True,
                                  collate_fn=train_set.collate_raw_batch)

        dev_loader = DataLoader(dev_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                                collate_fn=dev_set.collate_raw_batch)

        te_loader = DataLoader(test_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                               collate_fn=test_set.collate_raw_batch)

        # NOTE(review): topic_cls gets a *negative* learning rate, i.e.
        # gradient ascent on the topic loss -- presumably intentional as part
        # of the adversarial scheme together with grad_reverse; confirm
        # before changing.
        optim = torch.optim.Adam([
            {'params': self.sent2vec.parameters(), 'lr': 2e-6 * lr_discount / self.grad_accum_cnt},
            {'params': self.topic_cls.parameters(), 'lr': -1e-5 * lr_discount / self.grad_accum_cnt},
            {'params': self.prop_model.parameters(), 'lr': 1e-5 * lr_discount / self.grad_accum_cnt},
            {'params': self.prop_share.parameters(), 'lr': 1e-5 * lr_discount / self.grad_accum_cnt},
            {'params': self.rdm_cls.parameters(), 'lr': 1e-5 * lr_discount / self.grad_accum_cnt}
        ]
        )
        best_valid_acc = 0.0
        best_test_acc = 0.0
        best_valid_test_acc = 0.0
        counter = 0
        optim.zero_grad()
        self.train()
        sum_adver_loss = 0.0
        sum_loss, sum_topic_loss_share, sum_topic_loss_specific = 0.0, 0.0, 0.0
        sum_acc, sum_topic_acc_share, sum_topic_acc_specific = 0.0, 0.0, 0.0
        for epoch in range(max_epochs):
            for step, batch in enumerate(train_loader):
                adver_loss, loss, acc, topic_loss_sh, topic_acc_sh, topic_loss_sp, topic_acc_sp = self.Adver_Loss(
                    batch[0], batch[2].to(self.device), batch[3].to(self.device), lambda_1, lambda_2)
                adver_loss.backward()
                torch.cuda.empty_cache()

                sum_adver_loss += adver_loss
                sum_loss += loss
                sum_topic_loss_share += topic_loss_sh
                sum_topic_loss_specific += topic_loss_sp
                sum_acc += acc
                sum_topic_acc_share += topic_acc_sh
                sum_topic_acc_specific += topic_acc_sp
                fitlog.add_metric(
                    {"train": {"acc": acc, "loss": loss}},
                    step=counter
                )
                counter += 1

                if (step + 1) % self.grad_accum_cnt == 0:
                    # One optimizer step per grad_accum_cnt micro-batches.
                    optim.step()
                    optim.zero_grad()
                    print(
                        '%6d | %6d  [%3d | %3d], adver_loss=%6.7f, loss/acc = %6.8f/%6.7f, topic_loss_s/topic_acc_s:%6.7f / %6.7f, topic_loss_p/topic_acc_p:%6.7f / %6.7f, best_valid_acc:%6.7f ' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            sum_adver_loss / self.grad_accum_cnt, sum_loss / self.grad_accum_cnt,
                            sum_acc / self.grad_accum_cnt,
                            sum_topic_loss_share / self.grad_accum_cnt, sum_topic_acc_share / self.grad_accum_cnt,
                            sum_topic_loss_specific / self.grad_accum_cnt, sum_topic_acc_specific / self.grad_accum_cnt,
                            best_valid_acc
                        )
                    )
                    sum_adver_loss = 0.0
                    sum_loss, sum_topic_loss_share, sum_topic_loss_specific = 0.0, 0.0, 0.0
                    sum_acc, sum_topic_acc_share, sum_topic_acc_specific = 0.0, 0.0, 0.0

                    if Unseen_every > 0 and (step + 1) % (Unseen_every * self.grad_accum_cnt) == 0:
                        for _ in range(self.grad_accum_cnt):
                            unseen_batch = test_set.InnerBatch(self.batch_size)
                            # FIX: DetachTopicLoss returns (loss_share, acc_share,
                            # loss_specific, acc_specific); the previous unpacking
                            # swapped the middle two names, so an accuracy *float*
                            # (acc_share) was fed into backward() in place of the
                            # specific-path topic loss, which was therefore never
                            # optimized on unseen batches.
                            topic_loss_share2, topic_acc_share2, topic_loss_specific2, topic_acc_specific2 = self.DetachTopicLoss(
                                unseen_batch[0], unseen_batch[3].to(self.device))
                            (lambda_1 * topic_loss_share2 + lambda_2 * topic_loss_specific2).backward()
                        optim.step()
                        optim.zero_grad()

                if (step + 1) % (valid_every * self.grad_accum_cnt) == 0:
                    val_acc, val_loss = self.valid(dev_loader)
                    test_acc, test_loss = self.valid(te_loader)
                    # self.valid() leaves the model in eval mode; switch back.
                    self.train()
                    best_test_acc = test_acc if best_test_acc < test_acc else best_test_acc
                    print(
                        '##### %6d | %6d, [%3d | %3d], val_loss|val_acc = %6.8f/%6.7f, te_loss|te_acc = %6.8f/%6.7f, best_valid_acc/related_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            val_loss, val_acc,
                            test_loss, test_acc,
                            best_valid_acc, best_valid_test_acc,
                            best_test_acc
                        )
                    )
                    fitlog.add_metric(
                        {"valid": {"acc": val_acc, "loss": val_loss}},
                        step=counter
                    )
                    if val_acc > best_valid_acc:
                        best_valid_acc = val_acc
                        best_valid_test_acc = test_acc
                        self.save_model(model_file)
        fitlog.add_best_metric({"%s" % log_suffix: {"best_valid_acc": best_valid_acc,
                                                    "best_valid_test_acc": best_valid_test_acc,
                                                    "best_test_acc": best_test_acc}})
        fitlog.finish()
        if RenameModel:
            self.RenameModelFile(model_file, best_valid_acc)

    def RenameModelFile(self, model_file, best_valid_acc):
        """Rename the checkpoint pair so the names record the best validation
        accuracy.

        Fixes: suffix removal by slicing instead of ``rstrip(".pkl")`` (which
        strips a character set and mangled stems ending in '.', 'p', 'k',
        'l'), and ``os.replace`` instead of shelling out to ``mv``.
        """
        stem = model_file[:-4] if model_file.endswith(".pkl") else model_file
        new_model_file = "%s_%2.2f.pkl" % (stem, best_valid_acc)
        sent_model_file = "%s_sent.pkl" % stem
        new_sent_model_file = "%s_%2.2f_sent.pkl" % (stem, best_valid_acc)
        if os.path.exists(model_file):
            os.replace(model_file, new_model_file)
        if os.path.exists(sent_model_file):
            os.replace(sent_model_file, new_sent_model_file)

    def save_model(self, model_file):
        """Persist the detector: sentence encoder to ``<stem>_sent.pkl`` via
        its own hook; both propagation encoders and both heads in one torch
        checkpoint at ``model_file``. Suffix removal fixed as above.
        """
        stem = model_file[:-4] if model_file.endswith(".pkl") else model_file
        sent_model_file = "%s_sent.pkl" % stem
        self.sent2vec.save_model(sent_model_file)
        torch.save(
            {
                "prop_model": self.prop_model.state_dict(),
                "prop_share": self.prop_share.state_dict(),
                "rdm_cls": self.rdm_cls.state_dict(),
                "topic_cls": self.topic_cls.state_dict()
            },
            model_file
        )

    def load_model(self, model_file):
        """Restore a checkpoint written by ``save_model``; prints an error and
        exits the process when ``model_file`` is missing (original contract
        kept). Suffix removal fixed as above.
        """
        if os.path.exists(model_file):
            stem = model_file[:-4] if model_file.endswith(".pkl") else model_file
            sent_model_file = "%s_sent.pkl" % stem
            self.sent2vec.load_model(sent_model_file)
            checkpoint = torch.load(model_file)
            self.rdm_cls.load_state_dict(checkpoint["rdm_cls"])
            self.prop_model.load_state_dict(checkpoint['prop_model'])
            self.prop_share.load_state_dict(checkpoint['prop_share'])
            self.topic_cls.load_state_dict(checkpoint['topic_cls'])
        else:
            print("Error: pretrained file %s is not existed!" % model_file)
            sys.exit()

class EnhancedAdverModel(RumorDetection):
    """Adversarial rumor detector with shared/specific branches at BOTH the
    sentence level and the propagation level.

    Unlike ``EnhancedPropAdver`` (which shares only the propagation encoder),
    this variant also deep-copies the sentence encoder into
    ``sent2vec_share``. The shared branch passes through ``grad_reverse``
    before the topic classifier, pushing it towards topic-invariant features,
    while the specific branch keeps the topic signal.
    """
    def __init__(self, sent2vec, prop_specific, classifier,
                 topic_label_num, prop_specific_only=True, batch_size=5, grad_accum_cnt=4):
        super(EnhancedAdverModel, self).__init__(sent2vec, prop_specific, classifier, batch_size, grad_accum_cnt)
        prop_hidden_size = self.prop_model.prop_hidden_size
        # Shared branches start as deep copies of the specific ones.
        self.sent2vec_share = copy.deepcopy(self.sent2vec).to(self.device)
        self.prop_share = copy.deepcopy(self.prop_model).to(self.device)
        self.topic_label_num = topic_label_num
        # Topic discriminator over propagation-level outputs.
        self.topic_cls = nn.Sequential(
            nn.Linear(prop_hidden_size, prop_hidden_size * 2),
            nn.ReLU(),
            nn.Linear(prop_hidden_size * 2, topic_label_num)
        ).to(self.device)
        self.topic_loss_fn = nn.NLLLoss()
        # When True, forward() uses only the specific branch.
        self.prop_specific_only = prop_specific_only

    def forward(self, batch):
        """Return rumor-class probabilities for the sequences in ``batch[0]``."""
        seqs = batch[0]
        seq_tensors, seq_tensors_share = self.seq2sent_tensors(seqs)
        seq_outs_specific = self.prop_model(seq_tensors)
        if self.prop_specific_only:
            seq_outs = seq_outs_specific
        else:
            seq_outs_share = self.prop_share(seq_tensors_share)
            seq_outs = seq_outs_share + seq_outs_specific
        preds = self.rdm_cls(seq_outs).softmax(dim=1)
        return preds

    def seq2sent_tensors(self, seqs):
        """Encode all sentences with both sentence encoders and split the flat
        matrices back into per-sequence chunks.

        Returns ``(seq_tensors, seq_tensors_share)`` -- specific first.
        """
        sents, seq_len = self.seq2sents(seqs)
        sent_vecs = self.sent2vec(sents)
        sent_vecs_share = self.sent2vec_share(sents)
        # NOTE(review): sum(seq_len[:idx]) recomputes the prefix sum per
        # element, making the split O(n^2) in the number of sequences.
        seq_tensors = [sent_vecs[sum(seq_len[:idx]):sum(seq_len[:idx]) + seq_len[idx]] for idx, s_len in
                       enumerate(seq_len)]
        seq_tensors_share = [sent_vecs_share[sum(seq_len[:idx]):sum(seq_len[:idx]) + seq_len[idx]] for idx, s_len in
                             enumerate(seq_len)]
        return seq_tensors, seq_tensors_share

    def seqs2tensors(self, seqs):
        """Run both full branches; returns ``(share_outs, specific_outs)``."""
        seq_tensors, seq_tensors_share = self.seq2sent_tensors(seqs)
        seq_outs_specific = self.prop_model(seq_tensors)
        seq_outs_share = self.prop_share(seq_tensors_share)
        return seq_outs_share, seq_outs_specific

    def Adver_Loss(self, seqs, rdm_labels, topic_labels, lambda_1=0.9, lambda_2=0.9):
        """Combined objective for one batch.

        Returns ``(total, rdm_loss, rdm_acc, loss_share, acc_share,
        loss_specific, acc_specific)`` with
        ``total = rdm_loss + lambda_1 * loss_share + lambda_2 * loss_specific``.
        """
        seq_outs_share, seq_outs_specific = self.seqs2tensors(seqs)
        seq_outs = seq_outs_share + seq_outs_specific
        preds = self.rdm_cls(seq_outs).softmax(dim=1)
        loss = self.nll_loss_fn(preds.log(), rdm_labels)
        acc = accuracy_score(rdm_labels.cpu(), preds.cpu().argmax(dim=1))

        # Shared path is gradient-reversed (topic-adversarial); the specific
        # path is not.
        seq_outs_share = grad_reverse(seq_outs_share)
        topic_pred_share = self.topic_cls(seq_outs_share).softmax(dim=1)
        topic_loss_share = self.topic_loss_fn(topic_pred_share.log(), topic_labels)
        topic_acc_share = accuracy_score(topic_labels.cpu(), topic_pred_share.cpu().argmax(dim=1))

        topic_pred_specific = self.topic_cls(seq_outs_specific).softmax(dim=1)
        topic_loss_specific = self.topic_loss_fn(topic_pred_specific.log(), topic_labels)
        topic_acc_specific = accuracy_score(topic_labels.cpu(), topic_pred_specific.cpu().argmax(dim=1))
        return loss + lambda_1 * topic_loss_share + lambda_2 * topic_loss_specific, loss, \
               acc, topic_loss_share, topic_acc_share, topic_loss_specific, topic_acc_specific

    def DetachTopicLoss(self, seqs, topic_labels):
        """Topic loss/accuracy on both branches, without the rumor objective.

        Returns ``(loss_share, acc_share, loss_specific, acc_specific)``.
        """
        seq_outs_share, seq_outs_specific = self.seqs2tensors(seqs)
        seq_outs_share = grad_reverse(seq_outs_share)
        topic_pred_share = self.topic_cls(seq_outs_share).softmax(dim=1)
        topic_loss_share = self.topic_loss_fn(topic_pred_share.log(), topic_labels)
        topic_acc_share = accuracy_score(topic_labels.cpu(), topic_pred_share.cpu().argmax(dim=1))

        topic_pred_specific = self.topic_cls(seq_outs_specific).softmax(dim=1)
        topic_loss_specific = self.topic_loss_fn(topic_pred_specific.log(), topic_labels)
        topic_acc_specific = accuracy_score(topic_labels.cpu(), topic_pred_specific.cpu().argmax(dim=1))
        return topic_loss_share, topic_acc_share, topic_loss_specific, topic_acc_specific

    def AdverTrain(self, train_set, dev_set, test_set,
                   lambda_1=0.9, lambda_2=0.9, lambda_3 = 0,
                   valid_every=100, max_epochs=10, lr_discount=1.0,
                   log_dir="../logs/", log_suffix="_RumorDetection", model_file="", RenameModel=True):
        """Adversarial joint training loop.

        When ``lambda_3 != 0`` every step additionally mixes in a detached
        topic loss computed on a batch drawn from ``test_set`` (uses only its
        topic labels, not rumor labels). One optimizer step per
        ``grad_accum_cnt`` micro-batches; validates every ``valid_every``
        steps; best checkpoint saved to ``model_file``.
        """
        fitlog.set_log_dir("%s/" % log_dir, new_log=True)
        hyper = {
            "lambda_1": lambda_1,
            "lambda_2": lambda_2,
            "lambda_3": lambda_3
        }
        fitlog.add_hyper(hyper, name="test")
        # Use both branches from here on.
        self.prop_specific_only = False
        train_loader = DataLoader(train_set, batch_size=self.batch_size, shuffle=True,
                                  collate_fn=train_set.collate_raw_batch)
        dev_loader = DataLoader(dev_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                                collate_fn=dev_set.collate_raw_batch)
        te_loader = DataLoader(test_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                               collate_fn=test_set.collate_raw_batch)
        # NOTE(review): topic_cls gets a *negative* learning rate (gradient
        # ascent on the topic loss) -- presumably intentional as part of the
        # adversarial scheme together with grad_reverse; confirm before
        # changing.
        optim = torch.optim.Adam([
                {'params': self.sent2vec.parameters(), 'lr': 2e-6 * lr_discount / self.grad_accum_cnt},
                {'params': self.sent2vec_share.parameters(), 'lr': 2e-6 * lr_discount / self.grad_accum_cnt},
                {'params': self.topic_cls.parameters(), 'lr': -1e-5 * lr_discount / self.grad_accum_cnt},
                {'params': self.prop_model.parameters(), 'lr': 1e-5 * lr_discount / self.grad_accum_cnt},
                {'params': self.prop_share.parameters(), 'lr': 1e-5 * lr_discount / self.grad_accum_cnt},
                {'params': self.rdm_cls.parameters(), 'lr': 1e-5 * lr_discount / self.grad_accum_cnt}
            ]
        )
        best_valid_acc = 0.0
        best_test_acc = 0.0
        best_valid_test_acc = 0.0
        counter = 0
        optim.zero_grad()
        self.train()
        sum_adver_loss = 0.0
        sum_loss, sum_topic_loss_share, sum_topic_loss_specific = 0.0, 0.0, 0.0
        sum_acc, sum_topic_acc_share, sum_topic_acc_specific = 0.0, 0.0, 0.0
        for epoch in range(max_epochs):
            for step, batch in enumerate(train_loader):
                adver_loss, loss, acc, topic_loss_sh, topic_acc_sh, topic_loss_sp, topic_acc_sp = self.Adver_Loss(
                    batch[0], batch[2].to(self.device), batch[3].to(self.device), lambda_1, lambda_2)
                if lambda_3 != 0:
                    # Extra detached topic loss on an unseen (test-set) batch;
                    # reported metrics are averaged with the train-batch ones.
                    unseen_batch = test_set.InnerBatch(self.batch_size)
                    topic_loss_share, topic_acc_share, topic_loss_specific, topic_acc_specific = self.DetachTopicLoss(
                        unseen_batch[0], unseen_batch[3].to(self.device))
                    adver_loss = adver_loss + lambda_3*(lambda_1*topic_loss_share + lambda_2*topic_loss_specific)
                    topic_loss_sh = (topic_loss_sh + topic_loss_share)/2
                    topic_loss_sp = (topic_loss_sp + topic_loss_specific)/2
                    topic_acc_sp = (topic_acc_specific + topic_acc_sp)/2
                    topic_acc_sh = (topic_acc_share + topic_acc_sh)/2
                adver_loss.backward()
                torch.cuda.empty_cache()
                sum_adver_loss += adver_loss
                sum_loss += loss
                sum_topic_loss_share += topic_loss_sh
                sum_topic_loss_specific += topic_loss_sp
                sum_acc += acc
                sum_topic_acc_share += topic_acc_sh
                sum_topic_acc_specific += topic_acc_sp
                fitlog.add_metric(
                    {"train": {"acc": acc, "loss": loss}},
                    step=counter
                )
                counter += 1

                if (step + 1) % self.grad_accum_cnt == 0:
                    # One optimizer step per grad_accum_cnt micro-batches.
                    optim.step()
                    optim.zero_grad()
                    print(
                        '%6d | %6d  [%3d | %3d], adver_loss=%6.7f, loss/acc = %6.8f/%6.7f, topic_loss_s/topic_acc_s:%6.7f / %6.7f, topic_loss_p/topic_acc_p:%6.7f / %6.7f, best_valid_acc:%6.7f ' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            sum_adver_loss / self.grad_accum_cnt, sum_loss / self.grad_accum_cnt,
                            sum_acc / self.grad_accum_cnt,
                            sum_topic_loss_share / self.grad_accum_cnt, sum_topic_acc_share / self.grad_accum_cnt,
                            sum_topic_loss_specific / self.grad_accum_cnt, sum_topic_acc_specific / self.grad_accum_cnt,
                            best_valid_acc
                        )
                    )
                    fitlog.add_metric({
                                        "train":
                                        {
                                            "adver_loss": sum_adver_loss / self.grad_accum_cnt,
                                            "loss": sum_loss / self.grad_accum_cnt,
                                            "acc": sum_acc / self.grad_accum_cnt,
                                            "topic_loss_share": sum_topic_loss_share / self.grad_accum_cnt,
                                            "topic_acc_share": sum_topic_acc_share / self.grad_accum_cnt,
                                            "topic_loss_private": sum_topic_loss_specific / self.grad_accum_cnt,
                                            "topic_acc_private": sum_topic_acc_specific / self.grad_accum_cnt
                                        }
                                    },
                                    step=step + epoch*len(train_loader))
                    sum_adver_loss = 0.0
                    sum_loss, sum_topic_loss_share, sum_topic_loss_specific = 0.0, 0.0, 0.0
                    sum_acc, sum_topic_acc_share, sum_topic_acc_specific = 0.0, 0.0, 0.0

                if (step + 1) % (valid_every * self.grad_accum_cnt) == 0:
                    val_acc, val_loss = self.valid(dev_loader)
                    test_acc, test_loss = self.valid(te_loader)
                    # self.valid() leaves the model in eval mode; switch back.
                    self.train()
                    best_test_acc = test_acc if best_test_acc < test_acc else best_test_acc
                    print(
                        '##### %6d | %6d, [%3d | %3d], val_loss|val_acc = %6.8f/%6.7f, te_loss|te_acc = %6.8f/%6.7f, best_valid_acc/related_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            val_loss, val_acc,
                            test_loss, test_acc,
                            best_valid_acc, best_valid_test_acc,
                            best_test_acc
                        )
                    )
                    fitlog.add_metric(
                        {"valid": {"acc": val_acc, "loss": val_loss}},
                        step=counter
                    )
                    if val_acc > best_valid_acc:
                        best_valid_acc = val_acc
                        best_valid_test_acc = test_acc
                        self.save_model(model_file)
        fitlog.add_best_metric({"%s" % log_suffix: {"best_valid_acc": best_valid_acc,
                                                    "best_valid_test_acc": best_valid_test_acc,
                                                    "best_test_acc": best_test_acc}})
        fitlog.finish()
        if RenameModel:
            self.RenameModelFile(model_file, best_valid_acc)

    def RenameModelFile(self, model_file, best_valid_acc):
        """Rename the three checkpoint files so the names record the best
        validation accuracy.

        NOTE(review): ``rstrip(".pkl")`` strips a trailing *character set*,
        not a suffix -- a stem ending in '.', 'p', 'k' or 'l' (e.g.
        ``model.pkl`` -> ``mode``) gets mangled; and ``os.system("mv ...")``
        breaks on paths with spaces. Prefer suffix slicing + ``os.replace``.
        """
        new_model_file = "%s_%2.2f.pkl" % (model_file.rstrip(".pkl"), best_valid_acc)
        sent_model_file = "%s_sent.pkl" % (model_file.rstrip(".pkl"))
        new_sent_model_file= "%s_%2.2f_sent.pkl" % (model_file.rstrip(".pkl"), best_valid_acc)
        sent_share = "%s_SentShare.pkl" % (model_file.rstrip(".pkl"))
        new_sent_share= "%s_%2.2f_SentShare.pkl" % (model_file.rstrip(".pkl"), best_valid_acc)
        os.system("mv %s %s" % (model_file, new_model_file))
        os.system("mv %s %s" % (sent_model_file, new_sent_model_file))
        os.system("mv %s %s" % (sent_share, new_sent_share))

    def save_model(self, model_file):
        """Persist both sentence encoders via their own hooks and everything
        else (both propagation encoders + both heads) as one torch checkpoint.

        NOTE(review): same ``rstrip(".pkl")`` character-set pitfall as in
        ``RenameModelFile``.
        """
        sent_model_file = "%s_sent.pkl" % (model_file.rstrip(".pkl"))
        sent_share = "%s_SentShare.pkl" % (model_file.rstrip(".pkl"))
        self.sent2vec_share.save_model(sent_share)
        self.sent2vec.save_model(sent_model_file)
        torch.save(
            {

                "prop_model": self.prop_model.state_dict(),
                "prop_share": self.prop_share.state_dict(),
                "rdm_cls": self.rdm_cls.state_dict(),
                "topic_cls": self.topic_cls.state_dict()
            },
            model_file
        )

    def load_model(self, model_file):
        """Restore a checkpoint written by ``save_model``; prints an error and
        exits the process when ``model_file`` does not exist.

        NOTE(review): same ``rstrip(".pkl")`` character-set pitfall as above.
        """
        if os.path.exists(model_file):
            sent_model_file = "%s_sent.pkl" % (model_file.rstrip(".pkl"))
            sent_share = "%s_SentShare.pkl" % (model_file.rstrip(".pkl"))
            self.sent2vec_share.load_model(sent_share)
            self.sent2vec.load_model(sent_model_file)
            checkpoint = torch.load(model_file)
            self.rdm_cls.load_state_dict(checkpoint["rdm_cls"])
            self.prop_model.load_state_dict(checkpoint['prop_model'])
            self.prop_share.load_state_dict(checkpoint['prop_share'])
            self.topic_cls.load_state_dict(checkpoint['topic_cls'])
        else:
            print("Error: pretrained file %s is not existed!" % model_file)
            sys.exit()

class AdverModel(SentAdverRumorDetection):
    """Adversarial rumor detector whose topic discriminator operates on the
    propagation-level representation (the parent attaches its discriminator
    to the sentence-level representation instead).
    """

    def __init__(self, sent2vec, prop, classifier,
                 topic_label_num,  batch_size=5, grad_accum_cnt=4):
        super(AdverModel, self).__init__(sent2vec, prop, classifier,
                                         topic_label_num, batch_size, grad_accum_cnt)
        prop_hidden_size = self.prop_model.prop_hidden_size
        # Replaces the sentence-level topic classifier built by the parent:
        # here topics are predicted from the propagation output.
        self.topic_cls = nn.Sequential(
            nn.Linear(prop_hidden_size, prop_hidden_size * 2),
            nn.LeakyReLU(),
            nn.Linear(prop_hidden_size * 2, topic_label_num)
        ).to(self.device)
        self.topic_loss_fn = nn.NLLLoss()
        # Rebuilt so the optimizer tracks the new topic_cls parameters.
        self.optim = torch.optim.Adam([
            {'params': self.sent2vec.parameters(), 'lr': 2e-5 * 1.0 / self.grad_accum_cnt},
            {'params': self.topic_cls.parameters(), 'lr': 1e-3 * 1.0 / self.grad_accum_cnt},
            {'params': self.prop_model.parameters(), 'lr': 1e-3 * 1.0 / self.grad_accum_cnt},
            {'params': self.rdm_cls.parameters(), 'lr': 1e-3 * 1.0 / self.grad_accum_cnt}
        ]
        )

    def _prop_outs(self, seqs):
        # Encode all sentences in one pass, split the flat matrix back into
        # per-sequence chunks, and run the propagation model.
        # Uses running offsets: O(n) instead of the previous quadratic
        # sum(seq_len[:idx]) recomputed per sequence.
        sents, seq_len = self.seq2sents(seqs)
        sent_vecs = self.sent2vec(sents)
        offsets = [0]
        for length in seq_len:
            offsets.append(offsets[-1] + length)
        seq_tensors = [sent_vecs[offsets[i]:offsets[i + 1]] for i in range(len(seq_len))]
        return self.prop_model(seq_tensors)

    def Adver_Loss(self, seqs, rdm_labels, topic_labels):
        """Return ``(total_loss, rdm_loss, rdm_acc, topic_loss, topic_acc)``.

        The propagation output feeds the rumor classifier directly and the
        topic classifier through a gradient-reversal layer, so the encoder is
        pushed towards topic-invariant but rumor-discriminative features.
        ``total_loss = rdm_loss + topic_loss``.
        """
        seq_outs = self._prop_outs(seqs)
        preds = self.rdm_cls(seq_outs).softmax(dim=1)
        loss = self.nll_loss_fn(preds.log(), rdm_labels)
        acc = accuracy_score(rdm_labels.cpu(), preds.cpu().argmax(dim=1))

        # Gradient reversal on the topic path only.
        seq_outs = grad_reverse(seq_outs)
        topic_pred = self.topic_cls(seq_outs).softmax(dim=1)
        topic_loss = self.topic_loss_fn(topic_pred.log(), topic_labels)
        topic_acc = accuracy_score(topic_labels.cpu(), topic_pred.cpu().argmax(dim=1))
        return loss + topic_loss, loss, acc, topic_loss, topic_acc

    def TopicLoss(self, seqs, topic_labels):
        """Topic loss/accuracy without gradient reversal.

        Note the ``(topic_acc, topic_loss)`` return order -- the reverse of
        the loss-first convention used elsewhere in this file.
        """
        seq_outs = self._prop_outs(seqs)
        topic_pred = self.topic_cls(seq_outs).softmax(dim=1)
        topic_loss = self.topic_loss_fn(topic_pred.log(), topic_labels)
        topic_acc = accuracy_score(topic_labels.cpu(), topic_pred.cpu().argmax(dim=1))
        return topic_acc, topic_loss