import fitlog
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import os
import sys
from torch.utils.data import DataLoader
from torch.optim import Adam
from SentModel.sentence_trainer import SentimentTrainer
from torch.nn.utils.rnn import pad_packed_sequence
import math

class RumorDetection(nn.Module):
    """Rumor-detection pipeline: sentence encoder -> propagation model -> classifier.

    `sent2vec` encodes individual posts, `propagation` aggregates each thread's
    post vectors into a single sequence representation, and `classifier` maps
    that representation to class probabilities.
    """

    def __init__(self, sent2vec, propagation, classifier, batch_size=5, grad_accum_cnt=4):
        super(RumorDetection, self).__init__()
        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.sent2vec = sent2vec.to(self.device)
        self.prop_model = propagation.to(self.device)
        self.rdm_cls = classifier.to(self.device)
        self.nll_loss_fn = nn.NLLLoss()
        self.batch_size = batch_size
        # number of micro-batches whose gradients are accumulated before each optimizer step
        self.grad_accum_cnt = grad_accum_cnt

    @staticmethod
    def _strip_pkl(path):
        """Drop a trailing '.pkl' extension.

        `str.rstrip(".pkl")` strips any run of the characters '.', 'p', 'k', 'l'
        and corrupts names ending in those letters; suffix removal is intended.
        """
        return path[:-len(".pkl")] if path.endswith(".pkl") else path

    def seq2sents(self, seqs):
        """Flatten a batch of post sequences into one sentence list plus per-sequence lengths."""
        sent_list = [sent for seq in seqs for sent in seq]
        seq_len = [len(seq) for seq in seqs]
        return sent_list, seq_len

    def _split_by_len(self, sent_vecs, seq_len):
        """Slice the flat sentence-vector tensor back into one tensor per sequence.

        Uses running offsets instead of re-summing `seq_len[:idx]` for every
        sequence (the latter is O(n^2) in the number of sequences).
        """
        offsets = [0]
        for n in seq_len:
            offsets.append(offsets[-1] + n)
        return [sent_vecs[offsets[i]:offsets[i + 1]] for i in range(len(seq_len))]

    def Batch2Vecs(self, batch):
        """Encode batch[0] (a list of post sequences) into one vector per sequence."""
        sents, seq_len = self.seq2sents(batch[0])
        sent_vecs = self.sent2vec(sents)
        seq_tensors = self._split_by_len(sent_vecs, seq_len)
        seq_outs = self.prop_model(seq_tensors)
        return seq_outs

    def AugBatch2Vecs(self, batch):
        """Same as Batch2Vecs but uses the encoder's augmented forward pass."""
        sents, seq_len = self.seq2sents(batch[0])
        sent_vecs = self.sent2vec.AugForward(sents)
        seq_tensors = self._split_by_len(sent_vecs, seq_len)
        seq_outs = self.prop_model(seq_tensors)
        return seq_outs

    def AugPredict(self, batch):
        """Class probabilities from the augmented encoding path, shape [batch, n_classes]."""
        seq_outs = self.AugBatch2Vecs(batch)
        preds = self.rdm_cls(seq_outs).softmax(dim=1)
        return preds

    def forward(self, batch):
        """Class probabilities, shape [batch, n_classes]."""
        seq_outs = self.Batch2Vecs(batch)
        preds = self.rdm_cls(seq_outs).softmax(dim=1)
        return preds

    def predict(self, batch):
        """Alias of forward()."""
        return self.forward(batch)

    def LossAndAcc(self, preds, labels):
        """NLL loss and accuracy for probability predictions.

        Returns (loss, acc): `loss` is a scalar tensor, `acc` a python float.
        """
        # shift probabilities slightly away from exact 0/1 so log() cannot produce nan/inf
        epsilon = torch.ones_like(preds) * 1e-8
        preds = (preds - epsilon).abs()
        # accept either one-hot [batch, n_classes] or index [batch] labels
        labels = labels.argmax(dim=1) if labels.dim() == 2 else labels
        loss = self.nll_loss_fn(preds.log(), labels)
        acc = accuracy_score(labels.cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        return loss, acc

    def RDMLoss(self, batch):
        """Loss/accuracy of the rumor-detection head for one batch; batch[-2] holds labels."""
        preds = self.forward(batch)
        loss, acc = self.LossAndAcc(preds, batch[-2].to(self.device))
        return loss, acc

    def AugLoss(self, batch):
        """Loss/accuracy through the augmented encoding path."""
        preds = self.AugPredict(batch)
        loss, acc = self.LossAndAcc(preds, batch[-2].to(self.device))
        return loss, acc

    def dataset2dataloader(self, train_set, dev_set, test_set):
        """Wrap the three datasets in DataLoaders using each dataset's own collate function."""
        train_loader = DataLoader(train_set, batch_size=self.batch_size, shuffle=True,
                                  collate_fn=train_set.collate_raw_batch)
        dev_loader = DataLoader(dev_set, batch_size=self.batch_size, shuffle=True,
                                collate_fn=dev_set.collate_raw_batch)
        te_loader = DataLoader(test_set, batch_size=self.batch_size, shuffle=True,
                               collate_fn=test_set.collate_raw_batch)
        return train_loader, dev_loader, te_loader

    def trainOptim(self, lr_discount):
        """Adam with a smaller LR for the (pretrained) encoder; LRs are divided by
        grad_accum_cnt to compensate for the gradient accumulation."""
        return torch.optim.Adam([
            {'params': self.sent2vec.parameters(), 'lr': 5e-5 * lr_discount / self.grad_accum_cnt},
            {'params': self.prop_model.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt},
            {'params': self.rdm_cls.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt}
        ])

    def train_iters(self, train_set, dev_set, test_set,
                    valid_every=100, max_epochs=10, lr_discount=1.0,
                    best_valid_acc=0.0, best_test_acc=0.0, best_valid_test_acc=0.0,
                    log_dir="../logs/", log_suffix="_RumorDetection", model_file="", RenameModel=True):
        """Train with gradient accumulation; validate every `valid_every` optimizer
        steps and checkpoint whenever dev accuracy improves."""
        fitlog.set_log_dir("%s/" % log_dir, new_log=True)
        train_loader, dev_loader, te_loader = self.dataset2dataloader(train_set, dev_set, test_set)
        optim = self.trainOptim(lr_discount)
        counter = 0
        optim.zero_grad()
        self.train()
        sum_loss, sum_acc = 0.0, 0.0
        for epoch in range(max_epochs):
            for step, batch in enumerate(train_loader):
                loss, acc = self.RDMLoss(batch)
                loss.backward()
                torch.cuda.empty_cache()
                # accumulate python floats: `sum_loss += loss` would keep every
                # step's autograd graph alive and leak memory
                sum_loss += loss.item()
                sum_acc += acc
                if (step + 1) % self.grad_accum_cnt == 0:
                    optim.step()
                    optim.zero_grad()
                    print('%6d | %6d  [%3d | %3d], loss/acc = %6.8f/%6.7f, best_valid_acc:%6.7f ' % (
                        step, len(train_loader),
                        epoch, max_epochs,
                        sum_loss / self.grad_accum_cnt, sum_acc / self.grad_accum_cnt,
                        best_valid_acc
                    )
                          )
                    fitlog.add_metric(
                        {"train": {"acc": sum_acc / self.grad_accum_cnt, "loss": sum_loss / self.grad_accum_cnt}},
                        step=counter
                    )
                    sum_loss, sum_acc = 0.0, 0.0
                    counter += 1
                if (step + 1) % (valid_every * self.grad_accum_cnt) == 0:
                    val_acc, val_loss = self.valid(dev_loader)
                    test_acc, test_loss = self.valid(te_loader)
                    self.train()
                    best_test_acc = test_acc if best_test_acc < test_acc else best_test_acc
                    print(
                        '##### %6d | %6d, [%3d | %3d], val_loss|val_acc = %6.8f/%6.7f, te_loss|te_acc = %6.8f/%6.7f, best_valid_acc/related_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            val_loss, val_acc,
                            test_loss, test_acc,
                            best_valid_acc, best_valid_test_acc,
                            best_test_acc
                        )
                    )
                    fitlog.add_metric(
                        {"valid": {"acc": val_acc, "loss": val_loss}},
                        step=counter
                    )
                    if val_acc > best_valid_acc:
                        best_valid_acc = val_acc
                        best_valid_test_acc = test_acc
                        self.save_model(model_file)
        fitlog.add_best_metric({"%s" % log_suffix: {"best_valid_acc": best_valid_acc,
                                                         "best_valid_test_acc": best_valid_test_acc,
                                                         "best_test_acc": best_test_acc}})
        fitlog.finish()
        if RenameModel:
            self.RenameModel(model_file, best_valid_acc)

    def RenameModel(self, model_file, best_valid_acc):
        """Rename the checkpoint files to include the best dev accuracy."""
        base = self._strip_pkl(model_file)
        new_model_file = "%s_%2.2f.pkl" % (base, best_valid_acc)
        sent_model_file = "%s_sent.pkl" % base
        new_sent_model_file = "%s_%2.2f_sent.pkl" % (base, best_valid_acc)
        # os.replace instead of os.system("mv ..."): portable and safe for
        # paths containing spaces or shell metacharacters
        os.replace(model_file, new_model_file)
        os.replace(sent_model_file, new_sent_model_file)

    def dataset2preds(self, data_loader: DataLoader):
        """Run inference over a loader; returns (pred_tensor, label_tensor) on self.device."""
        labels = []
        preds = []
        with torch.no_grad():
            for batch in data_loader:
                pred = self.forward(batch)
                torch.cuda.empty_cache()
                preds.append(pred)
                labels.append(batch[-2])
            pred_tensor = torch.cat(preds, dim=0)
            label_tensor = torch.cat(labels, dim=0).to(self.device)
        return pred_tensor, label_tensor

    def PRF1(self, label_tensor, pred_tensor):
        """Precision / recall / F1 for binary predictions.

        Tensors may live on the GPU (dataset2preds moves labels to self.device),
        so both are moved to the CPU before converting to numpy.
        """
        if label_tensor.dim() == 2:
            label_tensor = label_tensor.argmax(dim=1)
        labels_np = label_tensor.cpu().numpy()
        preds_np = pred_tensor.cpu().argmax(dim=1).numpy()
        prec = precision_score(labels_np, preds_np)
        recall = recall_score(labels_np, preds_np)
        f1 = f1_score(labels_np, preds_np)
        return prec, recall, f1

    def valid(self, data_loader: DataLoader, pretrained_file=None, all_metrics=False):
        """Evaluate on `data_loader`; returns (acc, loss), plus (prec, recall, f1)
        when all_metrics is True.

        Note: LossAndAcc returns (loss, acc); the previous code unpacked it as
        (acc, loss), which swapped loss and accuracy for every caller.
        """
        self.eval()
        if pretrained_file is not None and os.path.exists(pretrained_file):
            self.load_model(pretrained_file)
        pred_tensor, label_tensor = self.dataset2preds(data_loader)
        val_loss, val_acc = self.LossAndAcc(pred_tensor, label_tensor)
        self.train()
        if all_metrics:
            val_prec, val_recall, val_f1 = self.PRF1(label_tensor, pred_tensor)
            return val_acc, val_loss, val_prec, val_recall, val_f1
        else:
            return val_acc, val_loss

    def save_model(self, model_file):
        """Persist the model: the sentence encoder goes to its own side file,
        the propagation model and classifier into `model_file`."""
        sent_model_file = "%s_sent.pkl" % self._strip_pkl(model_file)
        self.sent2vec.save_model(sent_model_file)
        torch.save(
            {
                "prop_model": self.prop_model.state_dict(),
                "rdm_cls": self.rdm_cls.state_dict()
            },
            model_file
        )

    def load_model(self, model_file):
        """Restore the checkpoint written by save_model; exits if it is missing."""
        if os.path.exists(model_file):
            sent_model_file = "%s_sent.pkl" % self._strip_pkl(model_file)
            self.sent2vec.load_model(sent_model_file)
            # map_location allows loading GPU-trained checkpoints on CPU-only hosts
            checkpoint = torch.load(model_file, map_location=self.device)
            self.rdm_cls.load_state_dict(checkpoint["rdm_cls"])
            self.prop_model.load_state_dict(checkpoint['prop_model'])
        else:
            print("Error: pretrained file %s is not existed!" % model_file)
            sys.exit()

class CEDFramework(RumorDetection):
    """Early-detection variant: classifies every prefix of a single thread and
    treats the prediction as final once the probability of the true class
    exceeds `alpha`. Combines a prediction loss with `diff`/`time` penalties
    weighted by lambda_0 / lambda_1."""

    def __init__(self, sent2vec, propagation, classifier,
                 lambda_0, lambda_1,
                 batch_size=5, grad_accum_cnt=4, alpha=0.95):
        super(CEDFramework, self).__init__(sent2vec, propagation, classifier,
                                           batch_size, grad_accum_cnt)
        # ignore_index=-1 lets the per-step losses mask out pre/post-confidence steps
        self.nll_loss_mean = nn.NLLLoss(ignore_index=-1)
        self.nll_loss_none = nn.NLLLoss(ignore_index=-1, reduction="none")
        self.batch_size = 1  # this framework processes exactly one thread per batch
        self.alpha = alpha  # confidence threshold for early stopping
        self.lambda_0 = lambda_0  # weight of diff_loss
        self.lambda_1 = lambda_1  # weight of time_loss

    def SeqPredict(self, seq):
        """Per-prefix class probabilities for a single thread; returns [seq_len, 2]."""
        sents, seq_len = self.seq2sents(seq)
        sent_vecs = self.sent2vec(sents)
        # running offsets avoid re-summing seq_len[:idx] for every sequence
        offsets = [0]
        for n in seq_len:
            offsets.append(offsets[-1] + n)
        seq_tensors = [sent_vecs[offsets[i]:offsets[i + 1]] for i in range(len(seq_len))]
        df_outs, seq_outs = self.prop_model(seq_tensors, output_hidden_states=True)
        df_out_tensors, batchsize = pad_packed_sequence(df_outs, batch_first=True)
        preds = self.rdm_cls(df_out_tensors).softmax(dim=-1)  # [1, seq_len, 2]
        return preds.squeeze(0)  # [seq_len, 2]

    def pred_loss(self, preds, labels):
        """Classification loss over the steps from the first confident prediction
        onward; earlier steps are masked with -1 (ignore_index).

        NOTE(review): `preds` holds probabilities, not log-probabilities, yet is
        passed to NLLLoss directly — possibly `preds.log()` was intended; the
        original behavior is preserved here.
        """
        pred_labels = labels.clone()
        label_idx = int(labels[0])  # all steps share the thread-level label
        for i in range(len(preds)):
            if float(preds[i][label_idx]) > self.alpha:
                break
            pred_labels[i] = -1
        return self.nll_loss_mean(preds, pred_labels)

    def time_loss(self, preds, labels):
        """Penalty proportional to how many steps pass before the prediction first
        becomes confident: each non-confident step i contributes
        preds[i][label] * (1 / preds[i][label]) = 1 to the sum."""
        weights = torch.zeros_like(preds, device=self.device)
        label_idx = int(labels[0])
        for i in range(len(preds)):
            if float(preds[i][label_idx]) > self.alpha:
                break
            else:
                weights[i][label_idx] = 1.0 / preds[i][label_idx]
        sum_loss = (preds * weights).sum()
        mean_loss = sum_loss * 1.0 / len(preds)
        return mean_loss

    def diff_loss(self, preds, labels):
        """Margin-style loss pushing post-confidence predictions past log(alpha).

        NOTE(review): `weights` inherits the integer dtype of `labels`, so the
        math.log fill values are truncated toward zero — possibly a float tensor
        was intended; the original behavior is preserved here.
        """
        pred_labels = labels.clone()
        weights = torch.zeros_like(labels, device=self.device)
        label_idx = int(labels[0])
        # guard: keeps the slices below well-defined even if preds is empty
        # (previously `i` was only bound by the loop, risking a NameError)
        i = 0
        for i in range(len(preds)):
            if float(preds[i][label_idx]) > self.alpha:
                break
            else:
                pred_labels[i] = -1
        seq_loss = self.nll_loss_none(preds, pred_labels)
        if label_idx == 0:
            weights[i:].fill_(math.log(1.0 - self.alpha))
            return (seq_loss - weights).mean()
        else:
            weights[i:].fill_(math.log(self.alpha))
            return (weights - seq_loss).mean()

    def RDMLoss(self, batch):
        """Combined CED loss and accuracy for a single thread (batch size must be 1)."""
        seqs, labels = batch[0], batch[-2]
        assert isinstance(seqs[0], list) and len(seqs) == 1 and len(labels) == 1
        preds = self.SeqPredict(seqs)
        # keep probabilities strictly inside (0, 1) so downstream log() is finite
        epsilon = torch.ones_like(preds) * 1e-8
        preds = (preds - epsilon).abs()
        seq_labels = labels.repeat(len(seqs[0])).to(self.device)  # [seq_len]
        pred_loss = self.pred_loss(preds, seq_labels)
        time_loss = self.time_loss(preds, seq_labels)
        diff_loss = self.diff_loss(preds, seq_labels)
        acc = accuracy_score(seq_labels.cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        loss = pred_loss + self.lambda_0 * diff_loss + self.lambda_1 * time_loss
        return loss, acc

class SubjMTLFrameWork(RumorDetection):
    """Multi-task framework: rumor detection jointly trained with a binary
    sentiment/subjectivity head that shares the sentence encoder."""

    def __init__(self, sent2vec, propagation, classifier, batch_size=5, grad_accum_cnt=4):
        super(SubjMTLFrameWork, self).__init__(sent2vec, propagation, classifier,
                                               batch_size, grad_accum_cnt)
        # binary sentiment head on top of the shared sentence encoder
        self.senti_cls = nn.Linear(sent2vec.sent_hidden_size, 2).to(device=self.device)
        # separate optimizer used by the sentiment-only pretraining stage
        self.subj_optim = Adam([
            {'params': self.sent2vec.parameters(), 'lr': 2e-5, "weight_decay": 0.1},
            {'params': self.senti_cls.parameters(), 'lr': 2e-3, "weight_decay": 0.1},
        ])

    @staticmethod
    def _strip_pkl(path):
        """Drop a trailing '.pkl' extension (rstrip(".pkl") strips characters, not a suffix)."""
        return path[:-len(".pkl")] if path.endswith(".pkl") else path

    def SentimenTrain(self, tr_loader, dev_loader, te_loader, max_epoches=20,
                      print_every=10, valid_every=100,
                      log_dir="../logs/", log_suffix="_RumorDetection", model_file=""):
        """Pretrain the shared encoder + sentiment head alone, checkpointing on
        the best sentiment dev accuracy."""
        fitlog.set_log_dir("%s/" % log_dir, new_log=True)

        sum_loss = 0
        sum_acc = 0
        best_valid_acc = 0.0
        best_test_acc = 0.0
        best_valid_test_acc = 0.0
        self.train()
        for epoch in range(max_epoches):
            for step, batch in enumerate(tr_loader):
                loss, acc = self.SentimentLoss(batch[0], batch[1])
                self.subj_optim.zero_grad()
                loss.backward()
                self.subj_optim.step()
                torch.cuda.empty_cache()
                # accumulate floats so step graphs are not kept alive
                sum_loss += loss.item()
                sum_acc += acc
                if (step + 1) % print_every == 0:
                    mean_loss = sum_loss * 1.0 / print_every
                    mean_acc = sum_acc * 1.0 / print_every
                    print("#Sentiment Training# %3d|%3d [%3d | %3d] loss/acc: %6.7f / %6.7f" % (
                        step, len(tr_loader),
                        epoch, max_epoches,
                        mean_loss, mean_acc))
                    sum_loss = 0
                    sum_acc = 0
                if (step + 1) % valid_every == 0:
                    val_acc, val_loss = self.SentiValid(dev_loader)
                    test_acc, test_loss = self.SentiValid(te_loader)
                    print(
                        '##### %6d | %5d [%3d | %3d] val_loss|val_acc = %6.8f/%6.7f, test_loss|test_acc = %6.8f/%6.7f, best_valid_acc/best_valid_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                            step, len(tr_loader),
                            epoch, max_epoches,
                            val_loss, val_acc,
                            test_loss, test_acc,
                            best_valid_acc, best_valid_test_acc,
                            best_test_acc
                        )
                    )
                    best_test_acc = test_acc if test_acc > best_test_acc else best_test_acc
                    if val_acc > best_valid_acc:
                        best_valid_acc = val_acc
                        best_valid_test_acc = test_acc
                        self.save_model(model_file)
        # previously best_valid_acc was logged with best_valid_test_acc's value
        fitlog.add_best_metric({"%s" % log_suffix: {"best_valid_acc": best_valid_acc,
                                                         "best_valid_test_acc": best_valid_test_acc,
                                                         "best_test_acc": best_test_acc}})
        fitlog.finish()

    def SentimentLoss(self, sents, labels):
        """NLL loss and accuracy of the sentiment head on a batch of sentences."""
        batch_y = labels.to(self.device)
        preds = self.SentimentScore(sents)
        # shift probabilities away from exact 0/1 so log() cannot produce nan/inf
        epsilon = torch.ones_like(preds) * 1e-8
        preds = (preds - epsilon).abs()
        loss = self.nll_loss_fn(preds.log(), batch_y)
        acc = accuracy_score(batch_y.cpu(), preds.cpu().argmax(dim=1))
        return loss, acc

    def SentimentScore(self, sents):
        """Sentiment probabilities for raw sentences, shape [batch, 2]."""
        sent_vecs = self.sent2vec(sents)
        preds = self.senti_cls(sent_vecs).softmax(dim=1)
        return preds

    def SentiValid(self, data_set, pretrained_file=None):
        """Evaluate the sentiment head; returns (acc, loss)."""
        self.eval()
        if pretrained_file is not None:
            # load_model takes the path positionally; it has no `pretrained_file` kwarg
            self.load_model(pretrained_file)
        with torch.no_grad():
            preds = []
            labels = []
            for batch in data_set:
                pred = self.SentimentScore(batch[0])
                y_label = batch[1].to(self.device)
                labels.append(y_label)
                preds.append(pred)
                torch.cuda.empty_cache()
            pred_tensor = torch.cat(preds).cpu()
            label_tensor = torch.cat(labels).cpu()
            val_acc = accuracy_score(label_tensor.numpy(),
                                     pred_tensor.argmax(dim=1).numpy())
            # clamp away exact zeros so log() cannot yield -inf/nan
            val_loss = self.nll_loss_fn(pred_tensor.clamp(min=1e-8).log(), label_tensor).mean()
        self.train()
        return val_acc, val_loss

    def joint_train_iters(self, train_set, dev_set, test_set,
                          subj_tr_set, subj_dev_set, subj_te_set,
                          gamma=0.5, valid_every=100, max_epochs=10, lr_discount=1.0,
                          best_valid_acc=0.0, best_test_acc=0.0, best_valid_test_acc=0.0,
                          log_dir="../logs/", log_suffix="_RumorDetection", model_file="", RenameModel=True):
        """Jointly train rumor detection (with gradient accumulation) and the
        sentiment task (one sampled batch per optimizer step, scaled by gamma)."""
        fitlog.set_log_dir("%s/" % log_dir, new_log=True)
        train_loader = DataLoader(train_set, batch_size=self.batch_size, shuffle=True,
                                  collate_fn=train_set.collate_raw_batch)
        dev_loader = DataLoader(dev_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                                collate_fn=dev_set.collate_raw_batch)
        te_loader = DataLoader(test_set, batch_size=self.batch_size * self.grad_accum_cnt, shuffle=True,
                               collate_fn=test_set.collate_raw_batch)
        subj_dev_loader = DataLoader(subj_dev_set, batch_size=20, shuffle=False,
                                     collate_fn=subj_dev_set.collate_raw_batch)
        subj_te_loader = DataLoader(subj_te_set, batch_size=20, shuffle=False, collate_fn=subj_te_set.collate_raw_batch)

        optim = torch.optim.Adam([
            {'params': self.sent2vec.parameters(), 'lr': 5e-5 * lr_discount / self.grad_accum_cnt},
            {'params': self.prop_model.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt},
            {'params': self.rdm_cls.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt},
            {'params': self.senti_cls.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt},
        ]
        )

        counter = 0
        optim.zero_grad()
        sum_loss, sum_acc = 0.0, 0.0
        for epoch in range(max_epochs):
            for step, batch in enumerate(train_loader):
                loss, acc = self.RDMLoss(batch)
                loss.backward()
                torch.cuda.empty_cache()
                # accumulate floats so step graphs are not kept alive
                sum_loss += loss.item()
                sum_acc += acc
                # (step + 1): step only after grad_accum_cnt micro-batches, matching
                # train_iters (the old `step %` form stepped at step 0 after one batch)
                if (step + 1) % self.grad_accum_cnt == 0:
                    batch = subj_tr_set.sample_batch(self.grad_accum_cnt * self.batch_size)
                    subj_loss, subj_acc = self.SentimentLoss(batch[0], batch[1])
                    subj_loss = subj_loss * gamma * self.grad_accum_cnt  # multipy grad_accum_cnt to eliminate its impact in learing rate
                    subj_loss.backward()
                    optim.step()
                    optim.zero_grad()
                    print(
                        '%6d | %6d  [%3d | %3d], subj_loss/subj_acc=%6.7f/%6.7f, loss/acc = %6.8f/%6.7f, best_valid_acc:%6.7f ' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            subj_loss, subj_acc,
                            sum_loss / self.grad_accum_cnt, sum_acc / self.grad_accum_cnt,
                            best_valid_acc
                        )
                        )
                    fitlog.add_metric(
                        {"train": {"acc": sum_acc / self.grad_accum_cnt, "loss": sum_loss / self.grad_accum_cnt}},
                        step=counter
                    )
                    sum_loss, sum_acc = 0.0, 0.0
                    counter += 1
                if (step + 1) % (valid_every * self.grad_accum_cnt) == 0:
                    val_acc, val_loss = self.valid(dev_loader)
                    test_acc, test_loss = self.valid(te_loader)
                    best_test_acc = test_acc if best_test_acc < test_acc else best_test_acc
                    print(
                        '##### %6d | %6d, [%3d | %3d], val_loss|val_acc = %6.8f/%6.7f, te_loss|te_acc = %6.8f/%6.7f, best_valid_acc/related_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            val_loss, val_acc,
                            test_loss, test_acc,
                            best_valid_acc, best_valid_test_acc,
                            best_test_acc
                        )
                    )
                    fitlog.add_metric(
                        {"valid": {"acc": val_acc, "loss": val_loss}},
                        step=counter
                    )
                    if val_acc > best_valid_acc:
                        best_valid_acc = val_acc
                        best_valid_test_acc = test_acc
                        self.save_model(model_file)

            # end-of-epoch sanity check of the auxiliary sentiment task
            val_acc, val_loss = self.SentiValid(subj_dev_loader)
            test_acc, test_loss = self.SentiValid(subj_te_loader)
            print(
                '##### Subjectivity Validation: [%3d | %3d ] val_loss|val_acc = %6.8f/%6.7f, test_loss|test_acc = %6.8f/%6.7f' % (
                    epoch, max_epochs,
                    val_loss, val_acc,
                    test_loss, test_acc
                )
                )

        fitlog.add_best_metric({"%s" % log_suffix: {"best_valid_acc": best_valid_acc,
                                                         "best_valid_test_acc": best_valid_test_acc,
                                                         "best_test_acc": best_test_acc}})
        fitlog.finish()
        if RenameModel:
            self.RenameModel(model_file, best_valid_acc)

    def ContinualLearning(self, train_set, dev_set, test_set,
                          subj_tr_set, subj_dev_set, subj_te_set,
                          gamma=0.5, valid_every=100, max_epochs=10, lr_discount=1.0,
                          best_valid_acc=0.0, best_test_acc=0.0, best_valid_test_acc=0.0,
                          log_dir="../logs/", log_suffix="_RumorDetection", model_file=""):
        """Two-stage training: sentiment pretraining, then joint training from
        the best pretrained checkpoint."""
        fitlog.set_log_dir("%s/" % log_dir, new_log=True)
        # the training loader must use its own dataset's collate function
        # (previously it borrowed subj_dev_set's by copy-paste)
        subj_tr_loader = DataLoader(subj_tr_set, batch_size=20, shuffle=False,
                                    collate_fn=subj_tr_set.collate_raw_batch)
        subj_dev_loader = DataLoader(subj_dev_set, batch_size=20, shuffle=False,
                                     collate_fn=subj_dev_set.collate_raw_batch)
        subj_te_loader = DataLoader(subj_te_set, batch_size=20, shuffle=False,
                                    collate_fn=subj_te_set.collate_raw_batch)
        self.SentimenTrain(subj_tr_loader, subj_dev_loader, subj_te_loader,
                           max_epoches=10, print_every=10, valid_every=100,
                           model_file=model_file
                           )
        self.load_model(model_file)
        self.joint_train_iters(train_set, dev_set, test_set,
                               subj_tr_set, subj_dev_set, subj_te_set,
                               gamma=gamma, valid_every=valid_every, max_epochs=max_epochs,
                               lr_discount=lr_discount, best_valid_acc=best_valid_acc,
                               best_test_acc=best_test_acc, best_valid_test_acc=best_valid_test_acc,
                               log_dir=log_dir, log_suffix=log_suffix, model_file=model_file)

    def save_model(self, model_file):
        """Persist encoder (side file) plus propagation, rumor and sentiment heads."""
        sent_model_file = "%s_sent.pkl" % self._strip_pkl(model_file)
        self.sent2vec.save_model(sent_model_file)
        torch.save(
            {
                "prop_model": self.prop_model.state_dict(),
                "rdm_cls": self.rdm_cls.state_dict(),
                "senti_cls": self.senti_cls.state_dict()
            },
            model_file
        )

    def load_model(self, model_file):
        """Restore the checkpoint written by save_model; exits if it is missing."""
        if os.path.exists(model_file):
            sent_model_file = "%s_sent.pkl" % self._strip_pkl(model_file)
            self.sent2vec.load_model(sent_model_file)
            # map_location allows loading GPU-trained checkpoints on CPU-only hosts
            checkpoint = torch.load(model_file, map_location=self.device)
            self.rdm_cls.load_state_dict(checkpoint["rdm_cls"])
            self.prop_model.load_state_dict(checkpoint['prop_model'])
            self.senti_cls.load_state_dict(checkpoint['senti_cls'])
        else:
            print("Error: pretrained file %s is not existed!" % model_file)
            sys.exit()

class SIFTER(RumorDetection):
    """Rumor detection with a second sentence encoder trained on a subjectivity
    task (via SentimentTrainer); the two encoders' sentence vectors are summed
    before the propagation model."""

    def __init__(self, sent2vec1, sent2vec2, propagation, classifier, batch_size=5, grad_accum_cnt=4):
        super(SIFTER, self).__init__(sent2vec1, propagation, classifier,
                                                    batch_size=batch_size,
                                                    grad_accum_cnt=grad_accum_cnt)
        # auxiliary trainer owning the second encoder and the sentiment head
        self.subj_trainer = SentimentTrainer(sent2vec2, senti_label_num=2)

    @staticmethod
    def _strip_pkl(path):
        """Drop a trailing '.pkl' extension (rstrip(".pkl") strips characters, not a suffix)."""
        return path[:-len(".pkl")] if path.endswith(".pkl") else path

    def Batch2Vecs(self, batch):
        """Encode each sequence with both encoders and sum the sentence vectors."""
        sents, seq_len = self.seq2sents(batch[0])
        sent_vecs_1 = self.sent2vec(sents)
        sent_vecs_2 = self.subj_trainer.sent2vec(sents)
        sent_vecs = sent_vecs_1 + sent_vecs_2
        # running offsets avoid re-summing seq_len[:idx] for every sequence
        offsets = [0]
        for n in seq_len:
            offsets.append(offsets[-1] + n)
        seq_tensors = [sent_vecs[offsets[i]:offsets[i + 1]] for i in range(len(seq_len))]
        seq_outs = self.prop_model(seq_tensors)
        return seq_outs

    def trainOptim(self, lr_discount):
        """Adam over all five sub-modules; LRs divided by grad_accum_cnt to
        compensate for gradient accumulation."""
        return torch.optim.Adam([
            {'params': self.sent2vec.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt},
            {'params': self.prop_model.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt},
            {'params': self.rdm_cls.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt},
            {'params': self.subj_trainer.sent2vec.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt},
            {'params': self.subj_trainer.senti_cls.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt},
        ])

    def joint_train_iters(self, train_set, dev_set, test_set,
                        subj_tr_set, subj_dev_set, subj_te_set,
                        gamma=0.5, valid_every=100, max_epochs=10, lr_discount=1.0,
                        best_valid_acc=0.0, best_test_acc=0.0, best_valid_test_acc=0.0,
                        log_dir="../logs/", log_suffix="_RumorDetection", model_file="", RenameModel=True):
        """Jointly train rumor detection (with gradient accumulation) and the
        subjectivity task (one sampled batch per optimizer step, scaled by gamma)."""
        fitlog.set_log_dir("%s/" % log_dir, new_log=True)
        train_loader, dev_loader, te_loader = self.dataset2dataloader(train_set, dev_set, test_set)
        subj_dev_loader = DataLoader(subj_dev_set,
                                     batch_size=20,
                                     shuffle=False,
                                     collate_fn=subj_dev_set.collate_raw_batch)
        subj_te_loader = DataLoader(subj_te_set,
                                    batch_size=20,
                                    shuffle=False,
                                    collate_fn=subj_te_set.collate_raw_batch)
        optim = self.trainOptim(lr_discount)
        counter = 0
        optim.zero_grad()
        self.train()
        sum_loss, sum_acc = 0.0, 0.0
        for epoch in range(max_epochs):
            for step, batch in enumerate(train_loader):
                loss, acc = self.RDMLoss(batch)
                loss.backward()
                torch.cuda.empty_cache()
                # accumulate floats so step graphs are not kept alive
                sum_loss += loss.item()
                sum_acc += acc
                # (step + 1): step only after grad_accum_cnt micro-batches, matching
                # train_iters (the old `step %` form stepped at step 0 after one batch)
                if (step + 1) % self.grad_accum_cnt == 0:
                    batch = subj_tr_set.sample_batch(self.grad_accum_cnt*self.batch_size)
                    subj_loss, subj_acc = self.subj_trainer.SentimentLoss(batch[0], batch[1])
                    subj_loss = subj_loss*gamma*self.grad_accum_cnt # multipy grad_accum_cnt to eliminate its impact in learing rate
                    subj_loss.backward()
                    optim.step()
                    optim.zero_grad()
                    print('%6d | %6d  [%3d | %3d], subj_loss/subj_acc=%6.7f/%6.7f, loss/acc = %6.8f/%6.7f, best_valid_acc:%6.7f ' % (
                        step, len(train_loader),
                        epoch, max_epochs,
                        subj_loss, subj_acc,
                        sum_loss / self.grad_accum_cnt, sum_acc / self.grad_accum_cnt,
                        best_valid_acc
                    )
                          )
                    fitlog.add_metric(
                        {"train": {"acc": sum_acc / self.grad_accum_cnt, "loss": sum_loss / self.grad_accum_cnt}},
                        step=counter
                    )
                    sum_loss, sum_acc = 0.0, 0.0
                    counter += 1
                if (step + 1) % (valid_every * self.grad_accum_cnt) == 0:
                    val_acc, val_loss = self.valid(dev_loader)
                    test_acc, test_loss = self.valid(te_loader)
                    self.train()
                    best_test_acc = test_acc if best_test_acc < test_acc else best_test_acc
                    print(
                        '##### %6d | %6d, [%3d | %3d], val_loss|val_acc = %6.8f/%6.7f, te_loss|te_acc = %6.8f/%6.7f, best_valid_acc/related_test_acc= %6.7f/%6.7f, best_test_acc=%6.7f' % (
                            step, len(train_loader),
                            epoch, max_epochs,
                            val_loss, val_acc,
                            test_loss, test_acc,
                            best_valid_acc, best_valid_test_acc,
                            best_test_acc
                        )
                    )
                    fitlog.add_metric(
                        {"valid": {"acc": val_acc, "loss": val_loss}},
                        step=counter
                    )
                    if val_acc > best_valid_acc:
                        best_valid_acc = val_acc
                        best_valid_test_acc = test_acc
                        self.save_model(model_file)

            # end-of-epoch sanity check of the auxiliary subjectivity task
            val_acc, val_loss = self.subj_trainer.SentiValid(subj_dev_loader)
            test_acc, test_loss = self.subj_trainer.SentiValid(subj_te_loader)
            print('##### Subjectivity Validation: [%3d | %3d ] val_loss|val_acc = %6.8f/%6.7f, test_loss|test_acc = %6.8f/%6.7f' % (
                    epoch, max_epochs,
                    val_loss, val_acc,
                    test_loss, test_acc
                )
            )
        fitlog.add_best_metric({"%s" % log_suffix: {"best_valid_acc": best_valid_acc,
                                                         "best_valid_test_acc": best_valid_test_acc,
                                                         "best_test_acc": best_test_acc}})
        fitlog.finish()
        if RenameModel:
            self.RenameModel(model_file, best_valid_acc)

    def RenameModel(self, model_file, best_valid_acc):
        """Rename the three checkpoint files to include the best dev accuracy."""
        base = self._strip_pkl(model_file)
        new_model_file = "%s_%2.2f.pkl" % (base, best_valid_acc)
        sent_model_file = "%s_sent.pkl" % base
        new_sent_model_file = "%s_%2.2f_sent.pkl" % (base, best_valid_acc)
        sent2_model_file = "%s_subjsent.pkl" % base
        new_subjsent_file = "%s_%2.2f_subjsent.pkl" % (base, best_valid_acc)
        # os.replace instead of os.system("mv ..."): portable and safe for
        # paths containing spaces or shell metacharacters
        os.replace(model_file, new_model_file)
        os.replace(sent_model_file, new_sent_model_file)
        os.replace(sent2_model_file, new_subjsent_file)

    def save_model(self, model_file):
        """Persist both encoders (side files) plus propagation, rumor and sentiment
        heads. senti_cls is included so load_model can restore it (previously it
        was loaded but never saved)."""
        base = self._strip_pkl(model_file)
        sent_model_file = "%s_sent.pkl" % base
        self.sent2vec.save_model(sent_model_file)
        sent2_model_file = "%s_subjsent.pkl" % base
        self.subj_trainer.sent2vec.save_model(sent2_model_file)
        torch.save(
            {
                "prop_model": self.prop_model.state_dict(),
                "rdm_cls": self.rdm_cls.state_dict(),
                "senti_cls": self.subj_trainer.senti_cls.state_dict(),
            },
            model_file
        )

    def load_model(self, model_file):
        """Restore the checkpoint written by save_model; exits if it is missing.

        Both encoders are restored from their side files; the main checkpoint
        holds prop_model/rdm_cls (and senti_cls when present). The previous
        unconditional reads of checkpoint['sent2vec1'] / ['senti_cls'] KeyErrored
        on this class's own checkpoints, which never contained those entries.
        """
        if os.path.exists(model_file):
            base = self._strip_pkl(model_file)
            sent_model_file = "%s_sent.pkl" % base
            self.sent2vec.load_model(sent_model_file)
            sent2_model_file = "%s_subjsent.pkl" % base
            self.subj_trainer.sent2vec.load_model(sent2_model_file)
            checkpoint = torch.load(model_file, map_location=torch.device("cpu"))
            self.rdm_cls.load_state_dict(checkpoint["rdm_cls"])
            self.prop_model.load_state_dict(checkpoint['prop_model'])
            # older checkpoints written before senti_cls was persisted lack this key
            if 'senti_cls' in checkpoint:
                self.subj_trainer.senti_cls.load_state_dict(checkpoint['senti_cls'])
        else:
            print("Error: pretrained file %s is not existed!" % model_file)
            sys.exit()