from torch.utils.data import DataLoader
from .BaseRumorFramework import RumorDetection, SIFTER
from sklearn.metrics import accuracy_score
import torch
import torch.nn.functional as F, torch.nn as nn
from PropModel.GraphPropagation import BiGCNV2
from SentModel.Sent2Vec import TFIDFBasedVecV2

class RvNNRumorDetec(RumorDetection):
    """Rumor detection built on a recursive (RvNN-style) propagation model.

    The propagation model receives one concatenated embedding tensor for
    all sentences in the batch, together with the per-thread trees.
    """

    def __init__(self, sent2vec, propagation, classifier,
                 batch_size=5, grad_accum_cnt=4):
        super(RvNNRumorDetec, self).__init__(sent2vec, propagation, classifier,
                                             batch_size, grad_accum_cnt)

    def Batch2Vecs(self, batch):
        """Encode every thread in the batch and propagate over its tree."""
        # batch layout: (seqs, trees, ...) — seqs holds one sentence list per thread
        seqs, trees = batch[0], batch[1]
        embedded = torch.cat([self.sent2vec(sents) for sents in seqs])
        return self.prop_model(trees, embedded)

class SITERRvNN(SIFTER):
    """SIFTER variant (RvNN propagation) that fuses two sentence encoders.

    Thread embeddings are the element-wise sum of the task encoder and the
    subjectivity trainer's encoder.
    """

    def __init__(self, sent2vec1, sent2vec2, propagation, classifier, batch_size=5, grad_accum_cnt=4):
        super(SITERRvNN, self).__init__(sent2vec1, sent2vec2, propagation, classifier,
                                        batch_size=batch_size,
                                        grad_accum_cnt=grad_accum_cnt)

    def Batch2Vecs(self, batch):
        """Encode each thread with both encoders (summed), then propagate."""
        seqs, trees = batch[0], batch[1]
        aux_encoder = self.subj_trainer.sent2vec
        fused = [self.sent2vec(sents) + aux_encoder(sents) for sents in seqs]
        return self.prop_model(trees, torch.cat(fused))

class TransformerRumorDetec(RvNNRumorDetec):
    """Rumor detection with a Transformer-style propagation model.

    Unlike the parent class, `Batch2Vecs` hands the propagation model a
    *list* of per-thread embedding tensors instead of one concatenated
    tensor.
    """

    def __init__(self, sent2vec, propagation, classifier,
                 batch_size=5, grad_accum_cnt=4):
        super(TransformerRumorDetec, self).__init__(sent2vec, propagation, classifier,
                                              batch_size, grad_accum_cnt)

    def trainOptim(self, lr_discount):
        """Adam optimizer over the propagation model and the classifier.

        NOTE(review): self.sent2vec parameters are deliberately absent here
        (only prop_model and rdm_cls are optimized) — confirm the sentence
        encoder is meant to stay frozen for this trainer.
        """
        return torch.optim.Adam([
            {'params': self.prop_model.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt},
            {'params': self.rdm_cls.parameters(), 'lr': 1e-3 * lr_discount / self.grad_accum_cnt}
        ])

    def Batch2Vecs(self, batch):
        """Encode each thread separately; pass the list to the propagator."""
        trees, seqs = batch[1], batch[0]
        inputs = [self.sent2vec(sents) for sents in seqs]
        seq_outs = self.prop_model(trees, inputs)
        return seq_outs

    def LossAndAcc(self, preds, labels):
        """NLL loss plus accuracy for probability predictions.

        `preds` are probabilities (rows summing to 1, e.g. softmax output);
        `labels` may be class indices or 2-D one-hot rows.
        Returns (loss tensor, accuracy float).
        """
        # Clamp instead of the previous `(preds - eps).abs()` trick: that
        # form mapped a prediction exactly equal to eps back to 0, which
        # re-introduced the log(0) -> nan it was meant to prevent.
        preds = preds.clamp(min=1e-8)
        labels = labels.argmax(dim=1) if labels.dim() == 2 else labels
        loss = F.nll_loss(preds.log(), labels)
        acc = accuracy_score(labels.cpu().numpy(),
                             preds.argmax(dim=1).cpu().numpy())
        return loss, acc

    def dataset2dataloader(self, train_set, dev_set, test_set):
        """Wrap the three splits in DataLoaders with a raw-batch collate."""
        def collate_raw_batch(items):
            # each item: (sentences, tree, label, topic_label)
            seqs = [item[0] for item in items]
            trees = [item[1] for item in items]
            labels = [item[2] for item in items]
            topic_labels = [item[3] for item in items]
            return seqs, trees, torch.tensor(labels), torch.tensor(topic_labels)
        print("batch_size : ", self.batch_size)
        train_loader = DataLoader(train_set, batch_size=self.batch_size, shuffle=True,
                                  collate_fn=collate_raw_batch)
        # NOTE(review): dev/test loaders shuffle too — harmless for batch
        # metrics but unusual; confirm it is intentional.
        dev_loader = DataLoader(dev_set, batch_size=self.batch_size, shuffle=True,
                                collate_fn=collate_raw_batch)
        te_loader = DataLoader(test_set, batch_size=self.batch_size, shuffle=True,
                               collate_fn=collate_raw_batch)
        return train_loader, dev_loader, te_loader

class SIFTERTransformer(SIFTER):
    """SIFTER trainer feeding per-thread embeddings to a Transformer propagator."""

    def __init__(self, sent2vec1, sent2vec2, propagation, classifier, batch_size=5, grad_accum_cnt=4):
        super(SIFTERTransformer, self).__init__(
            sent2vec1, sent2vec2, propagation, classifier,
            batch_size=batch_size, grad_accum_cnt=grad_accum_cnt)

    def trainOptim(self, lr_discount):
        """Adam over every trainable submodule, lr scaled by discount/accum."""
        lr = 1e-3 * lr_discount / self.grad_accum_cnt
        modules = [self.sent2vec, self.prop_model, self.rdm_cls,
                   self.subj_trainer.sent2vec, self.subj_trainer.senti_cls]
        groups = [{'params': m.parameters(), 'lr': lr} for m in modules]
        # NOTE(review): weight_decay=0.5 is unusually large — confirm intended.
        return torch.optim.Adam(groups, weight_decay=0.5)

    def Batch2Vecs(self, batch):
        """Encode each thread separately; pass the list to the propagator."""
        seqs, trees = batch[0], batch[1]
        embedded = [self.sent2vec(sents) for sents in seqs]
        return self.prop_model(trees, embedded)

    def LossAndAcc(self, preds, labels):
        """MSE loss against the label tensor plus argmax accuracy."""
        # assumes labels are 2-D one-hot/soft targets (argmax below) — TODO confirm
        loss = F.mse_loss(preds, labels)
        y_true = labels.argmax(dim=1).cpu().numpy()
        y_pred = preds.argmax(dim=1).cpu().numpy()
        return loss, accuracy_score(y_true, y_pred)

    def dataset2dataloader(self, train_set, dev_set, test_set):
        """Build shuffled DataLoaders for the train/dev/test splits."""
        def collate_raw_batch(items):
            # item layout: (sentences, tree, label, topic_label)
            seqs = [entry[0] for entry in items]
            trees = [entry[1] for entry in items]
            label_t = torch.tensor([entry[2] for entry in items])
            topic_t = torch.tensor([entry[3] for entry in items])
            return seqs, trees, label_t, topic_t

        def _loader(dataset):
            return DataLoader(dataset, batch_size=self.batch_size, shuffle=True,
                              collate_fn=collate_raw_batch)

        print("batch_size : ", self.batch_size)
        return _loader(train_set), _loader(dev_set), _loader(test_set)

class BiGCNRumorDetec(RumorDetection):
    """Rumor detection with a bidirectional GCN over TD/BU propagation graphs."""

    def __init__(self, sent2vec, propagation, classifier,
                 batch_size=5, grad_accum_cnt=4):
        super(BiGCNRumorDetec, self).__init__(sent2vec, propagation, classifier,
                                              batch_size, grad_accum_cnt)

    def Batch2Vecs(self, batch):
        """Flatten all sentences, encode them in one call, then propagate."""
        seqs, td_graphs, bu_graphs = batch[0], batch[1], batch[2]
        flat_sents = []
        for sents in seqs:
            flat_sents.extend(sents)
        embeddings = self.sent2vec(flat_sents)
        return self.prop_model(td_graphs, bu_graphs, embeddings)

    def AugBatch2Vecs(self, batch):
        """Same as Batch2Vecs but using the encoder's augmented forward pass."""
        seqs, td_graphs, bu_graphs = batch[0], batch[1], batch[2]
        flat_sents = []
        for sents in seqs:
            flat_sents.extend(sents)
        embeddings = self.sent2vec.AugForward(flat_sents)
        return self.prop_model(td_graphs, bu_graphs, embeddings)

    def AugPredict(self, batch):
        """Class probabilities from the augmented forward pass."""
        logits = self.rdm_cls(self.AugBatch2Vecs(batch))
        return torch.softmax(logits, dim=1)

class BiGCNRumorDetecV2(BiGCNRumorDetec):
    """BiGCN variant consuming precomputed TF-IDF arrays and adjacency matrices."""

    def __init__(self, sent2vec: TFIDFBasedVecV2, propagation: BiGCNV2, classifier: nn.Module,
                 batch_size=5, grad_accum_cnt=4):
        super(BiGCNRumorDetecV2, self).__init__(sent2vec, propagation, classifier,
                                                batch_size, grad_accum_cnt)

    def Batch2Vecs(self, batch):
        """Embed the TF-IDF features and run the V2 propagation model."""
        # batch layout: (tfidf_arr, num_nodes, A_TD, A_BU, ...)
        tfidf_arr, num_nodes = batch[0], batch[1]
        A_TD, A_BU = batch[2], batch[3]
        return self.prop_model(num_nodes, A_TD, A_BU, self.sent2vec(tfidf_arr))

    def AugBatch2Vecs(self, batch):
        """Augmented-embedding variant of Batch2Vecs."""
        tfidf_arr, num_nodes = batch[0], batch[1]
        A_TD, A_BU = batch[2], batch[3]
        aug_inputs = self.sent2vec.AugForward(tfidf_arr)
        return self.prop_model(num_nodes, A_TD, A_BU, aug_inputs)

class SITERBiGCN(SIFTER):
    """SIFTER variant with BiGCN propagation and fused dual sentence encoders."""

    def __init__(self, sent2vec1, sent2vec2, propagation, classifier, batch_size=5, grad_accum_cnt=4):
        super(SITERBiGCN, self).__init__(sent2vec1, sent2vec2, propagation, classifier,
                                         batch_size=batch_size,
                                         grad_accum_cnt=grad_accum_cnt)

    def Batch2Vecs(self, batch):
        """Sum task and subjectivity embeddings per thread, then propagate."""
        seqs, td_graphs, bu_graphs = batch[0], batch[1], batch[2]
        aux_encoder = self.subj_trainer.sent2vec
        fused = [self.sent2vec(sents) + aux_encoder(sents) for sents in seqs]
        return self.prop_model(td_graphs, bu_graphs, torch.cat(fused))