import sys
sys.path.append("../../")
sys.path.append("../")
from Dataloader.dataloader_utils import Lemma_Factory
from Dataloader.twitterloader import BiGCNTwitterSet
from SentModel.Sent2Vec import TFIDFBasedVec
from PropModel.GraphPropagation import BiGCN
from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec
from RumdetecFramework.BaseRumorFramework import RumorDetection
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
import pickle
import torch
import torch.nn.functional as F
import torch.nn as nn
import random
import os
import fitlog
import copy
from tqdm import trange
import numpy as np

class GradientReversal(torch.autograd.Function):
    """Gradient-reversal layer: identity in the forward pass, sign-flipped
    (and scaled by the class-level ``lambd``) gradient in the backward pass.
    Used to turn a minimization into an adversarial max step (see MMELoss).
    """
    lambd = 1.0

    @staticmethod
    def forward(ctx, x):
        # No computation forward -- this layer only acts on gradients.
        return x

    @staticmethod
    def backward(ctx, grad_output):
        # Flip the sign and scale by the shared reversal strength.
        return -GradientReversal.lambd * grad_output


class BiATUtils:
    """Shared evaluation helpers: batched inference and metric computation."""

    def __init__(self):
        pass

    def dataset_logits(self, model:RumorDetection, data, idxs=None, batchSize=20):
        """Run `model.predict` over `data` (optionally restricted to `idxs`)
        in mini-batches and return the concatenated prediction tensor."""
        if idxs is None:
            idxs = list(range(len(data)))
        chunks = []
        for start in trange(0, len(idxs), batchSize):
            window = idxs[start:min(len(idxs), start + batchSize)]
            raw_items = [data[j] for j in window]
            chunks.append(model.predict(data.collate_raw_batch(raw_items)))
        return torch.cat(chunks)

    def dataset_inference(self, model:RumorDetection, data, idxs=None, batchSize=20):
        """Return (predicted class index, its score) for every example."""
        scores = self.dataset_logits(model, data, idxs, batchSize)
        sortedVals, sortedIdxs = scores.sort(dim=1)
        # After an ascending sort the last column holds the argmax / max.
        return sortedIdxs[:, -1], sortedVals[:, -1]

    def acc_P_R_F1(self, yTrue, yPred):
        """Accuracy plus sklearn's (precision, recall, f1, support) arrays."""
        yPredCpu = yPred.cpu()
        return accuracy_score(yTrue, yPredCpu), \
                    precision_recall_fscore_support(yTrue, yPredCpu)

    def perf(self, model:RumorDetection, data, label, idxs=None, batchSize=20):
        """Evaluate `model` on `data`: returns (acc, (p, r, f1, support), loss).

        Predictions are assumed to be probabilities (log is applied before
        the NLL loss).
        """
        with torch.no_grad():
            scores = self.dataset_logits(model, data, idxs, batchSize)
            _, order = scores.sort(dim=1)
        yTrue = label if idxs is None else label[idxs]
        loss = F.nll_loss(scores.log(), yTrue.to(scores.device))
        return self.acc_P_R_F1(yTrue, order[:, -1]) + (loss, )


class BiATFramework(BiATUtils):
    """Bidirectional Adversarial Training (BiAT) trainer for cross-domain
    rumor detection.

    Each training step combines several objectives:
      * CELoss   - supervised cross entropy on a mixed source/target batch;
      * ATLoss   - adversarial-example loss, weighted by lambda1;
      * AATLoss  - adversarial loss steered by the target-domain head (lambda2);
      * EVatLoss - virtual adversarial + entropy loss on unlabeled data (lambda3);
      * TLoss    - target-domain classifier loss (unweighted);
      * MMELoss  - minimax entropy through a gradient-reversal layer (lambda4).
    Validation metrics are written to fitlog, and the model with the best
    validation accuracy is checkpointed to `model_file`.
    """

    def __init__(self, log_dir, suffix, model_file, class_num, lambda1, lambda2, lambda3, lambda4,
                 temperature=1.0, learning_rate=5e-3, batch_size=32):
        super(BiATFramework, self).__init__()
        if not os.path.exists(log_dir):
            # Portable replacement for os.system("mkdir ..."); guarded by the
            # exists-check above, so it never raises FileExistsError.
            os.makedirs(log_dir)
        fitlog.set_log_dir("{}/".format(log_dir), new_log=True)
        self.log_dir = log_dir
        self.suffix = suffix                # tag appended to fitlog metric names
        self.model_file = model_file        # checkpoint path for the best model
        self.best_valid_acc = 0.0
        self.min_valid_loss = 1e8
        self.class_num = class_num
        self.temperature = temperature      # softmax temperature shared by all heads
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.valid_step = 0                 # fitlog x-axis for validation metrics
        self.lambda1, self.lambda2, self.lambda3, self.lambda4 = lambda1, lambda2, lambda3, lambda4

    def vecs2logits(self, vecs, label, classifier):
        """Classify pooled vectors; return (nll_loss, accuracy).

        The epsilon subtraction keeps every probability strictly positive so
        the subsequent log() cannot produce -inf.
        """
        logits = classifier(vecs)
        preds = F.softmax(logits / self.temperature, dim=1)
        epsilon = torch.ones_like(preds) * (1e-8)
        preds = (preds - epsilon).abs()
        loss = F.nll_loss(preds.log(), label.to(preds.device))
        acc = accuracy_score(label.cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        return loss, acc

    def CELoss(self, HNet, batch, label=None):
        """Supervised cross entropy on a raw batch.

        batch[-2] is taken as the gold labels (collate_raw_batch convention);
        an explicit `label` overrides it (used for pseudo-labels).
        """
        pooledOutput = HNet.Batch2Vecs(batch)
        loss, acc = self.vecs2logits(pooledOutput,
                                     batch[-2] if label is None else label,
                                     HNet.rdm_cls)
        return loss, acc

    def ATLoss(self, HNet, batch, label=None):
        """Adversarial-training loss: re-encode the batch with the sentence
        encoder in "adver" mode (presumably perturbing embeddings using the
        gradients accumulated by the caller -- see AATLoss/Training) and
        classify the result."""
        HNet.sent2vec.aug_type = "adver"
        pooledOutput = HNet.AugBatch2Vecs(batch)
        adverLoss, adverAcc = self.vecs2logits(pooledOutput,
                                               batch[-2] if label is None else label,
                                               HNet.rdm_cls)
        HNet.sent2vec.aug_type = None
        return adverLoss, adverAcc

    def AATLoss(self, HNet, TNetClassifier, batch):
        """Adversarial loss steered by the target-domain classifier.

        First backprop the TNet loss (after zeroing HNet's grads) so the
        gradients that drive the "adver" augmentation come from the
        target-domain head, then compute the adversarial loss.
        """
        pooledOutput = HNet.Batch2Vecs(batch)
        loss, acc = self.vecs2logits(pooledOutput, batch[-2], TNetClassifier)
        HNet.zero_grad()
        loss.backward()
        aatLoss, aatAcc = self.ATLoss(HNet, batch)
        return aatLoss, aatAcc

    def MMELoss(self, HNet, batch):
        """Minimax-entropy loss: prediction entropy computed on L2-normalized
        vectors placed behind a gradient-reversal layer."""
        pooledOutput = HNet.Batch2Vecs(batch)
        pooledOutput = GradientReversal.apply(pooledOutput)
        normedPoolOut = pooledOutput / (pooledOutput.norm())
        logits = HNet.rdm_cls(normedPoolOut)
        preds = F.softmax(logits / self.temperature, dim=1)
        epsilon = torch.ones_like(preds) * (1e-8)
        preds = (preds - epsilon).abs()
        # Positive entropy: -sum(p * log p).
        loss = -1 * (preds * (preds.log())).sum()
        return loss

    def EVatLoss(self, HNet, batch):
        """Entropy + virtual adversarial loss on unlabeled data.

        Pseudo-label the batch with the current model, backprop the CE loss
        to populate perturbation gradients, then evaluate the adversarially
        augmented batch against the pseudo-labels plus an entropy term.
        """
        with torch.no_grad():
            preds = HNet.predict(batch)
            pseudoLabel = preds.argmax(dim=1)
        loss, _ = self.CELoss(HNet, batch, pseudoLabel)
        HNet.zero_grad()
        loss.backward()

        HNet.sent2vec.aug_type = "adver"
        pooledOutput = HNet.AugBatch2Vecs(batch)
        logits = HNet.rdm_cls(pooledOutput)
        preds = F.softmax(logits / self.temperature, dim=1)
        epsilon = torch.ones_like(preds) * (1e-8)
        preds = (preds - epsilon).abs()
        # NOTE(review): unlike vecs2logits, pseudoLabel is not moved to
        # preds.device here -- assumes predictions and inputs share a device.
        vat_loss = F.nll_loss(preds.log(), pseudoLabel)
        entropy = -1 * (preds * (preds.log())).sum()
        HNet.sent2vec.aug_type = None
        return vat_loss + entropy

    def TLoss(self, HNet, TNetClassifier, batch):
        """Plain classification loss/accuracy of the target-domain head."""
        pooledOutput = HNet.Batch2Vecs(batch)
        loss, acc = self.vecs2logits(pooledOutput, batch[-2], TNetClassifier)
        return loss, acc

    def dataSampler(self, labeledSource, labeledTarget, unlabeledTarget):
        """Build a generator factory yielding the four batches used per step.

        batchCE mixes bs1 source + bs2 target items (half/half, fewer target
        items if the labeled target set is smaller than half a batch);
        batchAAT is pure source, batchEVat pure unlabeled target, batchTNet
        pure labeled target.
        """
        half_size = self.batch_size // 2
        bs2 = len(labeledTarget) if len(labeledTarget) < half_size else half_size
        bs1 = self.batch_size - bs2
        def sample_fn(max_iter_step):
            for _ in range(max_iter_step):
                items1 = [labeledSource[idx] for idx in random.sample(range(len(labeledSource)),
                                                                      min(len(labeledSource), self.batch_size))]
                items2 = [labeledTarget[idx] for idx in random.sample(range(len(labeledTarget)),
                                                                      min(len(labeledTarget), self.batch_size))]
                items3 = [unlabeledTarget[idx] for idx in random.sample(range(len(unlabeledTarget)), self.batch_size)]
                batchCE = labeledSource.collate_raw_batch(items1[:bs1] + items2[:bs2])
                batchEVat = unlabeledTarget.collate_raw_batch(items3)
                batchTNet = labeledTarget.collate_raw_batch(items2)
                batchAAT = labeledSource.collate_raw_batch(items1)
                yield batchCE, batchAAT, batchEVat, batchTNet
        return sample_fn

    def PreTrainTNet(self, HNet, TNetClassifier, labeledTarget : BiGCNTwitterSet, batchSize=32, learningRate=5e-4):
        """Warm up the target-domain classifier (and, at reduced learning
        rates, the encoder/propagation modules) on the labeled target set."""
        optim = torch.optim.Adam([
            {'params': HNet.sent2vec.parameters(), 'lr': learningRate * 0.01},
            {'params': HNet.prop_model.parameters(), 'lr': learningRate * 0.1},
            {'params': TNetClassifier.parameters(), 'lr': learningRate}
        ])
        # Early stop: once the 20 most recent losses have std < 0.05 and
        # mean < 0.2, training is considered converged and we return.
        lossList = []
        for step in range(200):
            batch = labeledTarget.collate_raw_batch(
                [labeledTarget[idx] for idx in random.sample(range(len(labeledTarget)),
                                                             min(len(labeledTarget), batchSize))]
            )
            TLoss, TAcc = self.TLoss(HNet, TNetClassifier, batch)
            optim.zero_grad()
            TLoss.backward()
            optim.step()
            # FIX: report the actual step budget (200), not the stale 1000.
            print('####Pre Train Domain Classifier (%3d | %3d) ####, loss = %6.8f, Acc = %6.8f' % (
                step, 200, TLoss.data.item(), TAcc
            ))
            lossList.append(TLoss.data.item())
            if len(lossList) > 20:
                lossList.pop(0)
                if np.std(lossList) < 0.05 and np.mean(lossList) < 0.2:
                    return

    def Training(self, HNet:RumorDetection, labeledSource : BiGCNTwitterSet, labeledTarget : BiGCNTwitterSet,
               unlabeledTarget : BiGCNTwitterSet, validSet : BiGCNTwitterSet, UT_Label, maxStep=10000, validEvery=20):
        """Main BiAT training loop.

        Alternates five optimizer updates per sampled step (CE+AT, AAT,
        EVaT, TNet, MME), validates every `validEvery` steps, and saves the
        model whenever validation accuracy improves.
        """
        assert labeledTarget is not None
        print("labeled Source/labeled Target/unlabeled Target: {}/{}/{}".format(len(labeledSource),
                                                                                len(labeledTarget),
                                                                                len(unlabeledTarget)))
        TNetClassifier = copy.deepcopy(HNet.rdm_cls)
        # self.PreTrainTNet(HNet, TNetClassifier, labeledTarget, self.batch_size, self.learning_rate)
        optim = torch.optim.Adam([
            {'params': HNet.sent2vec.parameters(), 'lr': self.learning_rate * 0.1},
            {'params': HNet.prop_model.parameters(), 'lr': self.learning_rate},
            {'params': HNet.rdm_cls.parameters(), 'lr': self.learning_rate},
            {'params': TNetClassifier.parameters(), 'lr': self.learning_rate}
        ])
        validLabel = torch.tensor(validSet.data_y).argmax(dim=1)
        sampler = self.dataSampler(labeledSource, labeledTarget, unlabeledTarget)
        for step, (batchCE, batchAAT, batchEVat, batchTNet) in enumerate(sampler(maxStep)):
            # CE backward first (without stepping) so the "adver" augmentation
            # inside ATLoss can read the freshly accumulated gradients; the
            # step then applies CE + lambda1*AT gradients together.
            loss, acc = self.CELoss(HNet, batchCE)
            HNet.zero_grad()
            loss.backward()
            ATLoss, ATAcc = self.ATLoss(HNet, batchCE)
            (self.lambda1 * ATLoss).backward()
            optim.step()

            AATLoss, AATAcc = self.AATLoss(HNet, TNetClassifier, batchAAT)
            optim.zero_grad()
            (self.lambda2 * AATLoss).backward()
            optim.step()

            EVaTLoss = self.EVatLoss(HNet, batchEVat)
            optim.zero_grad()
            (self.lambda3 * EVaTLoss).backward()
            optim.step()

            TLoss, TAcc = self.TLoss(HNet, TNetClassifier, batchTNet)
            optim.zero_grad()
            TLoss.backward()
            optim.step()

            # MME: the -1 factor combined with the gradient-reversal layer
            # realizes the minimax split between classifier and encoder.
            MMELoss = self.MMELoss(HNet, batchEVat)
            optim.zero_grad()
            (-1 * self.lambda4 * MMELoss).backward()
            optim.step()

            print('#Model Update (%3d | %3d) #, loss_CE/acc_CE = %6.8f/%6.8f  |  loss_AT/acc_AT = %6.8f/%6.8f | \
                    loss_AAT/acc_AAT = %6.8f/%6.8f  |  loss_EVaT = %6.8f  | loss_T/acc_T = %6.8f/%6.8f  | loss_MME = %6.8f ' % (
                step, maxStep, loss.data.item(), acc, ATLoss.data.item(), ATAcc, AATLoss.data.item(), AATAcc,
                EVaTLoss.data.item(), TLoss.data.item(), TAcc, MMELoss.data.item()
            ))
            if (step + 1) % validEvery == 0:
                rst = self.perf(HNet, validSet, validLabel)
                acc_v, (p_v, r_v, f1_v, _), loss_v = rst
                print("valid perf:", rst)
                output_items = [("valid_acc", acc_v)] + \
                               [("valid_loss", loss_v)] + \
                               [('valid_prec_{}'.format(i), p_v[i]) for i in range(self.class_num)] + \
                               [('valid_recall_{}'.format(i), r_v[i]) for i in range(self.class_num)] + \
                               [('valid_f1_{}'.format(i), f1_v[i]) for i in range(self.class_num)]
                fitlog.add_metric({f"ValidPerf_{self.suffix}": dict(output_items)}, step=self.valid_step)
                self.valid_step += 1
                # BUG FIX: checkpoint on the *validation* accuracy acc_v; the
                # original compared the last training-batch accuracy `acc`
                # against best_valid_acc, so checkpoints did not track
                # validation performance at all.
                if acc_v > self.best_valid_acc:
                    self.logPerf(HNet, unlabeledTarget, UT_Label, self.suffix)
                    torch.save(HNet.state_dict(), self.model_file)
                    self.best_valid_acc = acc_v

    def logPerf(self, model, test_set, test_label, test_suffix, step=0):
        """Evaluate on the test set and record the metrics as fitlog's
        best-metric entry.  `step` is currently unused (kept for interface
        compatibility)."""
        rst_model = self.perf(model, test_set, test_label)
        print("BestPerf : ", rst_model)
        acc_v, (p_v, r_v, f1_v, _), loss_v = rst_model
        output_items = [("test_acc", acc_v)] + \
                       [("test_loss", loss_v)] + \
                       [('test_prec_{}'.format(i), p_v[i]) for i in range(self.class_num)] + \
                       [('test_recall_{}'.format(i), r_v[i]) for i in range(self.class_num)] + \
                       [('test_f1_{}'.format(i), f1_v[i]) for i in range(self.class_num)]
        fitlog.add_best_metric({f"BestPerf_{test_suffix}": dict(output_items)})


def obtain_model(tfidf_vec):
    """Assemble a BiGCN rumor detector from a fitted TF-IDF vectorizer:
    a TF-IDF-based sentence encoder (GloVe embeddings, trainable), a
    bidirectional GCN propagation module, and a linear classification head."""
    sentence_encoder = TFIDFBasedVec(
        tfidf_vec, 20, embedding_size=300,
        w2v_dir="../../saved/glove_en/",
        emb_update=True, grad_preserve=True,
    )
    propagation = BiGCN(300, 256)
    classifier = nn.Linear(1024, 2)
    return BiGCNRumorDetec(sentence_encoder, propagation, classifier,
                           batch_size=20, grad_accum_cnt=1)

def obtain_Domain_set(fs_prefix, od_prefix, nd_prefix):
    """Load the few-shot, old-domain, and new-domain datasets from their
    respective data prefixes and return them as a 3-tuple (in that order)."""
    loaded = []
    for prefix in (fs_prefix, od_prefix, nd_prefix):
        dataset = BiGCNTwitterSet()
        dataset.load_data_fast(data_prefix=prefix)
        loaded.append(dataset)
    return tuple(loaded)

if __name__ == "__main__":
    # Checkpoints of previously trained BiGCN models (kept for reference;
    # loading is currently disabled -- see the commented load_model below).
    BiGCN1_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.81.pkl",
                   "../../saved/TFIDF_BiGCN_ferguson_0.68.pkl",
                   "../../saved/TFIDF_BiGCN_germanwings-crash_0.70.pkl",
                   "../../saved/TFIDF_BiGCN_ottawashooting_0.68.pkl",
                   "../../saved/TFIDF_BiGCN_sydneysiege_0.67.pkl"
                   ]

    BiGCN2_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.80.pkl",
                   "../../saved/TFIDF_BiGCN_ferguson_0.71.pkl",
                   "../../saved/TFIDF_BiGCN_germanwings-crash_0.68.pkl",
                   "../../saved/TFIDF_BiGCN_ottawashooting_0.70.pkl",
                   "../../saved/TFIDF_BiGCN_sydneysiege_0.66.pkl"
                   ]

    domainID = 4       # index of the held-out target event
    fewShotCnt = 100   # number of labeled few-shot examples per domain
    fewShotSet, oldDomain, newDomain = obtain_Domain_set(
        f"../../data/twitter_fs{domainID}_{fewShotCnt}",
        f"../../data/twitter_od{domainID}_{fewShotCnt}",
        f"../../data/twitter_nd{domainID}_{fewShotCnt}"
    )
    # BUG FIX: str.rstrip(".py") strips *characters* from the set
    # {'.', 'p', 'y'}, not the suffix -- e.g. "copy.py" would become "co".
    # os.path.splitext removes exactly the extension.
    logDir = os.path.splitext(str(__file__))[0]
    if not os.path.exists(logDir):
        os.system("mkdir %s" % logDir)
    else:
        # Wipe stale logs so each run starts from a clean directory.
        os.system("rm -rf %s" % logDir)
        os.system("mkdir %s" % logDir)
    # NOTE(review): BiATFramework.__init__ calls fitlog.set_log_dir again on
    # the same directory; this earlier call appears redundant -- confirm.
    fitlog.set_log_dir("%s/" % logDir, new_log=True)
    newDomainName = newDomain.data[newDomain.data_ID[0]]['event']

    # Reuse a cached TF-IDF vectorizer if present; otherwise fit one on the
    # lemmatized text of all three datasets and cache it.
    TfIdf_twitter_file = "../../saved/TfIdf_twitter.pkl"
    if os.path.exists(TfIdf_twitter_file):
        with open(TfIdf_twitter_file, "rb") as fr:
            tv = pickle.load(fr)
    else:
        lemma = Lemma_Factory()
        corpus = [" ".join(lemma(txt)) for data in [fewShotSet, oldDomain, newDomain]
                                        for ID in data.data_ID for txt in data.data[ID]['text']]
        tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
        _ = tv.fit_transform(corpus)
        with open(TfIdf_twitter_file, "wb") as fw:
            pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)
    model1 = obtain_model(tv)

    print(f"fewShotSet/newDomain/oldDomain = {len(fewShotSet)}/{len(newDomain)}/{len(oldDomain)}")
    # model.load_model(BiGCN2_Paths[domainID])
    # Take 5 labeled examples from the new domain; the rest stay unlabeled.
    labeledSet, unlabeledSet = newDomain.split([5.0 / len(newDomain), 1.0])

    trainer = BiATFramework(log_dir=logDir, suffix=f"{newDomainName}_FS{fewShotCnt}",
                         model_file=f"{logDir}/MME_{newDomainName}_FS{fewShotCnt}",
                         class_num=2, lambda1=1.0, lambda2=0.5, lambda3=0.05, lambda4=0.01, temperature=0.05)
    trainer.Training(model1, oldDomain, labeledSet, unlabeledSet, fewShotSet,
                     UT_Label=torch.tensor(unlabeledSet.data_y).argmax(dim=1),
                     maxStep=100000, validEvery=20)