import sys
sys.path.append("../../")
sys.path.append("../")
from Dataloader.dataloader_utils import Sample_data, Merge_data, Lemma_Factory
from Dataloader.weiboloader import BiGCNWeiboSet
from SentModel.Sent2Vec import TFIDFBasedVec_CN
from PropModel.GraphPropagation import BiGCN
from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec
from RumdetecFramework.BaseRumorFramework import RumorDetection
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
import pickle, os, random, fitlog, pandas as pd, numpy as np
import torch, torch.nn.functional as F, torch.nn as nn
from tqdm import trange

class GradientReversal(torch.autograd.Function):
    """Identity in the forward pass; flips (and scales) gradients on the way back.

    The class attribute ``lambd`` controls the reversal strength (default 1.0).
    Used to train an encoder adversarially against a domain discriminator.
    """
    lambd = 1.0

    @staticmethod
    def forward(ctx, x):
        # Pass the input straight through unchanged.
        return x

    @staticmethod
    def backward(ctx, grad_output):
        # Negate and scale the incoming gradient before propagating it.
        return -GradientReversal.lambd * grad_output


def Generator1(labeledSource, unlabeledTarget, labeledTarget, batchSize):
    """Build a batch generator mixing labeled-source, labeled-target and
    unlabeled-target samples (few-shot domain-adaptation setting).

    Returns (maxIters, generator). Each yielded pair is:
      batch1 -- bs1 labeled-source + bs2 labeled-target items (task loss),
      batch2 -- a batchSize-sized mix used for the domain-adversarial loss.
    """
    halfBS = batchSize // 2
    # Labeled-target share: at most half the batch, capped by availability.
    bs2 = halfBS if halfBS < len(labeledTarget) else len(labeledTarget)
    bs1 = batchSize - bs2
    bs3 = batchSize // 3 if batchSize < 3 * len(unlabeledTarget) else len(unlabeledTarget)
    # max(1, ...) guards the modulo below against ZeroDivisionError when a
    # dataset holds fewer samples than its per-batch share.
    iter1 = max(1, len(labeledSource) // bs1)
    iter2 = max(1, len(labeledTarget) // bs2)
    iter3 = max(1, len(unlabeledTarget) // bs3)
    maxIters = max(iter1, iter2, iter3)

    def generator():
        idxsS, idxsLT, idxsUT = [], [], []
        for i in range(maxIters + 1):
            # Reshuffle each stream once it has been fully consumed; the
            # doubled index list lets a slice run past the end safely.
            if i % iter1 == 0:
                idxsS = random.sample(range(len(labeledSource)), len(labeledSource)) * 2
            if i % iter2 == 0:
                idxsLT = random.sample(range(len(labeledTarget)), len(labeledTarget)) * 2
            if i % iter3 == 0:
                idxsUT = random.sample(range(len(unlabeledTarget)), len(unlabeledTarget)) * 2
            # i * bs could exceed the dataset length, so start from the remainder.
            start_LS, start_LT, start_UT = (i * bs1) % len(labeledSource), \
                                           (i * bs2) % len(labeledTarget), \
                                           (i * bs3) % len(unlabeledTarget)
            end_LS, end_LT, end_UT = start_LS + bs1, start_LT + bs2, start_UT + bs3

            items1 = [labeledSource[jj] for jj in idxsS[start_LS:end_LS]]
            items2 = [labeledTarget[jj] for jj in idxsLT[start_LT:end_LT]]
            items3 = [unlabeledTarget[jj] for jj in idxsUT[start_UT:end_UT]]
            batch1 = labeledTarget.collate_raw_batch(items1 + items2)
            # max(0, ...) guards random.sample against a negative count when
            # bs3 exceeds bs2 (very small labeled-target sets).
            batch2 = unlabeledTarget.collate_raw_batch(
                random.sample(items2, max(0, batchSize - bs1 - bs3)) + items1 + items3
            )
            yield batch1, batch2
    return maxIters, generator

def Generator2(labeledSource, unlabeledTarget, batchSize):
    """Build a batch generator for the purely-unsupervised target setting.

    Returns (maxIters, generator). Each yielded pair is:
      batch1 -- batchSize labeled-source items (cross-entropy loss),
      batch2 -- labeled-source + unlabeled-target mix (domain-adversarial loss).
    """
    # Unlabeled-target share: half the batch, capped by availability.
    bs2 = batchSize // 2 if batchSize < 2 * len(unlabeledTarget) else len(unlabeledTarget)
    # max(1, ...) guards the modulo below against ZeroDivisionError when a
    # dataset holds fewer samples than its per-batch share.
    iter1 = max(1, len(labeledSource) // batchSize)
    iter2 = max(1, len(unlabeledTarget) // bs2)
    maxIters = max(iter1, iter2)

    def generator():
        idxsS, idxsUT = [], []
        for i in range(maxIters + 1):
            # Reshuffle each stream once it has been fully consumed; the
            # doubled index list lets a slice run past the end safely.
            if i % iter1 == 0:
                idxsS = random.sample(range(len(labeledSource)), len(labeledSource)) * 2
            if i % iter2 == 0:
                idxsUT = random.sample(range(len(unlabeledTarget)), len(unlabeledTarget)) * 2
            # i * batchSize could exceed the dataset length, so wrap around.
            start_LS, start_UT = (i * batchSize) % len(labeledSource), \
                                    (i * bs2) % len(unlabeledTarget)
            end_LS, end_UT = start_LS + batchSize, start_UT + bs2

            items1 = [labeledSource[jj] for jj in idxsS[start_LS:end_LS]]
            items2 = [unlabeledTarget[jj] for jj in idxsUT[start_UT:end_UT]]
            batch1 = labeledSource.collate_raw_batch(items1)
            batch2 = unlabeledTarget.collate_raw_batch(
                random.sample(items1, batchSize - bs2) + items2
            )
            yield batch1, batch2  # batch1 for CE loss, batch2 for DA loss
    return maxIters, generator

def DataIter(labeledSource, unlabeledTarget, labeledTarget=None, batchSize=32):
    """Dispatch to the right batch generator.

    With a (non-empty) labeledTarget set, use the semi-supervised mixer
    Generator1; otherwise fall back to the unsupervised Generator2.
    """
    if labeledTarget is None:
        return Generator2(labeledSource, unlabeledTarget, batchSize)
    assert len(labeledTarget) > 0
    return Generator1(labeledSource, unlabeledTarget, labeledTarget, batchSize)

class DANNUtils:
    """Shared helpers for DANN training: batching, metrics, optimizers, inference."""

    def __init__(self):
        pass

    def acc_P_R_F1(self, y_true, y_pred):
        """Return (accuracy, (precision, recall, f1, support)) for tensor predictions."""
        return accuracy_score(y_true, y_pred.cpu()), \
                    precision_recall_fscore_support(y_true, y_pred.cpu())

    def dataIter(self, pseudo_set, labeled_target=None, batch_size=32):
        """Yield collated batches drawn from pseudo_set plus optional labeled_target.

        Indices are shuffled once per call; the shuffled index list is doubled
        so the final slice can run past the end — the last batch is padded with
        duplicates instead of being short.
        """
        p_idxs = list(range(len(pseudo_set))) if not hasattr(pseudo_set, 'valid_indexs') else pseudo_set.valid_indexs
        p_len = len(p_idxs)
        if labeled_target is None:
            l_len = 0
            l_idxs = []
        else:
            l_idxs = list(range(len(labeled_target))) if not hasattr(labeled_target, 'valid_indexs') \
                                                        else labeled_target.valid_indexs
            l_len = len(l_idxs)
        data_size = p_len + l_len
        idxs = random.sample(range(data_size), data_size) * 2
        for start_i in range(0, data_size, batch_size):
            batch_idxs = idxs[start_i:(start_i + batch_size)]
            # Indices below p_len address pseudo_set; the rest address labeled_target.
            items = [pseudo_set[p_idxs[idx]] if idx < p_len else
                        labeled_target[l_idxs[idx - p_len]] for idx in batch_idxs]
            yield pseudo_set.collate_raw_batch(items)

    def obtainOptim(self, tr_model, learning_rate):
        """Build an Adam optimizer over all of tr_model's parameters."""
        return torch.optim.Adam([
            {'params': tr_model.parameters(), 'lr': learning_rate}
        ])

    def lossAndAcc(self, model, batch, temperature=1.0):
        """Temperature-scaled classification NLL loss and accuracy on one batch.

        batch[-2] holds the labels, either one-hot (2-D) or class indices (1-D).
        """
        pooledOutput = model.Batch2Vecs(batch)
        logits = model.rdm_cls(pooledOutput)
        preds = F.softmax(logits / temperature, dim=1)
        # Shift probabilities slightly away from exact 0/1 so log() below
        # cannot produce nan/inf.
        epsilon = torch.ones_like(preds) * 1e-8
        preds = (preds - epsilon).abs()
        labels = batch[-2].to(preds.device)
        labels = labels.argmax(dim=1) if labels.dim() == 2 else labels
        loss = F.nll_loss(preds.log(), labels)
        acc = accuracy_score(labels.cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        return loss, acc

    def dataset_logits(self, model: "RumorDetection", data, idxs=None, batch_size=40):
        """Run model.forward over `data` in mini-batches; return concatenated outputs."""
        preds = []
        if idxs is None:
            idxs = list(range(len(data)))
        for i in trange(0, len(idxs), batch_size):
            batch_idxs = idxs[i:min(len(idxs), i + batch_size)]
            batch = data.collate_raw_batch([data[idx] for idx in batch_idxs])
            pred = model.forward(batch)
            preds.append(pred)
        pred_tensor = torch.cat(preds)
        return pred_tensor

    def dataset_inference(self, model: "RumorDetection", data, idxs=None, batch_size=20):
        """Return (predicted class, its score) per sample.

        Generalized from the original two-class `sort(dim=1)[..., 1]` trick to
        `max(dim=1)`, which is identical for two classes and correct for any
        number of classes.
        """
        pred_tensor = self.dataset_logits(model, data, idxs, batch_size)
        vals, pred_cls = pred_tensor.max(dim=1)
        return pred_cls, vals

    def perf(self, model: "RumorDetection", data, label, idxs=None, batch_size=20):
        """Accuracy / precision / recall / F1 of `model` on `data` vs `label` (no grad)."""
        with torch.no_grad():
            y_pred, _ = self.dataset_inference(model, data, idxs=idxs, batch_size=batch_size)
        y_true = label[idxs] if idxs is not None else label
        return self.acc_P_R_F1(y_true, y_pred)

class DANNTrainer(DANNUtils):
    """Domain-adversarial (DANN) trainer for cross-domain rumor detection.

    Combines a task-classification loss on labeled data with a domain
    discriminator trained through gradient reversal, so the encoder learns
    domain-invariant features. Metrics are reported through fitlog.
    """

    def __init__(self, log_dir, suffix, model_file, class_num, temperature=1.0,
                 learning_rate=5e-3, batch_size=32, Lambda=0.1):
        """
        :param log_dir: fitlog directory; recreated empty on every run.
        :param suffix: tag appended to fitlog metric names.
        :param model_file: path used to checkpoint the best model.
        :param class_num: number of task classes (for expanding P/R/F1 logs).
        :param temperature: softmax temperature for task and domain heads.
        :param learning_rate: base Adam learning rate.
        :param batch_size: mixed-batch size handed to DataIter.
        :param Lambda: weight of the domain-adversarial loss term.
        """
        super(DANNTrainer, self).__init__()
        self.log_dir = log_dir
        # Always start from an empty directory so fitlog never mixes runs.
        # (Replaces the non-portable `os.system("rm -rf ...")` shell calls.)
        if os.path.exists(self.log_dir):
            import shutil  # local import: keeps the file-level import block untouched
            shutil.rmtree(self.log_dir)
        os.makedirs(self.log_dir)
        fitlog.set_log_dir(self.log_dir, new_log=True)
        self.suffix = suffix
        self.model_file = model_file
        self.best_valid_acc = 0.0
        self.min_valid_loss = 1e8
        self.class_num = class_num
        self.temperature = temperature
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.Lambda = Lambda
        self.valid_step = 0  # global fitlog step, advanced by logPerf

    def discriminatorLoss(self, trModel: "RumorDetection", discriminator, batch,
                          temperature=1.0, adversarial=True):
        """NLL loss and accuracy of the domain discriminator on `batch`.

        batch[-1] carries the domain label. With adversarial=True the pooled
        features pass through GradientReversal, so minimizing this loss trains
        the encoder to *confuse* the discriminator.
        """
        pooledOutput = trModel.Batch2Vecs(batch)
        if adversarial:
            pooledOutput = GradientReversal.apply(pooledOutput)
        # NOTE(review): .norm() is the norm of the whole batch matrix, not a
        # per-row norm — confirm this global scaling is intended.
        normedPoolOut = pooledOutput/(pooledOutput.norm())
        logits = discriminator(normedPoolOut)
        preds = F.softmax(logits/temperature, dim=1)
        # Shift probabilities slightly away from exact 0/1 so log() cannot yield nan.
        epsilon = torch.ones_like(preds) * (1e-8)
        preds = (preds - epsilon).abs()
        loss = F.nll_loss(preds.log(), batch[-1].to(preds.device))
        acc = accuracy_score(batch[-1].cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        return loss, acc

    def PreTrainDomainClassifier(self, trModel: "RumorDetection", discriminator: nn.Module,
                                    labeledSource: "BiGCNWeiboSet", labeledTarget: "BiGCNWeiboSet",
                                    unlabeledTarget: "BiGCNWeiboSet", maxEpoch, learning_rate=-1):
        """Warm up the domain discriminator (no gradient reversal).

        Early stop: once the std of the last 20 losses drops below 0.05 and
        their mean below 0.2, the discriminator is considered converged.
        (`learning_rate` is currently unused; self.learning_rate is applied.)
        """
        optim = torch.optim.Adam([
            {'params': trModel.sent2vec.parameters(), 'lr': self.learning_rate * 0.1},
            {'params': trModel.prop_model.parameters(), 'lr': self.learning_rate},
            {'params': discriminator.parameters(), 'lr': self.learning_rate}
        ])

        lossList = []
        for epoch in range(maxEpoch):
            maxIters, trainLoader = DataIter(labeledSource, unlabeledTarget, labeledTarget, self.batch_size)
            for step, (_, batch2) in enumerate(trainLoader()):
                DLoss, DAcc = self.discriminatorLoss(trModel, discriminator, batch2, adversarial=False)
                optim.zero_grad()
                DLoss.backward()
                optim.step()
                torch.cuda.empty_cache()
                print('####Pre Train Domain Classifier (%3d | %3d) %3d | %3d ####, loss = %6.8f, Acc = %6.8f' % (
                    step, maxIters, epoch, maxEpoch, DLoss.data.item(), DAcc
                ))
                lossList.append(DLoss.data.item())
                if len(lossList) > 20:
                    lossList.pop(0)
                    if np.std(lossList) < 0.05 and np.mean(lossList) < 0.2:
                        return

    def TaskFineTuning(self, trModel: "RumorDetection",
                        labeledSource: "BiGCNWeiboSet", labeledTarget: "BiGCNWeiboSet",
                        unlabeledTarget: "BiGCNWeiboSet", maxEpoch, learning_rate=-1):
        """Fine-tune encoder + task classifier with the classification loss only.

        Early stop mirrors PreTrainDomainClassifier (20-loss window).
        (`learning_rate` is currently unused; self.learning_rate is applied.)
        """
        optim = torch.optim.Adam([
            {'params': trModel.sent2vec.parameters(), 'lr': self.learning_rate * 0.1},
            {'params': trModel.prop_model.parameters(), 'lr': self.learning_rate},
            {'params': trModel.rdm_cls.parameters(), 'lr': self.learning_rate}
        ])

        lossList = []
        for epoch in range(maxEpoch):
            maxIters, trainLoader = DataIter(labeledSource, unlabeledTarget, labeledTarget, self.batch_size)
            for step, (_, batch2) in enumerate(trainLoader()):
                tLoss, tAcc = self.lossAndAcc(trModel, batch2)
                optim.zero_grad()
                tLoss.backward()
                optim.step()
                torch.cuda.empty_cache()
                # Fixed copy-pasted log label (previously said "Pre Train Domain Classifier").
                print('####Task Fine Tuning (%3d | %3d) %3d | %3d ####, loss = %6.8f, Acc = %6.8f' % (
                    step, maxIters, epoch, maxEpoch, tLoss.data.item(), tAcc
                ))
                lossList.append(tLoss.data.item())
                if len(lossList) > 20:
                    lossList.pop(0)
                    if np.std(lossList) < 0.05 and np.mean(lossList) < 0.2:
                        return

    def ModelTrain(self, trModel: "RumorDetection", discriminator: nn.Module,
                    labeledSource: "BiGCNWeiboSet", labeledTarget: "BiGCNWeiboSet",
                    unlabeledTarget: "BiGCNWeiboSet", validSet: "BiGCNWeiboSet",
                    UT_Label, maxEpoch, validEvery=20):
        """Joint training: task loss + Lambda * adversarial domain loss, single optimizer.

        Every `validEvery` steps the model is evaluated on validSet and the
        best validation-accuracy checkpoint is saved to self.model_file.
        """
        print("labeled Source/labeled Target/unlabeled Target: {}/{}/{}".format(len(labeledSource),
                                                                                len(labeledTarget) if labeledTarget is not None else 0,
                                                                                len(unlabeledTarget)))
        optim = torch.optim.Adam([
            {'params': trModel.sent2vec.parameters(), 'lr': self.learning_rate * 0.1},
            {'params': trModel.prop_model.parameters(), 'lr': self.learning_rate},
            {'params': trModel.rdm_cls.parameters(), 'lr': self.learning_rate},
            {'params': discriminator.parameters(), 'lr': self.learning_rate}
        ])
        validLabel = torch.tensor(validSet.data_y).argmax(dim=1)
        for epoch in range(maxEpoch):
            maxIters, trainLoader = DataIter(labeledSource, unlabeledTarget, labeledTarget, self.batch_size)
            for step, (batch1, batch2) in enumerate(trainLoader()):
                loss, acc = self.lossAndAcc(trModel, batch1, temperature=self.temperature)
                DLoss, DAcc = self.discriminatorLoss(trModel, discriminator, batch2, temperature=self.temperature)
                trainLoss = loss + self.Lambda*DLoss
                optim.zero_grad()
                trainLoss.backward()
                optim.step()
                torch.cuda.empty_cache()
                print('####Model Update (%3d | %3d) %3d | %3d ####, trainLoss = %6.8f, loss/acc = %6.8f/%6.8f, DLoss/DAcc = %6.8f/%6.8f' % (
                    step, maxIters, epoch, maxEpoch, trainLoss.data.item(), loss.data.item(), acc, DLoss.data.item(), DAcc
                ))
                if (step+1) % validEvery == 0:
                    rst = self.perf(trModel, validSet, validLabel)
                    acc_v, (p_v, r_v, f1_v, _) = rst
                    output_items = [("valid_acc", acc_v)] + \
                                   [('valid_prec_{}'.format(i), p_v[i]) for i in range(self.class_num)] + \
                                   [('valid_recall_{}'.format(i), r_v[i]) for i in range(self.class_num)] + \
                                   [('valid_f1_{}'.format(i), f1_v[i]) for i in range(self.class_num)]
                    print("valid perf:", rst)
                    fitlog.add_metric({f"ValidPerf_{self.suffix}": dict(output_items)}, step=self.valid_step)
                    self.logPerf(trModel, unlabeledTarget, UT_Label, self.suffix)
                    # BUGFIX: checkpoint on *validation* accuracy (acc_v), not the
                    # last training-batch accuracy (acc) — matches ModelTrainV2.
                    if acc_v > self.best_valid_acc:
                        torch.save(trModel.state_dict(), self.model_file)
                        self.best_valid_acc = acc_v

    def ModelTrainV2(self, trModel: "RumorDetection", discriminator: nn.Module,
                    labeledSource: "BiGCNWeiboSet", labeledTarget: "BiGCNWeiboSet",
                    unlabeledTarget: "BiGCNWeiboSet", validSet: "BiGCNWeiboSet",
                    UT_Label, maxEpoch, validEvery=20, DA_Step=1, TD_Step=3):
        """Alternating training: DA_Step adversarial updates (encoder + discriminator)
        followed by TD_Step task updates (encoder + classifier, smaller LRs).

        Saves the best validation-accuracy checkpoint and logs test performance.
        """
        print("labeled Source/labeled Target/unlabeled Target: {}/{}/{}".format(len(labeledSource),
                                                                                len(labeledTarget) if labeledTarget is not None else 0,
                                                                                len(unlabeledTarget)))
        optim = torch.optim.Adam([
            {'params': trModel.sent2vec.parameters(), 'lr': self.learning_rate},
            {'params': trModel.prop_model.parameters(), 'lr': self.learning_rate},
            {'params': discriminator.parameters(), 'lr': self.learning_rate}
        ])
        task_optim = torch.optim.Adam([
            {'params': trModel.sent2vec.parameters(), 'lr': self.learning_rate*0.01},
            {'params': trModel.prop_model.parameters(), 'lr': self.learning_rate*0.1},
            {'params': trModel.rdm_cls.parameters(), 'lr': self.learning_rate*0.1}
        ])
        validLabel = torch.tensor(validSet.data_y).argmax(dim=1)
        for epoch in range(maxEpoch):
            maxIters, trainLoader = DataIter(labeledSource, unlabeledTarget, labeledTarget, self.batch_size)
            for step, (batch1, batch2) in enumerate(trainLoader()):
                for da_idx in range(DA_Step):
                    DLoss, DAcc = self.discriminatorLoss(trModel, discriminator, batch2, temperature=self.temperature)
                    optim.zero_grad()
                    (self.Lambda*DLoss).backward()
                    optim.step()
                    print('####Domain Adversarial [%3d] (%3d | %3d) %3d | %3d #### DLoss/DAcc = %6.8f/%6.8f' % (
                        da_idx, step, maxIters, epoch, maxEpoch, DLoss.data.item(), DAcc
                    ))
                for td_idx in range(TD_Step):
                    loss, acc = self.lossAndAcc(trModel, batch1)
                    task_optim.zero_grad()
                    loss.backward()
                    task_optim.step()
                    print('****Task Discriminative [%3d] (%3d | %3d) %3d | %3d **** Loss/Acc = %6.8f/%6.8f' % (
                        td_idx, step, maxIters, epoch, maxEpoch, loss.data.item(), acc
                    ))
                torch.cuda.empty_cache()
                if (step+1) % validEvery == 0:
                    rst = self.perf(trModel, validSet, validLabel)
                    acc_v, (p_v, r_v, f1_v, _) = rst
                    output_items = [("valid_acc", acc_v)] + \
                                   [('valid_prec_{}'.format(i), p_v[i]) for i in range(self.class_num)] + \
                                   [('valid_recall_{}'.format(i), r_v[i]) for i in range(self.class_num)] + \
                                   [('valid_f1_{}'.format(i), f1_v[i]) for i in range(self.class_num)]
                    print("valid perf:", rst)
                    fitlog.add_metric({f"ValidPerf_{self.suffix}": dict(output_items)}, step=self.valid_step)
                    if acc_v > self.best_valid_acc:
                        torch.save(trModel.state_dict(), self.model_file)
                        self.best_valid_acc = acc_v
                        # step > 0 here, so logPerf prints it as "BestPerf".
                        self.logPerf(trModel, unlabeledTarget, UT_Label, self.suffix, step)
                    else:
                        self.logPerf(trModel, unlabeledTarget, UT_Label, self.suffix)

    def logPerf(self, model, test_set, test_label, test_suffix, step=0):
        """Evaluate on the test set and push metrics to fitlog.

        step > 0 marks the call as a new best ("BestPerf"); advances
        self.valid_step after logging.
        NOTE(review): metric keys are still named valid_* under TestPerf —
        kept for backward compatibility with existing log parsing.
        """
        rst_model = self.perf(model, test_set, test_label)
        acc_v, (p_v, r_v, f1_v, _) = rst_model
        if step > 0:
            print("BestPerf : ", rst_model)
        else:
            print("Test Perf : ", rst_model)
        output_items = [("valid_acc", acc_v)] + \
                       [('valid_prec_{}'.format(i), p_v[i]) for i in range(self.class_num)] + \
                       [('valid_recall_{}'.format(i), r_v[i]) for i in range(self.class_num)] + \
                       [('valid_f1_{}'.format(i), f1_v[i]) for i in range(self.class_num)]
        fitlog.add_metric({f"TestPerf_{test_suffix}": dict(output_items)}, step=self.valid_step)
        self.valid_step += 1


def obtain_model(tfidf_vec):
    """Build the BiGCN rumor detector and a 2-way domain discriminator.

    :param tfidf_vec: fitted TfidfVectorizer used by the sentence encoder.
    :return: (BiGCNRumorDetec model, discriminator nn.Sequential).
    """
    sent_encoder = TFIDFBasedVec_CN(tfidf_vec, 20, embedding_size=300,
                                    w2v_file="../../word2vec_CN_WeiboBi.pkl",
                                    emb_update=True, grad_preserve=True)
    propagation = BiGCN(300, 256)
    task_head = nn.Linear(1024, 2)
    domain_head = nn.Sequential(nn.Linear(1024, 512), nn.ReLU(), nn.Linear(512, 2))
    detector = BiGCNRumorDetec(sent_encoder, propagation, task_head,
                               batch_size=20, grad_accum_cnt=1)
    return detector, domain_head

def obtainOnlineSet(old_prefix, dev_prefix, te_prefix, lt_cnt=0):
    """Load (or rebuild and cache) the Weibo source / dev / test splits.

    :return: (dev set, old-domain source set, new-domain test set,
              labeled-target subset or None when lt_cnt == 0).
    """
    dev_set = BiGCNWeiboSet()
    source_set = BiGCNWeiboSet()
    test_set = BiGCNWeiboSet()
    try:
        # Fast path: all three splits were cached by a previous run.
        source_set.load_data_fast(data_prefix=old_prefix)
        dev_set.load_data_fast(data_prefix=dev_prefix)
        test_set.load_data_fast(data_prefix=te_prefix)
    except AssertionError:
        # Cache miss: rebuild the splits from the raw Weibo dump and cache them.
        weibo_dir, df_file = "../../data/Weibo", "../../data/Weibo_IDs.csv"
        df = pd.read_csv(df_file)
        topic = np.ones([len(df)], dtype=np.int64)
        topic[:2500] = 0  # first 2500 events form the "old" source domain
        df['topic'] = topic
        source_set.load_data(weibo_dir, weibo_df=df[:2500])
        source_set.Caches_Data(old_prefix)
        shuffled_tail = df[2500:].sample(frac=1.0)
        dev_set.load_data(weibo_dir, weibo_df=shuffled_tail[:100])
        dev_set.Caches_Data(dev_prefix)
        test_set.load_data(weibo_dir, weibo_df=shuffled_tail[100:])
        test_set.Caches_Data(te_prefix)

    if lt_cnt <= 0:
        return dev_set, source_set, test_set, None
    # Carve lt_cnt labeled-target examples out of the test set.
    part_labeled, part_rest = test_set.split([lt_cnt * 1.0 / len(test_set), 1.0])
    return dev_set, source_set, part_rest, part_labeled

if __name__ == "__main__":

    import shutil

    # BUGFIX: os.path.splitext drops the ".py" suffix correctly; the previous
    # str.rstrip(".py") stripped trailing '.', 'p', 'y' *characters* and would
    # mangle any script name ending in those letters (e.g. "copy.py" -> "co").
    logDir = os.path.splitext(str(__file__))[0]
    # logDir = "OnlineTest"
    if os.path.exists(logDir):
        shutil.rmtree(logDir)
    os.makedirs(logDir)

    fewShotCnt = 100
    # lt_cnt=0 -> no labeled-target split, so labeledTarget comes back as None.
    validTarget, labeledSource, unlabeledTarget, labeledTarget = obtainOnlineSet(
                     f"../../data/WeiboOnline_FS{fewShotCnt}_tr",
                     f"../../data/WeiboOnline_FS{fewShotCnt}_dev",
                     f"../../data/WeiboOnline_FS{fewShotCnt}_te",
                     lt_cnt=0
    )
    TfIdf_twitter_file = "../../saved/TfIdf_WEIBO.pkl"
    if os.path.exists(TfIdf_twitter_file):
        with open(TfIdf_twitter_file, "rb") as fr:
            tv = pickle.load(fr)
    else:
        lemma = Lemma_Factory()
        # BUGFIX: skip None entries (labeledTarget is None when lt_cnt == 0);
        # the old comprehension crashed on data.data_ID in that case.
        corpus = [" ".join(lemma(txt))
                  for data in [validTarget, labeledSource, unlabeledTarget, labeledTarget]
                  if data is not None
                  for ID in data.data_ID for txt in data.data[ID]['text']]
        tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
        _ = tv.fit_transform(corpus)
        with open(TfIdf_twitter_file, "wb") as fw:
            pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)

    newDomainName = "OnlineTest"
    model1, domainClassifier = obtain_model(tv)
    ut_label = torch.tensor(unlabeledTarget.data_y).argmax(dim=1)
    domainClassifier.to(model1.device)
    trainer = DANNTrainer(logDir, f"{newDomainName}_FS{fewShotCnt}", f"{logDir}/DANN_{newDomainName}_FS{fewShotCnt}",
                            2, temperature=0.1, learning_rate=5e-3, batch_size=32, Lambda=0.1)
    # Baseline performance before adaptation, then warm-up and adversarial training.
    trainer.logPerf(model1, unlabeledTarget, ut_label, trainer.suffix)
    trainer.PreTrainDomainClassifier(model1, domainClassifier, labeledSource, labeledTarget, unlabeledTarget, 2)
    trainer.ModelTrainV2(model1, domainClassifier, labeledSource, labeledTarget, unlabeledTarget, validTarget,
                       ut_label, 100, validEvery=30, TD_Step=7)
