import sys
sys.path.append("../../")
sys.path.append("../")
from Dataloader.dataloader_utils import Sample_data, Merge_data, Lemma_Factory
from Dataloader.twitterloader import TwitterSet, BiGCNTwitterSet
from SentModel.Sent2Vec import TFIDFBasedVec, W2VRDMVec
from PropModel.GraphPropagation import BiGCN
from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec
from RumdetecFramework.BaseRumorFramework import RumorDetection
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, precision_score, \
            recall_score, f1_score,precision_recall_fscore_support
from torch.utils.data import WeightedRandomSampler
import torch.nn.functional as F
import pickle
import torch
import numpy as np
import torch.nn as nn
import random
import os
import fitlog
from tqdm import trange, tqdm

class SelfTrainingUtils:
    """Stateless helpers shared by the self-training loop: metric computation,
    mixed batching over pseudo-labeled / labeled sets, augmented prediction
    and loss, and whole-dataset batched inference."""

    def __init__(self):
        pass

    def acc_P_R_F1(self, y_true, y_pred):
        # Returns (accuracy, (precision, recall, f1, support)); the second
        # element holds per-class arrays from sklearn.
        # y_pred is expected to be a torch tensor (hence .cpu()).
        return accuracy_score(y_true, y_pred.cpu()), \
                    precision_recall_fscore_support(y_true, y_pred.cpu())

    def dataIter(self, pseudo_set, labeled_target=None, batch_size=32):
        """Yield collated mini-batches drawn jointly from `pseudo_set` and the
        optional `labeled_target` set.

        A single shuffled index space covers both sets: indices < p_len
        address `pseudo_set`, the rest address `labeled_target`. If a set
        exposes `valid_indexs` (e.g. set by dataSelection), only those
        samples participate.
        """
        p_idxs = list(range(len(pseudo_set))) if not hasattr(pseudo_set, 'valid_indexs') else pseudo_set.valid_indexs
        p_len = len(p_idxs)
        if labeled_target is None:
            l_len = 0
            l_idxs = []
        else:
            l_idxs = list(range(len(labeled_target))) if not hasattr(labeled_target, 'valid_indexs') \
                                                        else labeled_target.valid_indexs
            l_len = len(l_idxs)
        data_size = p_len + l_len
        # Shuffle once, then concatenate the shuffle with itself: the final
        # slice can then still be a full batch, wrapping into repeated
        # indices from the start of the same permutation.
        idxs = random.sample(range(data_size), data_size)*2
        for start_i in range(0, data_size, batch_size):
            batch_idxs = idxs[(start_i):(start_i+batch_size)]
            items = [pseudo_set[p_idxs[idx]] if idx < p_len else \
                        labeled_target[l_idxs[idx-p_len]] for idx in batch_idxs]
            # Both sets are assumed to yield compatible items, so the pseudo
            # set's collate function handles the merged batch -- TODO confirm.
            yield pseudo_set.collate_raw_batch(items)

    def obtainOptim(self, tr_model, learning_rate):
        # Single parameter group: Adam over all model parameters.
        return torch.optim.Adam([
            {'params': tr_model.parameters(), 'lr': learning_rate}
        ])

    def predict(self, model, batch):
        """Predict on `batch` after choosing one of five input augmentations
        uniformly at random (set on model.sent2vec)."""
        rand = random.random()
        if rand < 0.2:
            model.sent2vec.set_aug_type("gaussian")
        elif rand < 0.4:
            model.sent2vec.set_aug_type("g_blur")
        elif rand < 0.6:
            # Adversarial augmentation: backprop a supervised loss first,
            # presumably so the gradients needed to build the adversarial
            # perturbation are populated -- TODO confirm against the
            # sent2vec implementation.
            loss, acc = model.RDMLoss(batch)
            loss.backward()
            model.sent2vec.set_aug_type("adver")
        elif rand < 0.8:
            model.sent2vec.set_aug_type("rMask")
        else:
            model.sent2vec.set_aug_type("rReplace")
        preds = model.AugPredict(batch)
        return preds

    def lossAndAcc(self, model, batch):
        """NLL loss and accuracy on one batch. Labels come from batch[-2]
        and may be one-hot (dim == 2) or plain class indices."""
        preds = self.predict(model, batch)
        epsilon = torch.ones_like(preds) * 1e-8
        preds = (preds - epsilon).abs()  # to avoid the prediction [1.0, 0.0], which leads to the 'nan' value in log operation
        labels = batch[-2]
        labels = labels.argmax(dim=1) if labels.dim() == 2 else labels
        loss = F.nll_loss(preds.log(), labels)
        acc = accuracy_score(labels.cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        return loss, acc

    def dataset_logits(self, model: RumorDetection, data, idxs=None, batch_size=40):
        """Run the model over `data` (optionally restricted to `idxs`) in
        mini-batches and return the concatenated prediction tensor.

        NOTE: gradients are NOT disabled here; callers that only need
        inference wrap this call in torch.no_grad().
        """
        preds = []
        if idxs is None:
            idxs = list(range(len(data)))
        for i in trange(0, len(idxs), batch_size):
            batch_idxs = idxs[i:min(len(idxs), i + batch_size)]
            batch = data.collate_raw_batch([data[idx] for idx in batch_idxs])
            pred = model.forward(batch)
            preds.append(pred)
        pred_tensor = torch.cat(preds)
        return pred_tensor

    def dataset_inference(self, model:RumorDetection, data, idxs=None, batch_size=20):
        # sort() is ascending, so column -1 of the sorted indices/values is
        # the argmax class and its score (equivalent to a max over dim=1).
        pred_tensor = self.dataset_logits(model, data, idxs, batch_size)
        vals, idxs = pred_tensor.sort(dim=1)
        return idxs[:, -1], vals[:, -1]

    def perf(self, model:RumorDetection, data, label, idxs=None, batch_size=20):
        """Evaluate the model on `data` against ground-truth `label`; returns
        (accuracy, (precision, recall, f1, support))."""
        with torch.no_grad():
            y_pred, _ = self.dataset_inference(model, data, idxs=idxs, batch_size=batch_size)
        y_true = label[idxs] if idxs is not None else label
        return self.acc_P_R_F1(y_true, y_pred)

class SelfTrainingBase(SelfTrainingUtils):
    """Base self-training loop: pseudo-annotate the unlabeled target set,
    select confident samples, re-train, and log metrics through fitlog."""

    def __init__(self, log_dir, suffix, model_file, class_num):
        """
        :param log_dir: directory for fitlog output and temp checkpoints
        :param suffix: tag appended to metric names and temp file names
        :param model_file: path for the final saved model
        :param class_num: number of target classes
        """
        super(SelfTrainingBase, self).__init__()
        # os.makedirs is safer than shelling out to `mkdir`: no shell
        # injection via log_dir, works for nested paths, and is race-free.
        os.makedirs(log_dir, exist_ok=True)
        fitlog.set_log_dir("{}/".format(log_dir), new_log=True)
        self.log_dir = log_dir
        self.suffix = suffix
        self.model_file = model_file
        self.best_valid_acc = 0.0    # best validation accuracy seen so far
        self.min_valid_loss = 1e8    # kept for subclasses; not updated here
        self.class_num = class_num
        self.valid_step = 0          # global step counter for fitlog metrics

    def annotate(self, model: RumorDetection, data, pseaudo_idxs=None, batch_size=20):
        """Hard-annotate every sample NOT in `pseaudo_idxs` with the model's
        thresholded predictions, writing into data.data_y in place."""
        # None (not []) as default avoids the shared-mutable-default pitfall.
        pseaudo_idxs = [] if pseaudo_idxs is None else pseaudo_idxs
        c_idxs = list(set(range(len(data))) - set(pseaudo_idxs))
        with torch.no_grad():
            pred_tensor = self.dataset_logits(model, data, idxs=c_idxs, batch_size=batch_size)
            # One-hot-style hard labels: 1 wherever the score exceeds 0.5.
            weak_label = (pred_tensor > 0.5).long().tolist()
            for i, idx in enumerate(c_idxs):
                data.data_y[idx] = weak_label[i]

    def dataSelection(self, pseudo_set):
        # Subclasses narrow the training pool; the base class only checks
        # that annotate() has cached logits on the dataset.
        assert hasattr(pseudo_set, "logits")

    def modelTrain(self, max_epoch, tr_model, train_set, valid_set, extra_train=None,
                   best_valid_acc= -1.0, batch_size=32, learning_rate=5e-4, threshold=0.2):
        """Train `tr_model` on `train_set` (+ optional `extra_train`),
        checkpointing on validation accuracy and early-stopping when the
        mean epoch loss drops below `threshold`.

        :return: the best validation accuracy reached
        """
        valid_acc = best_valid_acc if best_valid_acc != -1 else self.best_valid_acc
        loss_list = []
        tmp_model_file = "{}/{}_tmp.pkl".format(self.log_dir, self.suffix)
        optim = self.obtainOptim(tr_model, learning_rate)
        for epoch in range(max_epoch):
            for step, batch in enumerate(self.dataIter(train_set, extra_train, batch_size=batch_size)):
                loss, acc = self.lossAndAcc(tr_model, batch)
                optim.zero_grad()
                loss.backward()
                optim.step()
                torch.cuda.empty_cache()
                print('####Model Update#### step={} ({} | {}) ####, loss = {}'.format(
                    step, epoch, max_epoch, loss.data.item()
                ))
                loss_list.append(loss.data.item())
            mean_loss = np.mean(loss_list)
            loss_list = []
            print("========> mean loss:", mean_loss)

            # Per-epoch validation; data_y is assumed one-hot here.
            rst = self.perf(tr_model, valid_set, torch.tensor(valid_set.data_y).argmax(dim=1))
            acc_v, (p_v, r_v, f1_v, _) = rst
            output_items = [("valid_acc", acc_v)] + \
                           [('valid_prec_{}'.format(i), p_v[i]) for i in range(self.class_num)] + \
                           [('valid_recall_{}'.format(i), r_v[i]) for i in range(self.class_num)] + \
                           [('valid_f1_{}'.format(i), f1_v[i]) for i in range(self.class_num)]
            print("valid perf:", rst)
            fitlog.add_metric({f"ValidPerf_{self.suffix}": dict(output_items)}, step=self.valid_step)
            self.valid_step += 1

            if acc_v > valid_acc:
                valid_acc = acc_v
                torch.save(tr_model.state_dict(), tmp_model_file)
            if mean_loss < threshold:  # early stop
                break
        if os.path.exists(tmp_model_file):
            # Restore the best checkpoint, then clean up.
            tr_model.load_state_dict(torch.load(tmp_model_file))
            # os.remove instead of os.system("rm ..."): portable and not
            # subject to shell interpretation of the path.
            os.remove(tmp_model_file)
        return valid_acc

    def initTrainingEnv(self, model, labeled_source, labeled_target, unlabeled_set, U_label, isWeightInited=False):
        """Optionally pre-train on the source domain (when weights are not
        already initialized), log the model's original target-domain
        performance, and reset the best-accuracy trackers."""
        if not isWeightInited:
            # Hold out ~100 source samples for validation during pre-training.
            source_val, source_train = labeled_source.split([100./len(labeled_source), 1.0])
            self.modelTrain(10, model,
                            source_train, source_val, best_valid_acc=0.0,
                            learning_rate=5e-3
                            )
            rst_model = self.perf(model, unlabeled_set, U_label)
            acc_v, (p_v, r_v, f1_v, _) = rst_model
            print(f"Original Performance of {self.suffix}:", rst_model)
            output_items = [("valid_acc", acc_v)] + \
                           [('valid_prec_{}'.format(i), p_v[i])  for i in range(self.class_num)] + \
                           [('valid_recall_{}'.format(i), r_v[i]) for i in range(self.class_num)] + \
                           [('valid_f1_{}'.format(i), f1_v[i]) for i in range(self.class_num)]
            fitlog.add_best_metric({f"Original_{self.suffix}": dict(output_items)})
        self.best_valid_acc = 0.0
        self.min_valid_loss = 1e8

    def trainning(self, model, labeled_source, labeled_target, unlabeled_target, UT_label, valid_set,
                  max_iterate=100, isWeightInited=False):
        """Main self-training loop: annotate -> select -> train -> log,
        repeated `max_iterate` times."""
        self.initTrainingEnv(model, labeled_source, labeled_target, unlabeled_target, UT_label, isWeightInited)
        for iterate in range(max_iterate):
            self.annotate(model, unlabeled_target)
            self.dataSelection(unlabeled_target)
            self.modelTrain(1, model, unlabeled_target, valid_set, extra_train=labeled_target, learning_rate=5e-4)
            self.logPerf(model, unlabeled_target, UT_label, self.suffix)

    def logPerf(self, model, test_set, test_label, test_suffix, step=0):
        """Evaluate on `test_set` and push per-class metrics to fitlog.
        NOTE: the `step` parameter is currently unused (self.valid_step is
        used as the fitlog step); kept for interface compatibility."""
        rst_model = self.perf(model, test_set, test_label)
        acc_v, (p_v, r_v, f1_v, _) = rst_model
        print("step = {} : ".format(self.valid_step), rst_model)
        output_items = [("valid_acc", acc_v)] + \
                       [('valid_prec_{}'.format(i), p_v[i]) for i in range(self.class_num)] + \
                       [('valid_recall_{}'.format(i), r_v[i]) for i in range(self.class_num)] + \
                       [('valid_f1_{}'.format(i), f1_v[i]) for i in range(self.class_num)]
        fitlog.add_metric({f"TestPerf_{test_suffix}": dict(output_items)}, step=self.valid_step)
        fitlog.add_best_metric({f"BestPerf_{test_suffix}": dict(output_items)})
        self.valid_step += 1

class CRST_LRENT(SelfTrainingBase):
    """Confidence-regularized self-training variant: soft pseudo-labels
    sharpened against per-class confidence thresholds (lambda_k) derived
    from the top-K most confident predictions, plus top-K sample selection."""

    def __init__(self, log_dir, suffix, model_file, class_num, alpha=0.1, topK=0.2):
        """
        :param alpha: sharpening temperature for pseudo-labels (smaller ->
                      harder labels)
        :param topK: fraction of most confident samples used for thresholds
                     and selection
        """
        super(CRST_LRENT, self).__init__(log_dir, suffix, model_file, class_num)
        self.alpha = alpha
        self.topK = topK

    def annotate(self, model: RumorDetection, data, pseaudo_idxs=None, batch_size=20):
        """Soft-annotate samples outside `pseaudo_idxs`: cache logits on the
        dataset, compute per-class thresholds lambda_k from the top-K scores,
        then write sharpened, normalized soft labels into data.data_y."""
        # None (not []) as default avoids the shared-mutable-default pitfall.
        pseaudo_idxs = [] if pseaudo_idxs is None else pseaudo_idxs
        c_idxs = list(set(range(len(data))) - set(pseaudo_idxs))
        with torch.no_grad():
            pred_tensor = self.dataset_logits(model, data, idxs=c_idxs, batch_size=batch_size)
            if not hasattr(data, "logits"):
                data.logits = torch.zeros([len(data), self.class_num], device=pred_tensor.device)
            data.logits[c_idxs] = pred_tensor
        # Guard k >= 1: with a tiny dataset or small topK, int(...) could be
        # 0, making topk_vals empty and topk_vals[-1] raise.
        k = max(1, int(self.topK * len(pred_tensor)))
        topk_vals, _ = pred_tensor.topk(k, dim=0)
        self.lambda_k = topk_vals[-1]  # per-class confidence threshold
        # Sharpen scores relative to lambda_k, then renormalize per sample.
        pseudo_label = (pred_tensor/self.lambda_k).pow(1.0/self.alpha)
        pseudo_label = (pseudo_label/(pseudo_label.sum(dim=1).unsqueeze(-1))).tolist()
        for i, idx in enumerate(c_idxs):
            data.data_y[idx] = pseudo_label[i]

    def lossAndAcc(self, model, batch):
        """Soft-label cross-entropy: labels (batch[-2]) are distributions,
        so the loss is -sum(label * log(pred)) averaged over the batch."""
        preds = self.predict(model, batch)
        epsilon = torch.ones_like(preds) * 1e-8
        preds = (preds - epsilon).abs()  # to avoid the prediction [1.0, 0.0], which leads to the 'nan' value in log operation
        labels = batch[-2]
        loss = (-1.0*(preds.log())*labels.to(preds.device)).sum(dim=1).mean()
        acc = accuracy_score(labels.argmax(dim=1).cpu().numpy(),
                             preds.argmax(dim=1).cpu().numpy())
        return loss, acc

    def dataSelection(self, pseudo_set):
        """Keep the top-K most confident samples per class for training,
        stored on the dataset as `valid_indexs`."""
        assert hasattr(pseudo_set, "logits")
        # Guard k >= 1 so the selection is never empty.
        k = max(1, int(len(pseudo_set)*self.topK))
        _, indexs = pseudo_set.logits.topk(k, dim=0)
        pseudo_set.valid_indexs = indexs.reshape([-1]).tolist()


class CRST_Dataset(BiGCNTwitterSet):
    """BiGCN Twitter dataset whose collate keeps sequences and graphs as
    plain Python lists and tensorizes only the two label fields."""

    def __init__(self, batchsize=20):
        super(CRST_Dataset, self).__init__(batchsize)

    def collate_raw_batch(self, batch):
        # Each item is (seq, TD_graph, BU_graph, label, topic_label);
        # unzip the batch field-by-field in a single pass.
        seqs, td_graphs, bu_graphs, labels, topic_labels = [], [], [], [], []
        for seq, td, bu, label, topic in batch:
            seqs.append(seq)
            td_graphs.append(td)
            bu_graphs.append(bu)
            labels.append(label)
            topic_labels.append(topic)
        return seqs, td_graphs, bu_graphs, torch.tensor(labels), torch.tensor(topic_labels)

def obtain_model(tfidf_vec):
    """Assemble a BiGCN rumor detector: TF-IDF sentence encoder ->
    bi-directional GCN propagation -> linear binary classifier."""
    encoder = TFIDFBasedVec(tfidf_vec, 20, embedding_size=300,
                            w2v_dir="../../saved/glove_en/", grad_preserve=True,
                            emb_update=True)
    propagation = BiGCN(300, 256)
    classifier = nn.Linear(1024, 2)
    return BiGCNRumorDetec(encoder, propagation, classifier,
                           batch_size=20, grad_accum_cnt=1)

def obtain_Domain_set(fs_prefix, od_prefix, nd_prefix):
    """Load the few-shot, old-domain and new-domain datasets from their
    cached data prefixes and return them in that order."""
    loaded = []
    for prefix in (fs_prefix, od_prefix, nd_prefix):
        dataset = CRST_Dataset()
        dataset.load_data_fast(data_prefix=prefix)
        loaded.append(dataset)
    return tuple(loaded)


if __name__ == "__main__":
    # Pre-trained checkpoints per source domain (index = domainID).
    # NOTE(review): BiGCN1_Paths is currently unused (BiGCN2_Paths is loaded
    # below); kept for experiment switching.
    BiGCN1_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.81.pkl",
                   "../../saved/TFIDF_BiGCN_ferguson_0.68.pkl",
                   "../../saved/TFIDF_BiGCN_germanwings-crash_0.70.pkl",
                   "../../saved/TFIDF_BiGCN_ottawashooting_0.68.pkl",
                   "../../saved/TFIDF_BiGCN_sydneysiege_0.67.pkl"
                   ]

    BiGCN2_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.80.pkl",
                   "../../saved/TFIDF_BiGCN_ferguson_0.71.pkl",
                   "../../saved/TFIDF_BiGCN_germanwings-crash_0.68.pkl",
                   "../../saved/TFIDF_BiGCN_ottawashooting_0.70.pkl",
                   "../../saved/TFIDF_BiGCN_sydneysiege_0.66.pkl"
                   ]

    domainID = 4       # which event acts as the target (new) domain
    fewShotCnt = 100   # number of labeled few-shot target samples
    fewShotSet, oldDomain, newDomain = obtain_Domain_set(
        f"../../data/twitter_fs{domainID}_{fewShotCnt}",
        f"../../data/twitter_od{domainID}_{fewShotCnt}",
        f"../../data/twitter_nd{domainID}_{fewShotCnt}"
    )

    newDomainName = newDomain.data[newDomain.data_ID[0]]['event']
    # log_dir = "MetaSelfTrain_0"
    # BUG FIX: str.rstrip(".py") strips any trailing run of the characters
    # '.', 'p', 'y' (e.g. "copy.py" -> "co"); splitext removes only the
    # extension.
    logDir = os.path.splitext(str(__file__))[0]
    trainer = CRST_LRENT(logDir,
                         "{}_FS{}".format(newDomainName, fewShotCnt),
                         "{}/model_{}.pth".format(logDir, newDomainName),
                         class_num=2)

    # Fit (or load a cached) TF-IDF vectorizer over all three corpora.
    # NOTE: pickle.load on a locally produced cache file (trusted input).
    Tf_IdfTwitterFile = "../../saved/TfIdf_twitter.pkl"
    if os.path.exists(Tf_IdfTwitterFile):
        with open(Tf_IdfTwitterFile, "rb") as fr:
            tv = pickle.load(fr)
    else:
        lemma = Lemma_Factory()
        corpus = [" ".join(lemma(txt)) for data in [fewShotSet, oldDomain, newDomain]
                  for ID in data.data_ID for txt in data.data[ID]['text']]
        tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
        _ = tv.fit_transform(corpus)
        with open(Tf_IdfTwitterFile, "wb") as fw:
            pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)
    model1 = obtain_model(tv)
    model1.load_model(BiGCN2_Paths[domainID])

    pseaudoIdxs = []
    expandSetIdxs = []
    unlabeledSet = newDomain
    # Ground-truth labels of the "unlabeled" target set, used only for
    # evaluation/logging inside the self-training loop.
    ULabel = torch.tensor(unlabeledSet.data_y).argmax(dim=1)
    print(f"fewShotSet/newDomain/oldDomain = {len(fewShotSet)}/{len(newDomain)}/{len(oldDomain)}")

    # Log the source model's zero-shot performance, then self-train.
    trainer.logPerf(model1, newDomain, ULabel, trainer.suffix)
    trainer.trainning(model1,
                      labeled_source=oldDomain,
                      labeled_target=None,
                      unlabeled_target=newDomain,
                      UT_label = ULabel,
                      valid_set=fewShotSet,
                      max_iterate=100, isWeightInited=True
                      )