import sys
sys.path.append("../../")
sys.path.append("../")
from Dataloader.dataloader_utils import Sample_data, Merge_data, Lemma_Factory
from Dataloader.weiboloader import BiGCNWeiboSet, topics
from SentModel.Sent2Vec import TFIDFBasedVec_CN
from PropModel.GraphPropagation import BiGCN
from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec
from RumdetecFramework.BaseRumorFramework import RumorDetection
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
import pickle, os, random, fitlog, math, pandas as pd
import torch, torch.nn.functional as F, torch.nn as nn
from tqdm import trange


def DataIter(labeled_source, labeled_target, batch_size=32):
    """Build a shuffled batch iterator over a source set plus an optional target set.

    Returns (num_batches, generator_factory). Each call of the factory yields
    batches collated by labeled_source.collate_raw_batch, drawn from both
    datasets in a fresh random order.
    """
    # Lazily attach the index list the rest of the pipeline expects.
    if not hasattr(labeled_source, 'valid_indexs'):
        labeled_source.valid_indexs = list(range(len(labeled_source)))
    src_idxs = labeled_source.valid_indexs
    src_len = len(src_idxs)
    if labeled_target is not None:
        tgt_idxs = labeled_target.valid_indexs if hasattr(labeled_target, 'valid_indexs') \
            else list(range(len(labeled_target)))
    else:
        tgt_idxs = []
    total = src_len + len(tgt_idxs)

    def fetch(idx):
        # Indices below src_len address the source set; the rest, the target set.
        if idx < src_len:
            return labeled_source[src_idxs[idx]]
        return labeled_target[tgt_idxs[idx - src_len]]

    def generator():
        # Doubling the shuffled index list lets the final slice wrap around,
        # so every batch (including the last) holds exactly batch_size items.
        order = random.sample(range(total), total) * 2
        for start in range(0, total, batch_size):
            chunk = order[start:start + batch_size]
            yield labeled_source.collate_raw_batch([fetch(i) for i in chunk])

    return math.ceil(total * 1.0 / batch_size), generator

class SAndTUtils:
    """Shared helpers for source-and-target (S&T) rumor-detection training:
    mixed-batch iteration, optimizer construction, per-batch loss/accuracy,
    and whole-dataset inference/evaluation for a RumorDetection model."""

    def __init__(self):
        pass

    def acc_P_R_F1(self, y_true, y_pred):
        """Return (accuracy, (precision, recall, f1, support)).

        y_pred is moved to CPU before scoring; y_true is assumed to already be
        acceptable to sklearn (CPU tensor / array).
        """
        return accuracy_score(y_true, y_pred.cpu()), \
                    precision_recall_fscore_support(y_true, y_pred.cpu())

    def dataIter(self, pseudo_set, labeled_target=None, batch_size=32):
        """Yield collated batches mixing pseudo_set with an optional labeled_target.

        Mixed indices below len(pseudo_set) address pseudo_set; the rest address
        labeled_target. The shuffled index list is doubled so the final slice can
        wrap around, keeping every batch at full batch_size.
        """
        p_idxs = list(range(len(pseudo_set))) if not hasattr(pseudo_set, 'valid_indexs') else pseudo_set.valid_indexs
        p_len = len(p_idxs)
        if labeled_target is None:
            l_len = 0
            l_idxs = []
        else:
            l_idxs = list(range(len(labeled_target))) if not hasattr(labeled_target, 'valid_indexs') \
                                                        else labeled_target.valid_indexs
            l_len = len(l_idxs)
        data_size = p_len + l_len
        idxs = random.sample(range(data_size), data_size)*2
        for start_i in range(0, data_size, batch_size):
            batch_idxs = idxs[(start_i):(start_i+batch_size)]
            items = [pseudo_set[p_idxs[idx]] if idx < p_len else \
                        labeled_target[l_idxs[idx-p_len]] for idx in batch_idxs]
            yield pseudo_set.collate_raw_batch(items)

    def obtainOptim(self, tr_model, learning_rate):
        """Build an Adam optimizer over all parameters of tr_model at one rate."""
        return torch.optim.Adam([
            {'params': tr_model.parameters(), 'lr': learning_rate}
        ])

    def lossAndAcc(self, model, batch, temperature=1.0):
        """Compute (nll_loss, accuracy) for one batch.

        Assumes batch[-2] holds the labels, one-hot (2-D) or index (1-D) form —
        confirm against the dataset's collate_raw_batch layout.
        """
        pooledOutput = model.Batch2Vecs(batch)
        logits = model.rdm_cls(pooledOutput)
        preds = F.softmax(logits / temperature, dim=1)
        # Nudge probabilities away from exact 0.0/1.0 so preds.log() below
        # cannot produce -inf/nan.
        epsilon = torch.ones_like(preds) * 1e-8
        preds = (preds - epsilon).abs()
        labels = batch[-2].to(preds.device)
        labels = labels.argmax(dim=1) if labels.dim() == 2 else labels
        loss = F.nll_loss(preds.log(), labels)
        acc = accuracy_score(labels.cpu().numpy(), preds.argmax(dim=1).cpu().numpy())
        return loss, acc

    def dataset_logits(self, model: "RumorDetection", data, idxs=None, batch_size=40):
        """Run model.forward over data (optionally restricted to idxs) and
        concatenate the per-batch outputs into one tensor."""
        preds = []
        if idxs is None:
            idxs = list(range(len(data)))
        for i in trange(0, len(idxs), batch_size):
            batch_idxs = idxs[i:min(len(idxs), i + batch_size)]
            batch = data.collate_raw_batch([data[idx] for idx in batch_idxs])
            pred = model.forward(batch)
            preds.append(pred)
        pred_tensor = torch.cat(preds)
        return pred_tensor

    def dataset_inference(self, model: "RumorDetection", data, idxs=None, batch_size=20):
        """Return (predicted class, its score) for every example.

        FIX: take the LAST sorted column (the maximum) instead of the hard-coded
        column 1, which was only correct for exactly 2 classes and inconsistent
        with perf(), which already used -1. Identical result for 2 classes.
        """
        pred_tensor = self.dataset_logits(model, data, idxs, batch_size)
        vals, idxs = pred_tensor.sort(dim=1)
        return idxs[:, -1], vals[:, -1]

    def perf(self, model: "RumorDetection", data, label, idxs=None, batch_size=20):
        """Evaluate model on data; returns (acc, (P, R, F1, support), loss).

        NOTE(review): the loss is F.nll_loss(predTensor.log(), ...), which
        presumes model.forward emits probabilities — confirm upstream.
        """
        with torch.no_grad():
            predTensor = self.dataset_logits(model, data, idxs, batch_size)
            _, yPred = predTensor.sort(dim=1)
        yTrue = label[idxs] if idxs is not None else label
        loss = F.nll_loss(predTensor.log(), yTrue.to(predTensor.device))
        return self.acc_P_R_F1(yTrue, yPred[:, -1]) + (loss,)

class SAndT_Trainer(SAndTUtils):
    """Drives S&T training: iterates mixed source/target batches, validates
    every few steps, checkpoints on best validation accuracy, and logs
    validation/test metrics to fitlog."""

    def __init__(self, log_dir, suffix, model_file, class_num, temperature=1.0,
                 learning_rate=5e-3, batch_size=32):
        super(SAndT_Trainer, self).__init__()
        # Portable directory creation instead of shelling out to `mkdir`.
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        fitlog.set_log_dir("{}/".format(log_dir), new_log=True)
        self.log_dir = log_dir
        self.suffix = suffix
        self.model_file = model_file    # checkpoint path for the best model
        self.best_valid_acc = 0.0
        self.min_valid_loss = 1e8
        self.class_num = class_num
        self.temperature = temperature
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.valid_step = 0             # monotonically increasing fitlog step

    def _metric_items(self, acc_v, loss_v, p_v, r_v, f1_v):
        # Flatten acc/loss plus per-class P/R/F1 into the dict fitlog expects.
        items = [("valid_acc", acc_v), ("valid_loss", loss_v)]
        items += [('valid_prec_{}'.format(i), p_v[i]) for i in range(self.class_num)]
        items += [('valid_recall_{}'.format(i), r_v[i]) for i in range(self.class_num)]
        items += [('valid_f1_{}'.format(i), f1_v[i]) for i in range(self.class_num)]
        return dict(items)

    def ModelTrain(self, trModel: "RumorDetection", labeledSource: "BiGCNWeiboSet",
                   labeledTarget: "BiGCNWeiboSet", unlabeledTarget: "BiGCNWeiboSet",
                   validSet: "BiGCNWeiboSet", UT_Label, maxEpoch, validEvery=20):
        """Train trModel for maxEpoch epochs, validating every `validEvery`
        steps and saving a checkpoint whenever validation accuracy improves."""
        print("labeled Source/labeled Target/unlabeled Target: {}/{}/{}".format(len(labeledSource),
                                                                                len(labeledTarget) if labeledTarget is not None else 0,
                                                                                len(unlabeledTarget)))
        # The sentence encoder trains at a 10x smaller LR than the heads.
        optim = torch.optim.Adam([
            {'params': trModel.sent2vec.parameters(), 'lr': self.learning_rate * 0.1},
            {'params': trModel.prop_model.parameters(), 'lr': self.learning_rate},
            {'params': trModel.rdm_cls.parameters(), 'lr': self.learning_rate}
        ])
        validLabel = torch.tensor(validSet.data_y).argmax(dim=1)
        for epoch in range(maxEpoch):
            maxIters, trainLoader = DataIter(labeledSource, labeledTarget, self.batch_size)
            for step, batch1 in enumerate(trainLoader()):
                loss, acc = self.lossAndAcc(trModel, batch1, temperature=self.temperature)
                trainLoss = loss
                optim.zero_grad()
                trainLoss.backward()
                optim.step()
                torch.cuda.empty_cache()
                print('####Model Update (%3d | %3d) %3d | %3d ####, loss = %6.8f, acc = %6.8f' % (
                    step, maxIters, epoch, maxEpoch, loss.data.item(), acc
                ))
                if (step + 1) % validEvery == 0:
                    rst = self.perf(trModel, validSet, validLabel)
                    acc_v, (p_v, r_v, f1_v, _), loss_v = rst
                    print("valid perf:", rst)
                    fitlog.add_metric({f"ValidPerf_{self.suffix}": self._metric_items(acc_v, loss_v, p_v, r_v, f1_v)},
                                      step=self.valid_step)
                    # BUG FIX: checkpoint on the *validation* accuracy acc_v;
                    # the original compared the last training-batch acc.
                    if acc_v > self.best_valid_acc:
                        torch.save(trModel.state_dict(), self.model_file)
                        self.best_valid_acc = acc_v
                        self.logPerf(trModel, unlabeledTarget, UT_Label, self.suffix, step=1)
                    else:
                        self.logPerf(trModel, unlabeledTarget, UT_Label, self.suffix)

    def logPerf(self, model, test_set, test_label, test_suffix, step=0):
        """Evaluate on the test set and log to fitlog; step > 0 marks a new best."""
        rst_model = self.perf(model, test_set, test_label)
        acc_v, (p_v, r_v, f1_v, _), loss_v = rst_model
        if step > 0:
            print("Best Perf:", rst_model)
        else:
            print("Test Perf:", rst_model)
        fitlog.add_metric({f"TestPerf_{test_suffix}": self._metric_items(acc_v, loss_v, p_v, r_v, f1_v)},
                          step=self.valid_step)
        self.valid_step += 1

def obtainOnlineSet(old_prefix, dev_prefix, te_prefix, lt_cnt=0):
    """Load (or rebuild and cache) the dev / old-domain / new-domain Weibo splits.

    Returns (few_shot_dev, old_domain, new_domain, labeled_target); the last
    entry is None unless lt_cnt > 0, in which case lt_cnt items are carved off
    the new-domain set as a labeled-target split.
    """
    fs_set, od_set, nd_set = BiGCNWeiboSet(), BiGCNWeiboSet(), BiGCNWeiboSet()
    try:
        # Fast path: every split was already cached by a previous run.
        od_set.load_data_fast(data_prefix=old_prefix)
        fs_set.load_data_fast(data_prefix=dev_prefix)
        nd_set.load_data_fast(data_prefix=te_prefix)
    except AssertionError:
        # Cache miss: rebuild the splits from the raw Weibo dump and cache them.
        weibo_dir, df_file = "../../data/Weibo", "../../data/Weibo_IDs.csv"
        df = pd.read_csv(df_file)
        od_set.load_data(weibo_dir, weibo_df=df[:2500])
        od_set.Caches_Data(old_prefix)
        dev_te_df = df[2500:].sample(frac=1.0)
        fs_set.load_data(weibo_dir, weibo_df=dev_te_df[:100])
        fs_set.Caches_Data(dev_prefix)
        nd_set.load_data(weibo_dir, weibo_df=dev_te_df[100:])
        nd_set.Caches_Data(te_prefix)
    if lt_cnt <= 0:
        return fs_set, od_set, nd_set, None
    nd_set_1, nd_set_2 = nd_set.split([lt_cnt * 1.0 / len(nd_set), 1.0])
    return fs_set, od_set, nd_set_2, nd_set_1


def obtain_model(tfidf_vec) -> BiGCNRumorDetec:
    """Assemble the BiGCN rumor detector:
    TF-IDF sentence encoder -> BiGCN propagation -> linear 2-class head."""
    sent_encoder = TFIDFBasedVec_CN(
        tfidf_vec, 20, embedding_size=300,
        w2v_file="../../word2vec_CN_WeiboBi.pkl",
        emb_update=True, grad_preserve=True,
    )
    propagation = BiGCN(300, 256)
    classifier = nn.Linear(1024, 2)
    return BiGCNRumorDetec(sent_encoder, propagation, classifier,
                           batch_size=20, grad_accum_cnt=1)


if __name__ == "__main__":
    import shutil

    # Work in a throw-away log directory, recreated from scratch each run.
    # (Removed the dead `str(__file__).rstrip(".py")` line: rstrip strips a
    # character *set*, not a suffix, and the value was overwritten anyway.)
    logDir = "./tmp/"
    if os.path.exists(logDir):
        shutil.rmtree(logDir)  # safer and portable vs. `os.system("rm -rf ...")`
    os.makedirs(logDir)

    fewShotCnt = 100
    validTarget, labeledSource, unlabeledTarget, labeledTarget = obtainOnlineSet(
                     f"../../data/WeiboOnline_FS{fewShotCnt}_tr",
                     f"../../data/WeiboOnline_FS{fewShotCnt}_dev",
                     f"../../data/WeiboOnline_FS{fewShotCnt}_te",
                     lt_cnt=0
    )
    TfIdf_twitter_file = "../../saved/TfIdf_WEIBO.pkl"
    if os.path.exists(TfIdf_twitter_file):
        with open(TfIdf_twitter_file, "rb") as fr:
            tv = pickle.load(fr)
    else:
        # Fit the TF-IDF vocabulary over every available split's texts.
        lemma = Lemma_Factory()
        # BUG FIX: labeledTarget is None when lt_cnt == 0, which crashed the
        # original comprehension on `None.data_ID`; skip missing splits.
        corpus = [" ".join(lemma(txt))
                  for data in [validTarget, labeledSource, unlabeledTarget, labeledTarget]
                  if data is not None
                  for ID in data.data_ID for txt in data.data[ID]['text']]
        tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
        _ = tv.fit_transform(corpus)
        with open(TfIdf_twitter_file, "wb") as fw:
            pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)
    newDomainName = "OnlineTest"

    model1 = obtain_model(tv)
    ut_label = torch.tensor(unlabeledTarget.data_y).argmax(dim=1)

    trainer = SAndT_Trainer(log_dir=logDir, suffix=f"{newDomainName}_FS{fewShotCnt}",
                         model_file=f"{logDir}/SAndT_{newDomainName}_FS{fewShotCnt}",
                         class_num=2, temperature=1.0, learning_rate=5e-4, batch_size=32)
    # Baseline test-set performance before any fine-tuning, then train.
    trainer.logPerf(model1, unlabeledTarget, ut_label, trainer.suffix)
    trainer.ModelTrain(model1, labeledSource, labeledTarget, unlabeledTarget, validTarget,
                       ut_label, maxEpoch=100, validEvery=20)