import sys
sys.path.append("../../")
sys.path.append("../")

# standard library
import os
import pickle
import random
import shutil

# third-party
import fitlog
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, precision_score, \
            recall_score, f1_score, precision_recall_fscore_support
from torch.utils.data import DataLoader
from tqdm import trange

# project-local (resolved through the sys.path entries above)
from Dataloader.dataloader_utils import Sample_data, Merge_data, Lemma_Factory
from Dataloader.twitterloader import TwitterSet, BiGCNTwitterSet
from SentModel.Sent2Vec import TFIDFBasedVec, W2VRDMVec
from PropModel.GraphPropagation import BiGCN
from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec
from RumdetecFramework.BaseRumorFramework import RumorDetection
from RumdetecFramework.InstanceReweighting import MetaEvaluator, WeightedAcc

def pred_Logits(model:RumorDetection, data, idxs=None, batch_size=20):
    """Run `model` over `data` in mini-batches and return the stacked outputs.

    :param model: rumor-detection model exposing `forward(batch)`
    :param data: dataset exposing `__len__`, `__getitem__` and `collate_raw_batch`
    :param idxs: optional subset of sample indices; defaults to the whole dataset
    :param batch_size: number of samples per forward pass
    :return: tensor with one prediction row per entry of `idxs`
    """
    if idxs is None:
        idxs = list(range(len(data)))
    batch_outputs = []
    with torch.no_grad():  # pure inference: no gradient bookkeeping
        for start in range(0, len(idxs), batch_size):
            chunk = idxs[start:start + batch_size]
            raw_batch = data.collate_raw_batch([data[j] for j in chunk])
            batch_outputs.append(model.forward(raw_batch))
    return torch.cat(batch_outputs)

def prediction(model:RumorDetection, data, idxs=None, batch_size=20):
    """Predict class labels and their confidences for (a subset of) `data`.

    Fix/generalization: the original sorted along dim 1 and took column 1,
    which yields the argmax/max only when there are exactly two classes;
    `max(dim=1)` is equivalent for the binary case and correct for any
    number of classes.

    :return: (predicted class indices, corresponding confidence scores)
    """
    pred_tensor = pred_Logits(model, data, idxs, batch_size)
    confidences, pred_labels = pred_tensor.max(dim=1)
    return pred_labels, confidences

def acc_P_R_F1(y_true, y_pred):
    """Accuracy plus per-class (precision, recall, F1, support).

    `y_pred` is a torch tensor (possibly on GPU); it is moved to CPU once
    before being handed to sklearn.
    """
    y_pred_cpu = y_pred.cpu()
    accuracy = accuracy_score(y_true, y_pred_cpu)
    prf_support = precision_recall_fscore_support(y_true, y_pred_cpu)
    return accuracy, prf_support

def Perf(model:RumorDetection, data, label, idxs=None, batch_size=20):
    """Evaluate `model` on (a subset of) `data` against ground-truth `label`.

    :return: (accuracy, (precision, recall, F1, support)) from acc_P_R_F1
    """
    predicted, _confidence = prediction(model, data, idxs=idxs, batch_size=batch_size)
    if idxs is None:
        gold = label
    else:
        gold = label[idxs]
    return acc_P_R_F1(gold, predicted)

def WeakLabeling(model:RumorDetection, data, pseaudo_idxs=None, batch_size=20):
    """Pseudo-label every sample outside `pseaudo_idxs` and score its entropy.

    Fix: the original used a mutable default argument (`pseaudo_idxs=[]`),
    which is shared across calls; `None` is now the sentinel.

    :param model: model used to produce the weak labels (must expose `.device`)
    :param data: dataset; its `data_y` entries are OVERWRITTEN in place for
                 every index not listed in `pseaudo_idxs`
    :param pseaudo_idxs: indices whose labels are trusted and left untouched
    :param batch_size: forward-pass batch size
    :return: (per-sample entropy tensor [0 for trusted indices],
              predicted labels, confidences) — the last two are aligned with
              the re-labelled subset `c_idxs`, not the full dataset
    """
    if pseaudo_idxs is None:
        pseaudo_idxs = []
    c_idxs = list(set(range(len(data))) - set(pseaudo_idxs))
    pred_tensor = pred_Logits(model, data, idxs=c_idxs, batch_size=batch_size)
    # ascending sort: column 1 holds the larger score / its class index
    confs, preds = pred_tensor.sort(dim=1)
    # NOTE(review): this stores a per-class 0/1 list (length = n_classes) into
    # each data_y slot, not a single class index — confirm downstream format.
    weak_label = (pred_tensor > 0.5).long().tolist()
    for i, idx in enumerate(c_idxs):
        data.data_y[idx] = weak_label[i]
    # entropy as sum_c p_c * |log p_c| — assumes pred_tensor rows are
    # probabilities in (0, 1]; TODO confirm the model outputs softmax scores
    entrophy = torch.zeros([len(data)], device=model.device)
    entrophy[c_idxs] = (confs.log().abs() * confs).sum(dim=1)
    return entrophy, preds[:, 1], confs[:, 1]

def obtain_model(tfidf_vec):
    """Assemble the BiGCN rumor detector: TF-IDF-based sentence encoder,
    bidirectional GCN propagation model, and a linear 2-way classifier head.
    """
    sentence_encoder = TFIDFBasedVec(
        tfidf_vec,
        20,
        embedding_size=300,
        w2v_dir="../../saved/glove_en/",
        emb_update=True,
        aug_type="mix",
    )
    propagation = BiGCN(300, 256)
    classifier = nn.Linear(1024, 2)
    return BiGCNRumorDetec(sentence_encoder, propagation, classifier,
                           batch_size=20, grad_accum_cnt=1)

def AugLoss(model:BiGCNRumorDetec, batch):
    """NLL loss and accuracy of the model's augmented-view predictions.

    :param batch: collated batch whose second-to-last field holds the labels
    :return: (loss tensor, accuracy float)
    """
    preds = model.AugPredict(batch)
    # Shift probabilities slightly away from exact 0/1 so log() cannot
    # produce nan/inf.
    eps = torch.ones_like(preds) * 1e-8
    preds = (preds - eps).abs()
    labels = batch[-2].to(model.device)
    loss = F.nll_loss(preds.log(), labels)
    hard_preds = preds.argmax(dim=1).cpu().numpy()
    acc = accuracy_score(labels.cpu().numpy(), hard_preds)
    return loss, acc


def ContrastiveLoss(model:BiGCNRumorDetec, batch1, batch2, sim):
    """
    Topic-contrastive loss plus an auxiliary rumor-classification loss.

    :param model: detector exposing `AugBatch2Vecs` and the `rdm_cls` head
    :param batch1: the compared instances, its number is equal to the batchsize, e.g. 20, 32, etc.
    :param batch2: the anchor instances, its number is equal to the number of topic, e.g. 5 in Pheme Dataset
    :param sim: the similarity matrix is a [batch1, batch2] one,
    :return: (contrastive loss + rumor-classification loss, topic accuracy)

    NOTE(review): mutates `batch1[-2]` IN PLACE (labels of instances whose
    topic equals the module-level global `domain_ID` are set to -1 so that
    nll_loss ignores them) — callers must not reuse batch1 labels afterwards.
    """
    vecs1 = model.AugBatch2Vecs(batch1)
    vecs2 = model.AugBatch2Vecs(batch2)
    # cosine denominator: outer product of the row norms (+eps for stability)
    norm_mtx = torch.matmul(vecs1.norm(dim=1).unsqueeze(-1),
                            vecs2.norm(dim=1).unsqueeze(0)) \
                + torch.ones([len(vecs1), len(vecs2)], device=vecs2.device)*1e-8
    cosine = torch.matmul(vecs1, vecs2.T)/norm_mtx
    similarity = cosine.softmax(dim=1)
    epsilon = torch.ones_like(similarity) * 1e-8
    similarity = (similarity- epsilon).abs()  # to avoid the prediction [1.0, 0.0], which leads to the 'nan' value in log operation
    loss = F.nll_loss(similarity.log(), sim.to(cosine.device))

    # auxiliary rumor classification on the compared instances
    preds = model.rdm_cls(vecs1).softmax(dim=1)
    preds = (preds - torch.ones_like(preds)*1e-8).abs()
    batch1[-2][batch1[-1].__eq__(domain_ID)] = -1  # mask target-domain labels
    rdm_loss = F.nll_loss(preds.log(), batch1[-2].to(preds.device), ignore_index=-1)
    if torch.isnan(loss):
        # debug dump when the contrastive loss degenerates
        print("similarity : ", similarity.tolist())
        print("sim_mtx : ", sim.tolist())
    acc = accuracy_score(sim, similarity.data.argmax(dim=1).cpu())
    return loss+rdm_loss, acc

def obtain_Domain_set(fs_prefix, od_prefix, nd_prefix):
    """Load the few-shot, old-domain and new-domain twitter splits.

    Each prefix points at pre-serialized data understood by
    `TwitterSet.load_data_fast`; the three sets are returned in the same
    order as the prefixes.
    """
    splits = []
    for prefix in (fs_prefix, od_prefix, nd_prefix):
        split = TwitterSet()
        split.load_data_fast(data_prefix=prefix)
        splits.append(split)
    return tuple(splits)

def Convert_2_BiGCNFormat(data):
    """Re-wrap a TwitterSet-like object as a BiGCNTwitterSet.

    Only references are copied — the converted set shares its underlying
    data structures with `data`.
    """
    converted = BiGCNTwitterSet()
    converted.data = data.data
    converted.data_ID = data.data_ID
    converted.data_len = data.data_len
    converted.data_y = data.data_y
    return converted

def DomainIter(data, batchsize=20, max_iter=-1):
    """Yield (anchor_batch, contrastive_batch, sim) triples for contrastive training.

    Each anchor batch holds one randomly drawn sample per topic; each
    contrastive batch holds `batchsize` samples drawn uniformly (without
    replacement) from the whole set. `sim` is the last collated field of the
    contrastive batch (its topic labels).

    :param max_iter: number of triples to yield; -1 means
        len(data) * max_topic_label iterations.
    """
    all_indices = list(range(len(data)))
    topic_labels = [data.data[data.data_ID[index]]['topic_label'] for index in range(len(data))]
    n_topics = max(topic_labels) + 1
    per_topic = [[idx for idx, label in enumerate(topic_labels) if label == t]
                 for t in range(n_topics)]
    if max_iter == -1:
        max_iter = len(data) * max(topic_labels)
    for _ in range(max_iter):
        anchor_idxs = [random.sample(bucket, 1)[0] for bucket in per_topic]
        anchor_batch = data.collate_raw_batch([data[idx] for idx in anchor_idxs])
        contrast_idxs = random.sample(all_indices, batchsize)
        contrastive_batch = data.collate_raw_batch([data[idx] for idx in contrast_idxs])
        yield anchor_batch, contrastive_batch, contrastive_batch[-1]

def dataset2Vecs(model, dataset, batch_size=20):
    """Encode the whole `dataset` into vectors plus labels, batch by batch.

    Bug fix: the inner slice previously used a hard-coded `20` instead of
    `batch_size`, so any non-default batch size either skipped samples
    (batch_size > 20) or duplicated them (batch_size < 20).

    :param model: encoder exposing `Batch2Vecs(batch)`
    :param dataset: dataset exposing `__len__`, `__getitem__`,
                    `collate_raw_batch`; last collated field is the label
    :return: (vecs tensor [N, D], labels tensor [N])
    """
    vec_list, label_list = [], []
    with torch.no_grad():
        for i in trange(0, len(dataset), batch_size):
            upper = min(i + batch_size, len(dataset))
            batch = dataset.collate_raw_batch([dataset[j] for j in range(i, upper)])
            vecs = model.Batch2Vecs(batch)
            label_list.append(batch[-1])
            vec_list.append(vecs)
        vecTensor = torch.cat(vec_list, dim=0)
        labelTensor = torch.cat(label_list, dim=0)
    return vecTensor, labelTensor

def ContrastiveInference(model, dataset, batch_size=20):
    """Evaluate how well pairwise cosine sign agrees with label equality.

    A pair of samples is predicted "same label" when its vectors' cosine
    similarity is positive; ground truth is whether the two labels are
    actually equal. Returns acc plus (precision, recall, F1, support) over
    all N*N ordered pairs.
    """
    vecTensor, labelTensor = dataset2Vecs(model, dataset, batch_size=batch_size)
    norms = vecTensor.norm(dim=1)
    denom = torch.matmul(norms.unsqueeze(-1), norms.unsqueeze(0))
    cosine = torch.matmul(vecTensor, vecTensor.T) / denom
    pos_pair = cosine.__gt__(0).int()
    # labels shifted by +1 so that the ratio below is exactly 1.0 iff equal
    shifted = labelTensor + 1
    sim = torch.matmul(shifted.unsqueeze(-1), shifted.unsqueeze(0)) * 1.0 / \
          (shifted * shifted).unsqueeze(-1)
    sim_pair = sim.__eq__(1.0).int()
    return acc_P_R_F1(sim_pair.reshape([-1]), pos_pair.reshape([-1]))

def InferPerf(model, query_set, support_set, batch_size=20):
    """Vote-based domain inference check, printed to stdout.

    A query sample is assigned to the target domain when more than half of
    the support set has positive cosine similarity with it; the labels of
    those selected queries are then scored against the module-level global
    `domain_ID`.

    Side effect: dumps the intermediate vectors/labels to InferPerfEnv.pkl
    (debugging snapshot).

    NOTE(review): assumes `label_query` entries equal to `domain_ID` mark
    target-domain samples — confirm against the data loader.
    """
    vec_query, label_query = dataset2Vecs(model, query_set, batch_size=batch_size)
    vec_support, label_support = dataset2Vecs(model, support_set, batch_size=batch_size)
    with open("InferPerfEnv.pkl", "wb") as fb:
        pickle.dump((vec_query, vec_support, label_query, label_support),
                    fb, protocol=pickle.HIGHEST_PROTOCOL)
    # cosine similarity between every query and every support vector
    cosine = torch.matmul(vec_query, vec_support.T) / torch.matmul(vec_query.norm(dim=1).unsqueeze(-1),
                                                                   vec_support.norm(dim=1).unsqueeze(0))
    # fraction of support instances "voting" (positive cosine) for each query
    votes = cosine.__gt__(0).int().sum(dim=1)
    votes = votes/(len(support_set)*1.0)
    selected_label = label_query[votes.__gt__(0.5)]
    print("Inference Performance:", acc_P_R_F1(torch.ones_like(selected_label)*domain_ID, selected_label))

def Training(model:RumorDetection, train_set, dev_set,
                valid_every=100, max_iters=100000, lr_discount=1.0,
                best_valid_acc=0.0, log_dir="../logs/", log_suffix="_RumorDetection",
                model_file="./DomainContrastive.pth", RenameModel=True):
    """Domain-contrastive training loop with periodic validation.

    Trains `model` on triples from DomainIter, validates every `valid_every`
    accumulated steps via ContrastiveInference, checkpoints on improvement,
    and stops after 5 consecutive validations without improvement.

    Fix: the running loss now accumulates Python floats (`loss.item()`)
    instead of live tensors, so the autograd graph / GPU tensors are not
    kept alive across iterations.

    :param valid_every: validation period, in accumulated optimizer steps
    :param max_iters: maximum number of training iterations
    :param lr_discount: multiplier applied to all group learning rates
    :param best_valid_acc: starting best accuracy (supports resuming)
    :param log_suffix, RenameModel: currently unused — kept so existing
        callers remain compatible
    """
    fitlog.set_log_dir("%s/" % log_dir, new_log=True)
    train_iter = DomainIter(train_set, batchsize=20, max_iter=max_iters)
    optim = torch.optim.Adam([
        {'params': model.sent2vec.parameters(), 'lr': 5e-5 * lr_discount / model.grad_accum_cnt},
        {'params': model.prop_model.parameters(), 'lr': 1e-3 * lr_discount / model.grad_accum_cnt},
        {'params': model.rdm_cls.parameters(), 'lr': 1e-3 * lr_discount / model.grad_accum_cnt}
    ], weight_decay=0.5)  # NOTE(review): 0.5 is an unusually strong weight decay — confirm intended
    counter = 0
    optim.zero_grad()
    model.train()
    sum_loss, sum_acc = 0.0, 0.0
    accIncrement_counter = 0  # consecutive validations without improvement
    for step, (anchor_batch, contrastive_batch, sim) in enumerate(train_iter):
        loss, acc = ContrastiveLoss(model, contrastive_batch, anchor_batch, sim)
        loss.backward()
        torch.cuda.empty_cache()
        # NOTE(review): steps at step 0, k, 2k, ... while the logging below
        # uses (step + 1) % k — preserved as-is to avoid changing training
        # dynamics; confirm whether (step + 1) was intended here too.
        if step % model.grad_accum_cnt == 0:
            optim.step()
            optim.zero_grad()
        sum_loss += loss.item()  # detach from the graph before accumulating
        sum_acc += acc
        if (step + 1) % model.grad_accum_cnt == 0:
            print('%6d | %6d, loss/acc = %6.8f/%6.7f, best_valid_acc:%6.7f ' % (
                step, max_iters,
                sum_loss / model.grad_accum_cnt, sum_acc / model.grad_accum_cnt,
                best_valid_acc
            )
                  )
            fitlog.add_metric(
                {"train": {"acc": sum_acc / model.grad_accum_cnt, "loss": sum_loss / model.grad_accum_cnt}},
                step=counter
            )
            sum_loss, sum_acc = 0.0, 0.0
            counter += 1
        if (step + 1) % (valid_every * model.grad_accum_cnt) == 0:
            acc, P_R_F1 = ContrastiveInference(model, dev_set)
            print(f"validation: loss={loss}, acc/best acc={acc}/{best_valid_acc}, P_R_F1=", P_R_F1)
            if acc > best_valid_acc:
                best_valid_acc = acc
                model.save_model(model_file)
                accIncrement_counter = 0
            else:
                # early stopping after 5 validations with no improvement
                accIncrement_counter += 1
                if accIncrement_counter == 5:
                    return


# Checkpoint paths of two independently trained BiGCN runs, one per held-out
# topic (the numeric suffix is that checkpoint's accuracy). Not referenced
# below — presumably kept for related experiments; verify before deleting.
BiGCN1_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.81.pkl",
               "../../saved/TFIDF_BiGCN_ferguson_0.74.pkl",
               "../../saved/TFIDF_BiGCN_germanwings-crash_0.70.pkl",
               "../../saved/TFIDF_BiGCN_ottawashooting_0.68.pkl",
               "../../saved/TFIDF_BiGCN_sydneysiege_0.67.pkl"
               ]

BiGCN2_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.80.pkl",
               "../../saved/TFIDF_BiGCN_ferguson_0.75.pkl",
               "../../saved/TFIDF_BiGCN_germanwings-crash_0.68.pkl",
               "../../saved/TFIDF_BiGCN_ottawashooting_0.70.pkl",
               "../../saved/TFIDF_BiGCN_sydneysiege_0.66.pkl"
               ]

# Topic index held out as the target (new) domain, and few-shot sample budget.
# `domain_ID` is also read as a global by ContrastiveLoss and InferPerf.
domain_ID = 4
few_shot_cnt = 100
# Load the few-shot / old-domain / new-domain splits for the chosen topic.
few_shot_set, old_domain, new_domain = obtain_Domain_set(
                                            f"../../data/twitter_fs{domain_ID}_{few_shot_cnt}",
                                            f"../../data/twitter_od{domain_ID}_{few_shot_cnt}",
                                            f"../../data/twitter_nd{domain_ID}_{few_shot_cnt}"
                                        )
# Fit (or reload from cache) a TF-IDF vectorizer over every lemmatized tweet
# text across the three splits; the fitted vectorizer feeds obtain_model().
Tf_Idf_twitter_file = "../../saved/TfIdf_twitter.pkl"
if os.path.exists(Tf_Idf_twitter_file):
    with open(Tf_Idf_twitter_file, "rb") as fr:
        # NOTE(review): unpickling a cached model — only safe for trusted files
        tv = pickle.load(fr)
else:
    lemma = Lemma_Factory()
    corpus = [" ".join(lemma(txt)) for data in [few_shot_set, old_domain, new_domain]
                                    for ID in data.data_ID for txt in data.data[ID]['text']]
    tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
    _ = tv.fit_transform(corpus)
    with open(Tf_Idf_twitter_file, "wb") as fw:
        pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)

# Create a fresh fitlog directory named after this script.
# Fixes: str.rstrip(".py") strips any trailing '.', 'p', 'y' CHARACTERS
# (e.g. "copy.py" -> "co"), so use os.path.splitext to drop the extension;
# shell `mkdir`/`rm -rf` via os.system replaced with shutil/os equivalents.
log_dir = os.path.splitext(str(__file__))[0]
# log_dir = "MetaSelfTrain_0"
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)  # wipe any previous run's logs
os.makedirs(log_dir)
fitlog.set_log_dir("%s/" % log_dir, new_log=True)

# Build the detector, restore the domain-contrastive checkpoint, and probe
# domain-inference performance of the merged source data against the new domain.
model1 = obtain_model(tv)
# NOTE(review): loads "DomainContrastive.pkl" while Training() below saves to
# "DomainContrastive.pth" — confirm both file names are intended.
model1.load_model("./DomainContrastive.pkl")
query_set = Convert_2_BiGCNFormat(Merge_data(few_shot_set, old_domain))
support_set = Convert_2_BiGCNFormat(new_domain)

InferPerf(model1, query_set, support_set)

import pickle  # NOTE(review): redundant — pickle is already imported at the top of the file
# Cache the train/dev split so repeated runs skip the expensive raw-data load.
if os.path.exists("./train_set.pkl") and os.path.exists("./dev_set.pkl"):
    with open("./train_set.pkl", "rb")  as fr:
        train_set = pickle.load(fr)
    with open("./dev_set.pkl", "rb") as fr:
        dev_set = pickle.load(fr)
else:
    train_set = BiGCNTwitterSet(batch_size=32)
    train_set.load_data(data_path = "../../../pheme-rnr-dataset/")
    # 90% train / 10% dev
    train_set, dev_set = train_set.split(percent=[0.9, 1.0])
    with open("./train_set.pkl", "wb") as fw:
        pickle.dump(train_set, fw, protocol=pickle.HIGHEST_PROTOCOL)
    with open("./dev_set.pkl", "wb") as fw:
        pickle.dump(dev_set, fw, protocol=pickle.HIGHEST_PROTOCOL)

Training(model1, train_set, dev_set)