import sys
sys.path.append("../../")
sys.path.append("../")
from Dataloader.dataloader_utils import Sample_data, Merge_data, Lemma_Factory
from Dataloader.twitterloader import TwitterSet, BiGCNTwitterSet
from SentModel.Sent2Vec import TFIDFBasedVec, W2VRDMVec
from PropModel.GraphPropagation import BiGCN
from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec
from RumdetecFramework.BaseRumorFramework import RumorDetection
from RumdetecFramework.InstanceReweighting import MetaEvaluator, WeightedAcc
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, precision_score, \
            recall_score, f1_score,precision_recall_fscore_support
import pickle
import torch
import torch.nn as nn
import random
import torch.nn.functional as F
import os
import gc
import fitlog
from tqdm import trange

def pred_Logits(model:RumorDetection, data, idxs=None, batch_size=20):
    """Run the model over `data` in mini-batches and stack the raw outputs.

    :param model: detector exposing forward() over a collated batch
    :param data: dataset providing __getitem__ and collate_raw_batch
    :param idxs: optional subset of indices; defaults to the whole dataset
    :param batch_size: number of instances per forward pass
    :return: tensor of per-instance outputs, concatenated along dim 0
    """
    if idxs is None:
        idxs = list(range(len(data)))
    outputs = []
    with torch.no_grad():
        for start in range(0, len(idxs), batch_size):
            chunk = idxs[start:start + batch_size]  # slice clamps at the end
            collated = data.collate_raw_batch([data[j] for j in chunk])
            outputs.append(model.forward(collated))
    return torch.cat(outputs)

def prediction(model:RumorDetection, data, idxs=None, batch_size=20):
    """Return (predicted class index, its score) for every instance.

    NOTE(review): sort() is ascending, so column 1 holds the larger score and
    its index — this assumes exactly two output classes (cls is Linear(..., 2)).
    """
    logits = pred_Logits(model, data, idxs, batch_size)
    sorted_vals, sorted_idxs = logits.sort(dim=1)
    return sorted_idxs[:, 1], sorted_vals[:, 1]

def acc_P_R_F1(y_true, y_pred):
    """Accuracy plus (precision, recall, F1, support) for the predictions.

    `y_pred` is a tensor (possibly on GPU); it is moved to the CPU once before
    being handed to sklearn.
    """
    y_pred_cpu = y_pred.cpu()
    acc = accuracy_score(y_true, y_pred_cpu)
    prf = precision_recall_fscore_support(y_true, y_pred_cpu)
    return acc, prf

def Perf(model:RumorDetection, data, label, idxs=None, batch_size=20):
    """Evaluate the model on (a subset of) `data` against `label`."""
    predicted, _ = prediction(model, data, idxs=idxs, batch_size=batch_size)
    gold = label if idxs is None else label[idxs]
    return acc_P_R_F1(gold, predicted)

def WeakLabeling(model:RumorDetection, data, pseaudo_idxs=[], batch_size=20):
    """Pseudo-label every instance of `data` not listed in `pseaudo_idxs`.

    Writes the model's predictions back into `data.data_y` in place and
    returns a per-instance prediction-entropy vector along with the predicted
    class and its confidence for the re-labelled instances.

    NOTE(review): the mutable default `pseaudo_idxs=[]` is shared across
    calls; safe only while callers never mutate it — verify.

    :return: (entropy over all of `data`, zero at untouched indices;
              argmax class per re-labelled instance;
              top confidence per re-labelled instance)
    """
    # indices that still need a (pseudo) label
    c_idxs = list(set(range(len(data))) - set(pseaudo_idxs))
    pred_tensor = pred_Logits(model, data, idxs=c_idxs, batch_size=batch_size)
    # ascending sort: column 1 is the max — assumes exactly two classes
    confs, preds = pred_tensor.sort(dim=1)
    # NOTE(review): this thresholds each class column independently, so every
    # weak label is a per-class 0/1 list rather than a single class id —
    # confirm data.data_y expects that format.
    weak_label = (pred_tensor > 0.5).long().tolist()
    for i, idx in enumerate(c_idxs):
        data.data_y[idx] = weak_label[i]
    entrophy = torch.zeros([len(data)], device=model.device)
    # -sum(p * log p) written as sum(|log p| * p); valid while 0 < p <= 1
    entrophy[c_idxs] = (confs.log().abs() * confs).sum(dim=1)
    return entrophy, preds[:, 1], confs[:, 1]

def obtain_model(tfidf_vec):
    """Assemble a TFIDF-embedding BiGCN rumor detector around `tfidf_vec`."""
    sent2vec = TFIDFBasedVec(
        tfidf_vec,
        20,
        embedding_size=300,
        w2v_dir="../../saved/glove_en/",
        grad_preserve=True,
        emb_update=True,
        aug_type="mix",
    )
    propagation = BiGCN(300, 256)
    classifier = nn.Linear(1024, 2)
    return BiGCNRumorDetec(sent2vec, propagation, classifier,
                           batch_size=20, grad_accum_cnt=1)

def AugLoss(model:BiGCNRumorDetec, batch):
    """NLL loss and accuracy of the model's augmentation-based predictions."""
    probs = model.AugPredict(batch)
    # Nudge probabilities away from exact 0/1 so log() cannot produce nan/inf.
    eps = torch.ones_like(probs) * 1e-8
    probs = (probs - eps).abs()
    gold = batch[-2].to(model.device)
    loss = F.nll_loss(probs.log(), gold)
    acc = accuracy_score(gold.cpu().numpy(), probs.argmax(dim=1).cpu().numpy())
    return loss, acc

def InstanceContrastiveLoss(model:BiGCNRumorDetec, batch1):
    """
    Instance-level contrastive loss plus a weighted classification loss.

    Two stochastic augmented encodings of the same batch are compared: each
    instance's positive is its own second view (the diagonal of the similarity
    matrix) and every other instance acts as a negative.

    NOTE(review): reads the module-level global `domain_ID` (assigned in the
    __main__ block) to mask target-domain labels, and mutates batch1's label
    tensor in place — confirm both are intended at every call site.

    :param model: detector exposing AugBatch2Vecs / rdm_cls
    :param batch1: collated batch; batch1[-2] holds labels, batch1[-1] domain ids
    :return: (contrastive loss + alpha * masked classification loss,
              accuracy of matching each instance to its own augmented view)
    """
    tau = 0.1       # softmax temperature for the scaled cosines
    alpha = 0.15    # weight of the auxiliary classification loss
    # Two forward passes over the same batch -> two augmented views
    vecs1 = model.AugBatch2Vecs(batch1)
    vecs2 = model.AugBatch2Vecs(batch1)
    # Outer product of row norms (+eps) turns the dot products into cosines
    norm_mtx = torch.matmul(vecs1.norm(dim=1).unsqueeze(-1),
                            vecs2.norm(dim=1).unsqueeze(0)) \
                + torch.ones([len(vecs1), len(vecs2)], device=vecs2.device)*1e-8
    cosine = torch.matmul(vecs1, vecs2.T)/norm_mtx
    cosine = cosine/tau
    similarity = cosine.softmax(dim=1)
    epsilon = torch.ones_like(similarity) * 1e-8
    similarity = (similarity- epsilon).abs()  # to avoid the prediction [1.0, 0.0], which leads to the 'nan' value in log operation
    # Positive pair of row i is column i, hence the arange target
    loss = F.nll_loss(similarity.log(), torch.arange(len(epsilon)).to(cosine.device))

    preds = model.rdm_cls(vecs1).softmax(dim=1)
    preds = (preds - torch.ones_like(preds) * 1e-8).abs()
    # Instances from the held-out domain get label -1 and are ignored below
    batch1[-2][batch1[-1].__eq__(domain_ID)] = -1
    rdm_loss = F.nll_loss(preds.log(), batch1[-2].to(preds.device), ignore_index=-1)
    if torch.isnan(loss):
        print("similarity:", similarity.tolist())
    acc = accuracy_score(torch.arange(len(epsilon)),
                         similarity.data.argmax(dim=1).cpu())
    return loss+alpha*rdm_loss, acc

def DomainContrastiveLoss(model:BiGCNRumorDetec, batch1, batch2, sim):
    """
    Domain-level contrastive loss plus a weighted classification loss.

    :param model: detector exposing AugBatch2Vecs / rdm_cls
    :param batch1: the compared instances; count equals the batch size (e.g. 20, 32)
    :param batch2: the anchor instances; count equals the number of topics (e.g. 5 in Pheme)
    :param sim: per compared instance, the index of its matching anchor — the
                target of the cross-entropy over the [batch1, batch2] similarity
    :return: (contrastive loss + alpha * masked classification loss,
              accuracy of picking the right anchor per instance)

    NOTE(review): reads the module-level global `domain_ID` (assigned in the
    __main__ block) and mutates batch1's label tensor in place.
    """
    tau = 0.1       # softmax temperature for the scaled cosines
    alpha = 0.15    # weight of the auxiliary classification loss
    vecs1 = model.AugBatch2Vecs(batch1)
    vecs2 = model.AugBatch2Vecs(batch2)
    # Outer product of row norms (+eps) turns the dot products into cosines
    norm_mtx = torch.matmul(vecs1.norm(dim=1).unsqueeze(-1),
                            vecs2.norm(dim=1).unsqueeze(0)) \
                + torch.ones([len(vecs1), len(vecs2)], device=vecs2.device)*1e-8
    cosine = torch.matmul(vecs1, vecs2.T)/norm_mtx
    cosine = cosine/tau

    similarity = cosine.softmax(dim=1)
    epsilon = torch.ones_like(similarity) * 1e-8
    similarity = (similarity- epsilon).abs()  # to avoid the prediction [1.0, 0.0], which leads to the 'nan' value in log operation
    loss = F.nll_loss(similarity.log(), sim.to(cosine.device))

    preds = model.rdm_cls(vecs1).softmax(dim=1)
    preds = (preds - torch.ones_like(preds)*1e-8).abs()
    # Instances from the held-out domain get label -1 and are ignored below
    batch1[-2][batch1[-1].__eq__(domain_ID)] = -1
    rdm_loss = F.nll_loss(preds.log(), batch1[-2].to(preds.device), ignore_index=-1)
    if torch.isnan(loss):
        print("similarity:", similarity.tolist())
        print("sim_mtx:", sim.tolist())
    acc = accuracy_score(sim, similarity.data.argmax(dim=1).cpu())
    return loss+alpha*rdm_loss, acc

def obtain_Domain_set(fs_prefix, od_prefix, nd_prefix):
    """Load the few-shot, old-domain and new-domain TwitterSet splits."""
    loaded = []
    for prefix in (fs_prefix, od_prefix, nd_prefix):
        dataset = TwitterSet()
        dataset.load_data_fast(data_prefix=prefix)
        loaded.append(dataset)
    return tuple(loaded)

def Convert_2_BiGCNFormat(data):
    """Shallow-copy a dataset's core fields into a BiGCNTwitterSet."""
    converted = BiGCNTwitterSet()
    for attr in ("data", "data_ID", "data_len", "data_y"):
        setattr(converted, attr, getattr(data, attr))
    return converted

def DomainIter(data, batchsize=20, max_iter=-1):
    """Yield (anchor_batch, contrastive_batch, sim) triples for contrastive training.

    The anchor batch holds one randomly drawn instance per topic; the
    contrastive batch is a uniform sample of `batchsize` instances. `sim` is
    the last field of the contrastive batch (its topic/domain id column).
    """
    all_idxs = list(range(len(data)))
    topic_of = [data.data[data.data_ID[i]]['topic_label'] for i in all_idxs]
    n_topics = max(topic_of) + 1
    per_topic = [[i for i, t in enumerate(topic_of) if t == topic]
                 for topic in range(n_topics)]
    if max_iter == -1:
        max_iter = len(data) * (n_topics - 1)
    for _ in range(max_iter):
        anchors = [random.sample(group, 1)[0] for group in per_topic]
        anchor_batch = data.collate_raw_batch([data[i] for i in anchors])
        contrast_idxs = random.sample(all_idxs, batchsize)
        contrastive_batch = data.collate_raw_batch([data[i] for i in contrast_idxs])
        yield anchor_batch, contrastive_batch, contrastive_batch[-1]

def dataset2Vecs(model, dataset, batch_size=20):
    """Encode the whole dataset into (vectors, labels) without gradients.

    :param model: encoder exposing Batch2Vecs over a collated batch
    :param dataset: dataset providing __getitem__ and collate_raw_batch
    :param batch_size: instances per forward pass
    :return: (vecTensor [N, d], labelTensor [N]); labels taken from batch[-1]
    """
    vec_list, label_list = [], []
    with torch.no_grad():
        for i in trange(0, len(dataset), batch_size):
            # BUGFIX: the inner bound was hard-coded as min(i+20, ...) instead
            # of honouring batch_size — batches overlapped when batch_size < 20
            # and silently dropped instances when batch_size > 20.
            end = min(i + batch_size, len(dataset))
            batch = dataset.collate_raw_batch([dataset[j] for j in range(i, end)])
            vecs = model.Batch2Vecs(batch)
            label_list.append(batch[-1])
            vec_list.append(vecs)
        vecTensor = torch.cat(vec_list, dim=0)
        labelTensor = torch.cat(label_list, dim=0)
    return vecTensor, labelTensor

def InstanceContrastiveInference(model, dataset, batch_size=20):
    """Score domain separation: within-domain cosine minus cross-domain cosine.

    Pairs sharing a domain label contribute +cosine, pairs from different
    domains contribute -cosine, self-pairs are removed via the identity
    matrix, and the symmetric double-count is halved.
    """
    vecs, domains = dataset2Vecs(model, dataset, batch_size=batch_size)
    norms = vecs.norm(dim=1)
    cosine = torch.matmul(vecs, vecs.T) / torch.matmul(norms.unsqueeze(-1),
                                                       norms.unsqueeze(0))
    shifted = domains + 1  # shift off zero so the ratio test below is valid
    # ratio[i, j] = shifted[j] / shifted[i], which equals 1 iff same domain
    ratio = torch.matmul(shifted.unsqueeze(1), shifted.unsqueeze(0)) / \
                (shifted * shifted).unsqueeze(1)
    same_domain = ratio.__eq__(1).float()
    # +1 for same-domain pairs, -1 for cross-domain, 0 on the diagonal
    sign_mtx = same_domain + (same_domain - 1) - torch.eye(len(same_domain))
    return (cosine * sign_mtx.to(cosine.device)).sum() / 2.0

def InferPerf(model, query_set, support_set, batch_size=20):
    """Majority-vote domain check of query instances against the support set.

    Dumps the intermediate vectors to InferPerfEnv.pkl for offline inspection,
    then prints acc/P/R/F1 over the queries whose share of positive cosines
    against the support set exceeds 0.5.

    NOTE(review): reads the module-level global `domain_ID` as the expected
    label — confirm it is set before this is called.
    """
    vec_query, label_query = dataset2Vecs(model, query_set, batch_size=batch_size)
    vec_support, label_support = dataset2Vecs(model, support_set, batch_size=batch_size)
    with open("InferPerfEnv.pkl", "wb") as fb:
        pickle.dump((vec_query, vec_support, label_query, label_support),
                    fb, protocol=pickle.HIGHEST_PROTOCOL)
    dots = torch.matmul(vec_query, vec_support.T)
    norm_outer = torch.matmul(vec_query.norm(dim=1).unsqueeze(-1),
                              vec_support.norm(dim=1).unsqueeze(0))
    cosine = dots / norm_outer
    votes = cosine.__gt__(0).int().sum(dim=1)
    votes = votes / (len(support_set) * 1.0)
    selected_label = label_query[votes.__gt__(0.5)]
    print("Inference Performance:", acc_P_R_F1(torch.ones_like(selected_label)*domain_ID, selected_label))

def Training(model:RumorDetection, train_set, dev_set,
                valid_every=100, max_iters=100000, lr_discount=1.0,
                best_valid_acc=0.0, log_dir="../logs/", log_suffix="_RumorDetection",
                model_file="./DomainContrastive.pth", RenameModel=True):
    """Train `model` with the instance-level contrastive objective.

    Draws random 32-instance batches from `train_set`, accumulates gradients
    over `model.grad_accum_cnt` backward passes, and every `valid_every`
    optimizer steps scores `dev_set` with InstanceContrastiveInference,
    saving the model whenever the mutual distance improves.

    :param valid_every: optimizer steps between validation passes
    :param max_iters: total number of backward steps
    :param lr_discount: multiplier applied to all learning rates
    :param best_valid_acc: only echoed in the progress line
    :param log_dir: fitlog output directory
    :param log_suffix, RenameModel: currently unused — kept so the signature
        stays compatible with other Training() variants; TODO confirm
    :param model_file: path the best model is saved to
    """
    fitlog.set_log_dir("%s/" % log_dir, new_log=True)
    # Per-component learning rates, scaled by the gradient-accumulation count
    optim = torch.optim.Adam([
        {'params': model.sent2vec.parameters(), 'lr': 5e-7 * lr_discount / model.grad_accum_cnt},
        {'params': model.prop_model.parameters(), 'lr': 1e-5 * lr_discount / model.grad_accum_cnt},
        {'params': model.rdm_cls.parameters(), 'lr': 1e-5 * lr_discount / model.grad_accum_cnt}
    ])
    counter = 0
    optim.zero_grad()
    model.train()
    sum_loss, sum_acc = 0.0, 0.0
    max_dist = -1e8
    train_idxs = list(range(len(train_set)))
    for step in range(max_iters):
        idxs = random.sample(train_idxs, 32)
        batch = train_set.collate_raw_batch([train_set[idx] for idx in idxs])
        loss, acc = InstanceContrastiveLoss(model, batch)
        loss.backward()
        model.sent2vec.PreserveGrad()
        torch.cuda.empty_cache()
        gc.collect()
        # BUGFIX: accumulate the detached scalar. `sum_loss += loss` kept each
        # iteration's autograd graph alive through the running sum, steadily
        # leaking (GPU) memory.
        sum_loss += loss.item()
        sum_acc += acc
        if (step + 1) % model.grad_accum_cnt == 0:
            optim.step()
            optim.zero_grad()
            print('%6d | %6d, loss/acc = %6.8f/%6.7f, best_valid_acc:%6.7f ' % (
                step, max_iters,
                sum_loss / model.grad_accum_cnt, sum_acc / model.grad_accum_cnt,
                best_valid_acc
            )
                  )
            fitlog.add_metric(
                {"train": {"acc": sum_acc / model.grad_accum_cnt, "loss": sum_loss / model.grad_accum_cnt}},
                step=counter
            )
            sum_loss, sum_acc = 0.0, 0.0
            counter += 1
        if (step+1)%(model.grad_accum_cnt*valid_every)==0:
            mutual_dist = InstanceContrastiveInference(model, dev_set, batch_size=20)
            print("=======> Mutual Distance:", mutual_dist)
            if mutual_dist > max_dist:
                max_dist = mutual_dist
                model.save_model(model_file)

if __name__ == "__main__":  # BUGFIX: was "main", so the script body never ran
    # Pretrained per-domain checkpoints — unused below, kept for reference
    BiGCN1_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.81.pkl",
                   "../../saved/TFIDF_BiGCN_ferguson_0.74.pkl",
                   "../../saved/TFIDF_BiGCN_germanwings-crash_0.70.pkl",
                   "../../saved/TFIDF_BiGCN_ottawashooting_0.68.pkl",
                   "../../saved/TFIDF_BiGCN_sydneysiege_0.67.pkl"
                   ]

    BiGCN2_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.80.pkl",
                   "../../saved/TFIDF_BiGCN_ferguson_0.75.pkl",
                   "../../saved/TFIDF_BiGCN_germanwings-crash_0.68.pkl",
                   "../../saved/TFIDF_BiGCN_ottawashooting_0.70.pkl",
                   "../../saved/TFIDF_BiGCN_sydneysiege_0.66.pkl"
                   ]

    # `domain_ID` is read as a module-level global by the loss functions above
    domain_ID = 4
    few_shot_cnt = 100
    few_shot_set, old_domain, new_domain = obtain_Domain_set(
                                                f"../../data/twitter_fs{domain_ID}_{few_shot_cnt}",
                                                f"../../data/twitter_od{domain_ID}_{few_shot_cnt}",
                                                f"../../data/twitter_nd{domain_ID}_{few_shot_cnt}"
                                            )
    # Load the cached TF-IDF vectorizer, or fit one over all three splits
    Tf_Idf_twitter_file = "../../saved/TfIdf_twitter.pkl"
    if os.path.exists(Tf_Idf_twitter_file):
        with open(Tf_Idf_twitter_file, "rb") as fr:
            tv = pickle.load(fr)
    else:
        lemma = Lemma_Factory()
        corpus = [" ".join(lemma(txt)) for data in [few_shot_set, old_domain, new_domain]
                                        for ID in data.data_ID for txt in data.data[ID]['text']]
        tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
        _ = tv.fit_transform(corpus)
        with open(Tf_Idf_twitter_file, "wb") as fw:
            pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)

    model1 = obtain_model(tv)
    # model1.load_model("./DomainContrastive.pkl")
    # query_set = Convert_2_BiGCNFormat(Merge_data(few_shot_set, old_domain))
    # support_set = Convert_2_BiGCNFormat(new_domain)
    # InferPerf(model1, query_set, support_set)

    # Load cached train/dev splits if present, otherwise build and cache them
    # (pickle is already imported at the top of the file)
    if os.path.exists("./train_set.pkl") and os.path.exists("./dev_set.pkl"):
        with open("./train_set.pkl", "rb") as fr:
            train_set = pickle.load(fr)
        with open("./dev_set.pkl", "rb") as fr:
            dev_set = pickle.load(fr)
    else:
        train_set = BiGCNTwitterSet(batch_size=32)
        train_set.load_data(data_path = "../../../pheme-rnr-dataset/")
        train_set, dev_set = train_set.split(percent=[0.9, 1.0])
        with open("./train_set.pkl", "wb") as fw:
            pickle.dump(train_set, fw, protocol=pickle.HIGHEST_PROTOCOL)
        with open("./dev_set.pkl", "wb") as fw:
            pickle.dump(dev_set, fw, protocol=pickle.HIGHEST_PROTOCOL)

    Training(model1, train_set, dev_set, model_file="./InstanceContrastive.pth")