import sys
sys.path.append("../../")
sys.path.append("../")
from Dataloader.dataloader_utils import Sample_data, Merge_data, Lemma_Factory
from Dataloader.twitterloader import TwitterSet, BiGCNTwitterSet
from SentModel.Sent2Vec import TFIDFBasedVec, W2VRDMVec
from PropModel.GraphPropagation import BiGCN
from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec
from RumdetecFramework.BaseRumorFramework import RumorDetection
from RumdetecFramework.InstanceReweighting import InstanceReweighting, update_params
import copy
from RumdetecFramework.InstanceReweighting import update_current_devices, to_var, params
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, precision_score, \
            recall_score, f1_score,precision_recall_fscore_support
from typing import Tuple, List, Callable, AnyStr
import torch.nn.functional as F
import math
import pickle
import matplotlib.pyplot as plt
import torch
from sklearn.manifold import TSNE
import numpy as np
import torch.nn as nn
import random
import os
import shutil
import fitlog

def pred_Logits(model:RumorDetection, data, idxs=None, batch_size=20):
    """Run `model` over `data` in mini-batches and return the stacked outputs.

    Args:
        model: detector exposing `forward(batch)`.
        data: dataset exposing `__getitem__`, `__len__` and `collate_raw_batch`.
        idxs: dataset indices to score; defaults to every sample.
        batch_size: number of samples per forward pass.

    Returns:
        A single tensor of per-sample outputs, concatenated along dim 0.
    """
    if idxs is None:
        idxs = list(range(len(data)))
    batch_outputs = []
    # no gradients needed: inference only
    with torch.no_grad():
        for start in range(0, len(idxs), batch_size):
            chunk = idxs[start:start + batch_size]
            raw_batch = data.collate_raw_batch([data[k] for k in chunk])
            batch_outputs.append(model.forward(raw_batch))
    return torch.cat(batch_outputs)

def prediction(model:RumorDetection, data, idxs=None, batch_size=20):
    """Return (predicted class, its score) per sample, assuming 2 classes.

    With two output columns, column 1 of an ascending sort holds the larger
    score and the corresponding index is the argmax.
    """
    logits = pred_Logits(model, data, idxs, batch_size)
    sorted_vals, sorted_cols = logits.sort(dim=1)
    return sorted_cols[:, 1], sorted_vals[:, 1]

def acc_P_R_F1(y_true, y_pred):
    """Return accuracy and sklearn's (precision, recall, F1, support) arrays.

    `y_pred` is a torch tensor and is moved to CPU before scoring.
    """
    y_pred_cpu = y_pred.cpu()
    acc = accuracy_score(y_true, y_pred_cpu)
    prfs = precision_recall_fscore_support(y_true, y_pred_cpu)
    return acc, prfs

def Perf(model:RumorDetection, data, label, idxs=None, batch_size=20):
    """Score `model` on (a subset of) `data` against ground-truth `label`.

    Returns the same (accuracy, prfs) pair as acc_P_R_F1.
    """
    y_pred, _ = prediction(model, data, idxs=idxs, batch_size=batch_size)
    if idxs is None:
        y_true = label
    else:
        y_true = label[idxs]
    return acc_P_R_F1(y_true, y_pred)

def WeakLabeling(model:RumorDetection, data, logits=None, batch_size=20):
    """Pseudo-label `data` in place with `model` predictions and score entropy.

    Overwrites `data.data_y[idx]` for each re-labelled sample with a hard
    0/1 row obtained by thresholding the model outputs at 0.5.

    Args:
        model: detector whose outputs are treated as class scores in (0, 1].
        data: dataset; mutated in place (pseudo labels written to `data_y`).
        logits: optional per-sample confidence vector; only entries != 1.0
            are re-labelled (1.0 presumably marks already-trusted labels —
            TODO confirm against callers).
        batch_size: prediction batch size.

    Returns:
        (entrophy, predicted top class, confidences).
        NOTE(review): `entrophy` covers the whole dataset, but when `logits`
        is given the second return only covers the re-labelled subset —
        verify callers index these consistently.
    """
    if logits is not None:
        # select only the samples whose confidence is not exactly 1.0
        c_idxs = torch.arange(len(data))[logits.__ne__(1.0)].tolist()
    else:
        c_idxs = torch.arange(len(data)).tolist()
    pred_tensor = pred_Logits(model, data, idxs=c_idxs, batch_size=batch_size)
    # ascending sort: for the 2-class case, column 1 of `confs` is the max
    # score and column 1 of `preds` is the argmax class
    confs, preds = pred_tensor.sort(dim=1)
    # hard one-hot-style label rows from thresholding the raw outputs
    weak_label = (pred_tensor > 0.5).long().tolist()
    for i, idx in enumerate(c_idxs):
        data.data_y[idx] = weak_label[i]
    # entropy H = -sum(p * log p); abs() flips the sign of log p for p in (0, 1]
    entrophy = torch.zeros([len(data)], device=model.device)
    entrophy[c_idxs] = (confs.log().abs() * confs).sum(dim=1)
    if logits is not None:
        # refresh the unconfident entries with the new top-class confidence
        logits[logits.__ne__(1.0)] = confs[:, 1]
        return entrophy, preds[:, 1], logits
    else:
        return entrophy, preds[:, 1], confs[:, 1]

def obtain_model(tfidf_vec) -> BiGCNRumorDetec:
    """Assemble a TFIDF-embedding + BiGCN + linear-head rumor detector."""
    sentence_encoder = TFIDFBasedVec(
        tfidf_vec, 20, embedding_size=300,
        w2v_dir="../../saved/glove_en/",
        emb_update=True,
    )
    propagation = BiGCN(300, 256)
    classifier = nn.Linear(1024, 2)
    return BiGCNRumorDetec(sentence_encoder, propagation, classifier,
                           batch_size=20, grad_accum_cnt=1)

def obtain_Domain_set(fs_prefix, od_prefix, nd_prefix) -> Tuple[TwitterSet, TwitterSet, TwitterSet]:
    """Load three BiGCNTwitterSet datasets from their file prefixes.

    Returns the datasets in argument order: (fs, od, nd).
    """
    loaded = []
    for prefix in (fs_prefix, od_prefix, nd_prefix):
        dataset = BiGCNTwitterSet()
        dataset.load_data_fast(data_prefix=prefix)
        loaded.append(dataset)
    return loaded[0], loaded[1], loaded[2]

def Convert_2_BiGCNFormat(data) -> BiGCNTwitterSet:
    """Wrap a dataset's fields in a fresh BiGCNTwitterSet (references shared)."""
    converted = BiGCNTwitterSet()
    for attr in ("data", "data_ID", "data_len", "data_y"):
        setattr(converted, attr, getattr(data, attr))
    return converted

def BiGCNEvaluater(data_set:BiGCNTwitterSet, label):
    """Build a closure reporting (acc, prec, rec, f1) for class 1 on `data_set`."""
    def evaluater(model:BiGCNRumorDetec):
        acc, (prec, rec, f1, _) = Perf(model, data_set, label)
        # report the positive-class (index 1) scores
        return acc, prec[1], rec[1], f1[1]
    return evaluater

def balancedTrainingIter(weak_set, batch_size, valid_idxs=None):
    """Create a class-balanced mini-batch iterator over `weak_set`.

    Each yielded batch holds `major_batchsize` majority-class samples
    followed by `minor_batchsize` minority-class samples; minority indices
    are recycled via modulo so one pass covers the majority class exactly
    once.

    Args:
        weak_set: dataset exposing `data_y` (one-hot-style label rows),
            `__getitem__`, `__len__` and `collate_raw_batch`.
        batch_size: desired total batch size (shrunk to the majority-class
            size when that is smaller).
        valid_idxs: indices eligible for training; defaults to the whole set.

    Returns:
        (major_batchsize, minor_batchsize, dataIter) where dataIter() yields
        (index_list, collated_batch) pairs.
    """
    if valid_idxs is None:
        # Bug fix: the original dereferenced `valid_idxs` before handling the
        # documented None default, crashing on `tensor[None]`/`torch.tensor(None)`.
        valid_idxs = list(range(len(weak_set)))
    valid_idxs = valid_idxs if isinstance(valid_idxs, torch.Tensor) else torch.tensor(valid_idxs)
    labels = torch.tensor(weak_set.data_y).argmax(dim=1)[valid_idxs]
    pos_idxs = valid_idxs[labels.__eq__(1)].tolist()
    neg_idxs = valid_idxs[labels.__eq__(0)].tolist()
    # random.sample(x, len(x)) is simply a shuffled copy
    if len(pos_idxs) > len(neg_idxs):
        major_idxs = random.sample(pos_idxs, len(pos_idxs))
        minor_idxs = random.sample(neg_idxs, len(neg_idxs))
    else:
        major_idxs = random.sample(neg_idxs, len(neg_idxs))
        minor_idxs = random.sample(pos_idxs, len(pos_idxs))
    major_size, minor_size = len(major_idxs), len(minor_idxs)
    batch_size = batch_size if major_size > batch_size else major_size
    minor_batchsize = min(batch_size // 2, minor_size)
    major_batchsize = batch_size - minor_batchsize
    def dataIter():
        if major_size == 0:
            # empty selection: nothing to yield (avoids range(..., step=0))
            return
        for i in range(0, major_size, major_batchsize):
            training_idxs = [major_idxs[j % major_size] for j in range(i, i + major_batchsize)] + \
                            [minor_idxs[j % minor_size] for j in range(i, i + minor_batchsize)]
            yield training_idxs, weak_set.collate_raw_batch([weak_set[j] for j in training_idxs])
    return major_batchsize, minor_batchsize, dataIter

def DownSamplingIter(weak_set, batch_size, valid_idxs=None):
    """Yield class-balanced batches, covering the minority class once.

    Unlike balancedTrainingIter (one pass over the majority class), this
    generator stops after one pass over the *minority* class, i.e. it
    down-samples the majority class. Batch layout is the same: majority
    indices first, minority indices last.

    Args:
        weak_set: dataset exposing `data_y`, `__getitem__`, `__len__`
            and `collate_raw_batch`.
        batch_size: desired total batch size (shrunk to the majority-class
            size when that is smaller).
        valid_idxs: indices eligible for training; defaults to the whole set.

    Yields:
        (index_list, collated_batch) pairs.
    """
    if valid_idxs is None:
        # Bug fix: the None default previously crashed (None used as an index).
        valid_idxs = list(range(len(weak_set)))
    valid_idxs = valid_idxs if isinstance(valid_idxs, torch.Tensor) else torch.tensor(valid_idxs)
    labels = torch.tensor(weak_set.data_y).argmax(dim=1)[valid_idxs]
    pos_idxs = valid_idxs[labels.__eq__(1)].tolist()
    neg_idxs = valid_idxs[labels.__eq__(0)].tolist()
    # random.sample(x, len(x)) is simply a shuffled copy
    if len(pos_idxs) > len(neg_idxs):
        major_idxs = random.sample(pos_idxs, len(pos_idxs))
        minor_idxs = random.sample(neg_idxs, len(neg_idxs))
    else:
        major_idxs = random.sample(neg_idxs, len(neg_idxs))
        minor_idxs = random.sample(pos_idxs, len(pos_idxs))
    major_size, minor_size = len(major_idxs), len(minor_idxs)
    batch_size = batch_size if major_size > batch_size else major_size
    minor_batchsize = min(batch_size // 2, minor_size)
    major_batchsize = batch_size - minor_batchsize
    if minor_batchsize == 0:
        # Bug fix: a single-class (or empty) selection made the original raise
        # "range() arg 3 must not be zero"; yield nothing instead.
        return
    for i in range(0, minor_size, minor_batchsize):
        training_idxs = [major_idxs[j % major_size] for j in range(i, i + major_batchsize)] + \
                        [minor_idxs[j % minor_size] for j in range(i, i + minor_batchsize)]
        yield training_idxs, weak_set.collate_raw_batch([weak_set[j] for j in training_idxs])

def featureVisualization(dataset:TwitterSet, batchsize=20):
    """Return a callback that t-SNE-plots model features of `dataset`.

    The callback embeds every sample with `model.Batch2Vecs`, prints a crude
    pairwise validation score (a pair is predicted "same class" when the
    angular distance between feature vectors is below pi/2) and saves a
    t-SNE scatter plot to `{log_dir}/featureVisualization_{iter}.png`.
    """
    def Visualization(model, iter):
        pos_cnt = dataset.labelTensor.sum()
        neg_cnt = len(dataset) - pos_cnt
        # sort by label so all negatives precede all positives in `feats`
        idxs = dataset.labelTensor.argsort()
        vecs = []
        with torch.no_grad():
            for i in range(0, len(idxs), batchsize):
                vec = model.Batch2Vecs(dataset.collate_raw_batch([dataset[idx] for idx in idxs[i:i+batchsize]]))
                vecs.append(vec)
        feats = torch.cat(vecs)
        # pairwise cosine similarity
        cosine_score = torch.matmul(feats, feats.T) / torch.matmul(
            feats.norm(2, dim=1).unsqueeze(1),
            feats.norm(2, dim=1).unsqueeze(0)
        )
        # Bug fix: floating-point error can push cosine values slightly outside
        # [-1, 1], where acos() yields NaN; the old "- 1e-6" shift only hid
        # overshoot smaller than 1e-6. Clamp into the open interval instead.
        sim = cosine_score.clamp(-1.0 + 1e-6, 1.0 - 1e-6).acos() / np.pi
        preds = sim.__lt__(0.5).int()
        # block-diagonal ground truth: same-class pairs are 1
        label = torch.zeros_like(cosine_score)
        label[:neg_cnt, :neg_cnt] = 1
        label[neg_cnt:, neg_cnt:] = 1
        print("validation performance:", acc_P_R_F1(label.reshape(-1).cpu(), preds.reshape(-1).cpu()))
        tsne = TSNE(n_components=2, init='pca', perplexity=5)
        Y = tsne.fit_transform(feats.cpu())
        plt.cla()
        plt.title("t-SNE Visualization")
        plt.scatter(Y[:neg_cnt, 0], Y[:neg_cnt, 1], c='g', label="negtive sample")
        plt.scatter(Y[neg_cnt:, 0], Y[neg_cnt:, 1], c="r", label="positive sample")
        plt.savefig(f"{log_dir}/featureVisualization_{iter}.png")
    return Visualization

def ContrastivePretrain(model:RumorDetection, weak_set:TwitterSet, weak_set_label:torch.Tensor,
               max_epoch:int, batch_size:int, train_indices:List, visual=None):
    """Contrastively fine-tune `model` so same-class samples embed closer.

    Each balanced batch holds `major_batchsize` majority-class samples first
    and minority-class samples last; the objective increases within-class
    cosine similarity and decreases cross-class similarity.

    Args:
        model: detector exposing Batch2Vecs / named_parameters / device.
        weak_set: (pseudo-)labelled dataset; labels read from `data_y`.
        weak_set_label: ground-truth labels, used only for the perf printout.
        max_epoch: number of passes over the balanced training indices.
        batch_size: total contrastive batch size.
        train_indices: dataset indices to train on.
        visual: optional callback `visual(model, epoch)` for t-SNE snapshots.
    """
    labels, pseaudo_labels = weak_set_label.to(model.device), torch.tensor(weak_set.data_y, device=model.device)
    print("trainSet perf:", acc_P_R_F1(labels[train_indices].cpu(), pseaudo_labels.argmax(dim=1)[train_indices].cpu()))
    # Bug fix: lr was -5e-4, but torch.optim.Adam rejects negative learning
    # rates (ValueError). The sign was evidently meant to *ascend* the
    # similarity objective, so keep lr positive and minimize the negated
    # objective in the loop below instead.
    lr = 5e-4
    optimizer_grouped_parameters = [
        {'params': p,
         'lr': (lr * 0.1 if "embedding" in n else lr)}  # embeddings update 10x slower
        for n, p in model.named_parameters()
    ]
    model_optim = torch.optim.Adam(optimizer_grouped_parameters)
    for epoch in range(max_epoch):
        start = 0
        sum_loss = 0.
        major_batchsize, minor_batchsize, dataIter = balancedTrainingIter(weak_set, batch_size, train_indices)
        for batch_idxs, batch in dataIter():
            vecs = model.Batch2Vecs(batch)
            # pairwise cosine similarity within the batch
            cosine_socre = torch.matmul(vecs, vecs.T)/torch.matmul(
                                vecs.norm(2,dim=1).unsqueeze(1),
                                vecs.norm(2, dim=1).unsqueeze(0)
            )
            # minimize cross-class similarity, maximize within-class similarity
            # (negation of the original objective, paired with the positive lr)
            loss = cosine_socre[major_batchsize:batch_size, :major_batchsize].mean() \
                        - cosine_socre[:major_batchsize, :major_batchsize].mean() \
                        - cosine_socre[major_batchsize:batch_size, major_batchsize:batch_size].mean()
            model_optim.zero_grad()
            loss.backward()
            model_optim.step()
            torch.cuda.empty_cache()
            print('####Model Update (%3d | %3d) ####, loss = %6.8f' % (
                start, len(train_indices), loss.data
            ))
            sum_loss += loss.data
            start += batch_size
        mean_loss = (sum_loss * 1.0) / ((len(train_indices) // batch_size) + 1)
        print("mean loss:", mean_loss)
        if visual is not None:
            visual(model, epoch+1)
            print("complete visualization")

def ModelTrain(model:RumorDetection, init_model:RumorDetection, weak_set:TwitterSet, weak_set_label:torch.Tensor,
               max_epoch:int, batch_size:int, train_indices:List, evaluator=None):
    """Train `model` on pseudo labels with a self-distillation regularizer.

    Each epoch snapshots the student's weights into `init_model` (the
    teacher) and then minimizes NLL on the pseudo labels plus 10x a
    distillation term that keeps the student close to the teacher's
    predictions.

    Args:
        model: the student being updated.
        init_model: model reloaded every epoch with the student's weights
            and used (no-grad) as the distillation teacher.
        weak_set: dataset whose `data_y` carries the pseudo labels.
        weak_set_label: ground-truth labels, used only for the perf printout.
        max_epoch: number of training epochs.
        batch_size: balanced-batch size.
        train_indices: dataset indices to train on.
        evaluator: optional callable returning (acc, prec, rec, f1).
    """
    labels, pseaudo_labels = weak_set_label.to(model.device), torch.tensor(weak_set.data_y, device=model.device)
    print("trainSet perf:", acc_P_R_F1(labels[train_indices].cpu(), pseaudo_labels.argmax(dim=1)[train_indices].cpu()))
    lr = 5e-4
    optimizer_grouped_parameters = [
        {'params': p,
         'lr': (lr * 0.1 if "embedding" in n else lr)}  # embeddings update 10x slower
        for n, p in model.named_parameters()
    ]
    model_optim = torch.optim.Adam(optimizer_grouped_parameters)
    for epoch in range(max_epoch):
        start = 0
        sum_loss = 0.
        # teacher := current student weights for this epoch's distillation
        init_model.load_state_dict(model.state_dict())
        major_batchsize, minor_batchsize, dataIter = balancedTrainingIter(weak_set, batch_size, train_indices)
        for batch_idxs, batch in dataIter():
            S_Preds = model.predict(batch)
            epsilon = torch.ones_like(S_Preds) * 1e-8
            S_Preds = (S_Preds - epsilon).abs()  # to avoid the prediction [1.0, 0.0], which leads to the 'nan' value in log operation
            # NOTE(review): rebinds the outer `labels`; batch[-2] presumably
            # holds the batch's label tensor — confirm against collate_raw_batch
            labels = batch[-2].to(S_Preds.device)
            label_loss = F.nll_loss(S_Preds.log(), labels)
            with torch.no_grad():
                T_Preds = init_model.predict(batch)
                T_Preds = T_Preds.to(model.device)
            # cross-entropy of the student against the (soft) teacher predictions
            distill_loss = -1*(T_Preds*(S_Preds.log())).sum(dim=1).mean()
            loss = label_loss + 10*distill_loss
            model_optim.zero_grad()
            loss.backward()
            model_optim.step()
            torch.cuda.empty_cache()
            print('####Model Update (%3d | %3d) ####, loss = %6.8f' % (
                start, len(train_indices), loss.data.mean()
            ))
            sum_loss += loss.data
            start += batch_size
        mean_loss = (sum_loss * 1.0) / ((len(train_indices) // batch_size) + 1)
        print("mean loss:", mean_loss)
        if evaluator is not None:
            val_acc, prec, rec, f1 = evaluator(model)
            print(
                '##### %6d | %6d, val_acc/val_prec/val_rec/val_f1 = %6.8f/%6.7f/%6.7f/%6.7f' % (
                    epoch, max_epoch,
                    val_acc, prec, rec, f1)
            )
        # if mean_loss < 0.2:  # early stop
        #     break

def SelfTrain(tr_model, init_model, weak_set, pseudo_logits, weak_set_label, tr_model_suffix, train_iter, entrophy=None):
    """One self-training round on confidently pseudo-labelled samples.

    Samples with pseudo-label confidence >= 0.9 are selected, the model is
    trained on them for one epoch, and selection/validation metrics are
    logged to fitlog under `tr_model_suffix`.

    Args:
        tr_model: the model being self-trained.
        init_model: teacher copy used by ModelTrain's distillation term.
        weak_set: dataset carrying pseudo labels in `data_y`.
        pseudo_logits: per-sample pseudo-label confidence scores.
        weak_set_label: ground-truth labels (metric computation only).
        tr_model_suffix: key under which metrics are logged.
        train_iter: self-training round index (used as the fitlog step).
        entrophy: optional per-sample prediction entropy; entropy metrics
            are logged only when provided. (New optional parameter — the
            original body referenced an undefined `entrophy` and raised
            NameError.)
    """
    flags = pseudo_logits.__ge__(0.9)
    valid_idxs = torch.arange(0, len(weak_set), 1)[flags].tolist()
    # Bug fix: the original call passed `pseudo_logits` as an extra positional
    # argument, which does not match ModelTrain's 7-positional signature
    # (model, init_model, weak_set, weak_set_label, max_epoch, batch_size,
    # train_indices) and raised TypeError.
    ModelTrain(tr_model, init_model, weak_set, weak_set_label,
               1, 32, valid_idxs, evaluator=None)
    rst_model1 = Perf(tr_model, weak_set, weak_set_label)
    print(f"{train_iter} : Post-MetaTrain Performance of model1:", rst_model1)
    pseaudo_labels = torch.tensor(weak_set.data_y).argmax(dim=1)
    # quality of the selected pseudo labels against the ground truth
    acc_s, (p_s, r_s, f1_s, _) = acc_P_R_F1(weak_set_label[valid_idxs],
                                            pseaudo_labels[valid_idxs])
    metrics = {"valid_acc": rst_model1[0],
               "valid_prec": rst_model1[1][0][1],
               "valid_recall": rst_model1[1][1][1],
               "valid_f1": rst_model1[1][2][1],
               "selected_num": len(valid_idxs) * 1.0 / len(weak_set),
               "selected_acc": acc_s, "selected_prec": p_s[1],
               "selected_recall": r_s[1], "selected_f1": f1_s[1]}
    if entrophy is not None:
        metrics["init_entrophy"] = entrophy.mean()
        metrics["selected_entrophy"] = entrophy[valid_idxs].mean()
    fitlog.add_metric({f"{tr_model_suffix}": metrics}, step=train_iter)

# Checkpoints of pre-trained TFIDF-BiGCN detectors, one per source event
# (the filename suffix presumably records validation accuracy at save time
# — TODO confirm); indexed by `domain_ID` below.
BiGCN1_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.81.pkl",
               "../../saved/TFIDF_BiGCN_ferguson_0.68.pkl",
               "../../saved/TFIDF_BiGCN_germanwings-crash_0.70.pkl",
               "../../saved/TFIDF_BiGCN_ottawashooting_0.68.pkl",
               "../../saved/TFIDF_BiGCN_sydneysiege_0.67.pkl"
               ]

# A second, independently saved set of checkpoints for the same events.
BiGCN2_Paths = ["../../saved/TFIDF_BiGCN_charliehebdo_0.80.pkl",
               "../../saved/TFIDF_BiGCN_ferguson_0.71.pkl",
               "../../saved/TFIDF_BiGCN_germanwings-crash_0.68.pkl",
               "../../saved/TFIDF_BiGCN_ottawashooting_0.70.pkl",
               "../../saved/TFIDF_BiGCN_sydneysiege_0.66.pkl"
               ]


# log_dir = str(__file__).rstrip(".py")
log_dir = "./MetaSelfTrain_0"
# Recreate the log directory from scratch so every run starts clean.
# os.makedirs/shutil.rmtree replace the former os.system("mkdir"/"rm -rf")
# shell calls, which were non-portable and silently ignored failures.
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)
os.makedirs(log_dir)

few_shot_cnt = 100  # sample count encoded in the dataset file prefixes
fitlog.set_log_dir("%s/" % log_dir, new_log=True)
domain_ID = 1  # which target event (indexes the BiGCN*_Paths lists)
# NOTE(review): obtain_Domain_set returns its datasets in argument order
# (fs, od, nd), so here the "twitter_fs" prefix lands in `new_domain` and
# the "twitter_nd" prefix in `few_shot_set` — confirm the swap is intended.
new_domain, old_domain, few_shot_set = obtain_Domain_set(
                                            f"../../data/twitter_fs{domain_ID}_{few_shot_cnt}",
                                            f"../../data/twitter_od{domain_ID}_{few_shot_cnt}",
                                            f"../../data/twitter_nd{domain_ID}_{few_shot_cnt}"
                                        )
# Load a cached TF-IDF vectorizer, or fit one over every tweet in all
# three splits and cache it for later runs.
Tf_Idf_twitter_file = "../../saved/TfIdf_twitter.pkl"
if os.path.exists(Tf_Idf_twitter_file):
    with open(Tf_Idf_twitter_file, "rb") as fr:
        tv = pickle.load(fr)
else:
    lemma = Lemma_Factory()
    corpus = [" ".join(lemma(txt)) for data in [few_shot_set, old_domain, new_domain]
                                    for ID in data.data_ID for txt in data.data[ID]['text']]
    tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
    _ = tv.fit_transform(corpus)
    with open(Tf_Idf_twitter_file, "wb") as fw:
        pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)

# new_domain = Merge_data(few_shot_set, new_domain)
model = obtain_model(tv)
# target-domain event name (used in the saved checkpoint's filename)
new_domain_name = new_domain.data[new_domain.data_ID[0]]['event']
# hard class labels derived from the one-hot-style data_y rows
new_domain_label = torch.tensor(new_domain.data_y).argmax(dim=1)
# warm-start from the checkpoint pre-trained for this domain split
model.load_model(BiGCN1_Paths[domain_ID])

# t-SNE snapshot before (iter 0) and during contrastive pre-training
visual = featureVisualization(few_shot_set, 20)
visual(model, 0)
ContrastivePretrain(model, new_domain, new_domain_label,
           20, 64, list(range(len(new_domain))), visual)
model.save_model(f"{log_dir}/model1_{new_domain_name}.pth")


