import sys
sys.path.append("../../")
sys.path.append("../")

# --- standard library ---
import copy
import os
import pickle
import random
import shutil
from typing import Tuple, List, Callable, AnyStr

# --- third-party ---
import fitlog
import torch
import torch.nn as nn
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, precision_score, \
            recall_score, f1_score,precision_recall_fscore_support

# --- project-local (reachable via the sys.path entries above) ---
from Dataloader.dataloader_utils import Sample_data, Merge_data, Lemma_Factory
from Dataloader.twitterloader import TwitterSet, BiGCNTwitterSet
from SentModel.Sent2Vec import TFIDFBasedVec, W2VRDMVec
from PropModel.GraphPropagation import BiGCN
from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec
from RumdetecFramework.BaseRumorFramework import RumorDetection
from RumdetecFramework.InstanceReweighting import InstanceReweighting
from RumdetecFramework.InstanceReweighting import update_current_devices, to_var

def pred_Logits(model:RumorDetection, data, idxs=None, batch_size=20):
    """Run `model` over `data` in mini-batches and return the stacked outputs.

    :param model: rumor-detection model exposing a `forward(batch)` method
    :param data: dataset supporting `__getitem__`, `__len__` and `collate_raw_batch`
    :param idxs: optional subset of sample indices; defaults to the whole set
    :param batch_size: number of samples per forward pass
    :return: per-sample output tensor, concatenated along dim 0
    """
    if idxs is None:
        idxs = list(range(len(data)))
    outputs = []
    # inference only — no autograd graph needed
    with torch.no_grad():
        for start in range(0, len(idxs), batch_size):
            chunk = idxs[start:start + batch_size]
            raw_batch = data.collate_raw_batch([data[j] for j in chunk])
            outputs.append(model.forward(raw_batch))
    return torch.cat(outputs)

def prediction(model:RumorDetection, data, idxs=None, batch_size=20):
    """Return (predicted_class, confidence) for each sample.

    Each row is sorted ascending, so column 1 holds the larger score and its
    class index — this assumes exactly two classes (column index 1 is
    hard-coded).
    """
    scores = pred_Logits(model, data, idxs, batch_size)
    sorted_vals, sorted_cols = torch.sort(scores, dim=1)
    return sorted_cols[:, 1], sorted_vals[:, 1]

def acc_P_R_F1(y_true, y_pred):
    """Return (accuracy, (precision, recall, f1, support)) via sklearn.

    `y_pred` is expected to be a torch tensor (possibly on GPU); it is moved
    to CPU once before being handed to sklearn.
    """
    y_pred_cpu = y_pred.cpu()
    accuracy = accuracy_score(y_true, y_pred_cpu)
    prf_support = precision_recall_fscore_support(y_true, y_pred_cpu)
    return accuracy, prf_support

def Perf(model:RumorDetection, data, label, idxs=None, batch_size=20):
    """Evaluate `model` on `data` (optionally a subset `idxs`).

    :return: (accuracy, (precision, recall, f1, support)) as produced by
        acc_P_R_F1
    """
    predicted, _confidence = prediction(model, data, idxs=idxs, batch_size=batch_size)
    gold = label if idxs is None else label[idxs]
    return acc_P_R_F1(gold, predicted)

def WeakLabeling(model:RumorDetection, data, logits=None, batch_size=20):
    """Pseudo-label `data` in place with the model's predictions.

    Samples whose entry in `logits` equals exactly 1.0 are treated as pinned
    and skipped; all other entries of `data.data_y` are overwritten with the
    thresholded model output. Also computes a per-sample entropy-style score
    (zero for skipped samples).

    :return: (entropy_scores, predicted_class_column, confidences) — when
        `logits` is given it is updated in place and returned as the third
        element.
    """
    if logits is None:
        c_idxs = torch.arange(len(data)).tolist()
    else:
        # only relabel samples whose confidence marker is not pinned to 1.0
        c_idxs = torch.arange(len(data))[logits != 1.0].tolist()
    pred_tensor = pred_Logits(model, data, idxs=c_idxs, batch_size=batch_size)
    confs, preds = pred_tensor.sort(dim=1)
    # hard pseudo labels: per-class 0/1 rows from thresholding the raw outputs
    weak_label = (pred_tensor > 0.5).long().tolist()
    for pos, sample_idx in enumerate(c_idxs):
        data.data_y[sample_idx] = weak_label[pos]
    entropy_scores = torch.zeros([len(data)], device=model.device)
    entropy_scores[c_idxs] = (confs * confs.log().abs()).sum(dim=1)
    if logits is None:
        return entropy_scores, preds[:, 1], confs[:, 1]
    logits[logits != 1.0] = confs[:, 1]
    return entropy_scores, preds[:, 1], logits

def obtain_model(tfidf_vec) -> BiGCNRumorDetec:
    """Assemble a BiGCN rumor detector on top of a fitted TF-IDF vectorizer.

    Pipeline: TF-IDF/GloVe sentence encoder (300-d, trainable embeddings)
    -> bi-directional GCN propagation (300 -> 256) -> linear 2-way classifier.
    """
    sent_encoder = TFIDFBasedVec(
        tfidf_vec,
        20,
        embedding_size=300,
        w2v_dir="../../saved/glove_en/",
        emb_update=True,
    )
    propagator = BiGCN(300, 256)
    classifier = nn.Linear(1024, 2)
    return BiGCNRumorDetec(sent_encoder, propagator, classifier,
                           batch_size=20, grad_accum_cnt=1)

def obtain_Domain_set(fs_prefix, od_prefix, nd_prefix) -> Tuple[TwitterSet, TwitterSet, TwitterSet]:
    """Load the few-shot, old-domain and new-domain splits from disk.

    Each prefix is handed to BiGCNTwitterSet.load_data_fast unchanged.
    """
    def _load(prefix):
        # one dataset per prefix, loaded via the fast (cached) path
        dataset = BiGCNTwitterSet()
        dataset.load_data_fast(data_prefix=prefix)
        return dataset

    return _load(fs_prefix), _load(od_prefix), _load(nd_prefix)

def Convert_2_BiGCNFormat(data) -> BiGCNTwitterSet:
    """Re-wrap a dataset as a BiGCNTwitterSet, sharing (not copying) its fields."""
    converted = BiGCNTwitterSet()
    # shallow handover of the four payload attributes
    for attr in ("data", "data_ID", "data_len", "data_y"):
        setattr(converted, attr, getattr(data, attr))
    return converted

def BiGCNEvaluater(data_set:BiGCNTwitterSet, label):
    """Build an evaluation closure over a fixed dataset and gold labels.

    The returned callable evaluates a model and yields
    (accuracy, precision, recall, f1) for class 1.
    """
    def evaluater(model:BiGCNRumorDetec):
        accuracy, prf = Perf(model, data_set, label)
        # index [x][1]: metric for the positive class (label 1)
        return accuracy, prf[0][1], prf[1][1], prf[2][1]
    return evaluater

def balancedTrainingIter(weak_set, batch_size, valid_idxs=None):
    """Yield class-balanced mini-batches from `weak_set`.

    The minority class is oversampled (whole-list repetition plus a random
    remainder) so both classes contribute up to batch_size // 2 samples per
    yielded batch.

    :param weak_set: dataset whose `data_y` rows are class-score vectors
        (argmax gives the label) and which supports `__getitem__` and
        `collate_raw_batch`
    :param batch_size: total batch size; half is drawn from each class
    :param valid_idxs: indices eligible for training. BUG FIX: the original
        crashed when this defaulted to None (`tensor[None]` adds a dimension
        and `torch.tensor(None)` raises); now None means "use all samples".
    :raises ValueError: if the eligible samples contain only one class —
        balancing is impossible (previously surfaced as ZeroDivisionError)
    """
    if valid_idxs is None:
        valid_idxs = list(range(len(weak_set.data_y)))
    valid_idxs = valid_idxs if isinstance(valid_idxs, torch.Tensor) else torch.tensor(valid_idxs)
    labels = torch.tensor(weak_set.data_y).argmax(dim=1)[valid_idxs]
    pos_idxs = valid_idxs[labels == 1].tolist()
    neg_idxs = valid_idxs[labels == 0].tolist()
    if not pos_idxs or not neg_idxs:
        raise ValueError("balancedTrainingIter requires at least one sample of each class")
    if len(pos_idxs) > len(neg_idxs):
        max_size = len(pos_idxs)
        neg_idxs = neg_idxs * (len(pos_idxs) // len(neg_idxs)) + \
                   random.sample(neg_idxs, len(pos_idxs) % len(neg_idxs))
    else:
        max_size = len(neg_idxs)
        pos_idxs = pos_idxs * (len(neg_idxs) // len(pos_idxs)) + \
                   random.sample(pos_idxs, len(neg_idxs) % len(pos_idxs))
    half = batch_size // 2
    for i in range(0, max_size, half):
        training_idxs = pos_idxs[i:i + half] + neg_idxs[i:i + half]
        yield weak_set.collate_raw_batch([weak_set[j] for j in training_idxs])

def DistillationLoss(Lambda):
    """Build a soft-label distillation loss.

    The returned callable mixes the per-sample confidence (masked onto the
    one-hot label) with the hard label at ratio Lambda : (1 - Lambda), then
    computes a cross-entropy-style loss against the predictions.

    :param Lambda: weight of the confidence ("soft") component in [0, 1]
    """
    def loss_func(label, confidence, preds):
        # nudge predictions off exact zero before taking the log
        safe_preds = (preds - 1e-8).abs()
        # broadcast confidence to both columns, keep only the labelled one
        conf_mat = label * torch.stack([confidence, confidence]).T
        soft_label = Lambda * conf_mat + (1 - Lambda) * label
        per_sample = (-1 * soft_label * safe_preds.log()).sum(dim=1)
        return per_sample.mean()
    return loss_func

def ModelTrain(model:RumorDetection, weak_set:TwitterSet, weak_set_label:torch.Tensor, logits:torch.Tensor,
               max_epoch:int, batch_size:int, train_indices:List, loss_func, evaluator=None):
    """Fine-tune `model` on pseudo-labelled samples of `weak_set`.

    `weak_set.data_y` is assumed to already hold the per-class pseudo-label
    rows (argmax-able; see WeakLabeling). `weak_set_label` carries the gold
    labels and is used only for the diagnostic print below. `logits`
    supplies the per-sample confidences consumed by `loss_func` (see
    DistillationLoss). Training stops early once an epoch's mean loss drops
    below 0.2; otherwise `evaluator` (if given) is run after each epoch.
    """
    labels, pseaudo_labels = weak_set_label.to(model.device), torch.tensor(weak_set.data_y, device=model.device)
    # diagnostic: agreement between pseudo labels and gold on the train split
    print("trainSet perf:", acc_P_R_F1(labels[train_indices].cpu(), pseaudo_labels.argmax(dim=1)[train_indices].cpu()))
    # smaller LR for the sentence encoder than for propagation / classifier
    model_optim = torch.optim.Adam([
                {'params': model.sent2vec.parameters(), 'lr': 5e-5},
                {'params': model.prop_model.parameters(), 'lr': 1e-4},
                {'params': model.rdm_cls.parameters(), 'lr': 1e-4}
            ]
        )
    for epoch in range(max_epoch):
        start = 0
        sum_loss = 0.
        # reshuffle the training order each epoch (sample without replacement)
        train_indices = random.sample(train_indices, len(train_indices))
        for t_idx in range(0, len(train_indices), batch_size):
            batch_idxs = train_indices[t_idx:min(t_idx+batch_size, len(train_indices))]
            batch = weak_set.collate_raw_batch([ weak_set[batch_idx]
                                                    for batch_idx in batch_idxs])
            preds = model.predict(batch)
            loss = loss_func(pseaudo_labels[batch_idxs], logits[batch_idxs], preds)
            model.zero_grad()
            model_optim.zero_grad()
            loss.backward()
            model_optim.step()
            torch.cuda.empty_cache()  # keep GPU memory bounded between steps
            print('####Model Update (%3d | %3d) ####, loss = %6.8f' % (
                start, len(train_indices), loss.data.mean()
            ))
            sum_loss += loss.data
            start += batch_size
        # +1 keeps the divisor non-zero; approximates the number of batches
        mean_loss = (sum_loss * 1.0) / ((len(train_indices) // batch_size) + 1)
        print("mean loss:", mean_loss)
        if mean_loss < 0.2:  # early stop
            break
        elif evaluator is not None:
            val_acc, prec, rec, f1 = evaluator(model)
            print(
                '##### %6d | %6d, val_acc/val_prec/val_rec/val_f1 = %6.8f/%6.7f/%6.7f/%6.7f' % (
                    epoch, max_epoch,
                    val_acc, prec, rec, f1)
            )
        else:
            pass

def SelfTrain(tr_model, weak_set, pseudo_logits, weak_set_label, tr_model_suffix, train_iter):
    """One self-training round: train on high-confidence pseudo labels, then log metrics.

    Samples with pseudo_logits >= 0.9 (hard-coded here — the module-level
    `threshold` variable is NOT consulted) are selected for one epoch of
    distillation training, after which whole-set performance and quality
    statistics of the selected subset are written to fitlog under
    `tr_model_suffix` at step `train_iter`.

    NOTE(review): this function reads the module-level `entrophy` tensor set
    by WeakLabeling at the call sites instead of taking it as a parameter —
    fragile coupling; confirm call order before refactoring.
    """
    flags = pseudo_logits.__ge__(0.9)  # confidence gate for self-training
    valid_idxs = torch.arange(0, len(weak_set), 1)[flags].tolist()
    loss_fn = DistillationLoss(0.8)
    ModelTrain(tr_model, weak_set, weak_set_label, pseudo_logits,
                1, 32, valid_idxs, loss_fn, evaluator = None)
    rst_model1 = Perf(tr_model, weak_set, weak_set_label)
    print(f"{train_iter} : Post-MetaTrain Performance of model1:", rst_model1)
    pseaudo_labels = torch.tensor(weak_set.data_y).argmax(dim=1)
    # quality of the selected pseudo-labelled subset, measured against gold
    acc_s, (p_s, r_s, f1_s, _) = acc_P_R_F1(weak_set_label[valid_idxs],
                                            pseaudo_labels[valid_idxs])
    fitlog.add_metric({f"{tr_model_suffix}":
                           {"valid_acc": rst_model1[0],
                            "valid_prec": rst_model1[1][0][1],
                            "valid_recall": rst_model1[1][1][1],
                            "valid_f1": rst_model1[1][2][1],
                            "selected_num": len(valid_idxs) * 1.0 / len(weak_set),
                            "selected_acc": acc_s, "selected_prec": p_s[1],
                            "selected_recall": r_s[1], "selected_f1": f1_s[1],
                            "init_entrophy": entrophy.mean(),
                            "selected_entrophy": entrophy[valid_idxs].mean()
                            }}, step=train_iter)

# Checkpoints of two independently pre-trained BiGCN models, one per source
# event; the numeric suffix is the accuracy recorded in the checkpoint name.
_BIGCN_CKPTS = {
    1: [("charliehebdo", "0.81"), ("ferguson", "0.68"),
        ("germanwings-crash", "0.70"), ("ottawashooting", "0.68"),
        ("sydneysiege", "0.67")],
    2: [("charliehebdo", "0.80"), ("ferguson", "0.71"),
        ("germanwings-crash", "0.68"), ("ottawashooting", "0.70"),
        ("sydneysiege", "0.66")],
}

BiGCN1_Paths = ["../../saved/TFIDF_BiGCN_%s_%s.pkl" % pair for pair in _BIGCN_CKPTS[1]]

BiGCN2_Paths = ["../../saved/TFIDF_BiGCN_%s_%s.pkl" % pair for pair in _BIGCN_CKPTS[2]]


# Per-run log directory named after this script, recreated fresh every run.
# BUG FIX: the original used str(__file__).rstrip(".py"), but rstrip strips
# any trailing run of the CHARACTERS '.', 'p', 'y' (e.g. "copy.py" -> "co");
# splitext drops exactly the extension. Shell mkdir/rm -rf calls are replaced
# with the portable stdlib equivalents.
log_dir = os.path.splitext(str(__file__))[0]
# log_dir = "MetaSelfTrain_0"
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)  # start from a clean directory
os.makedirs(log_dir)

few_shot_cnt = 100  # number of labelled few-shot samples per split
fitlog.set_log_dir("%s/" % log_dir, new_log=True)
domain_ID = 4  # index into the *_Paths checkpoint lists (here: sydneysiege)
# Load the few-shot / old-domain / new-domain splits for the target event.
few_shot_set, old_domain, new_domain = obtain_Domain_set(
                                            f"../../data/twitter_fs{domain_ID}_{few_shot_cnt}",
                                            f"../../data/twitter_od{domain_ID}_{few_shot_cnt}",
                                            f"../../data/twitter_nd{domain_ID}_{few_shot_cnt}"
                                        )
# Load a cached TF-IDF vectorizer, or fit one on the lemmatized text of all
# three splits and cache it for subsequent runs.
Tf_Idf_twitter_file = "../../saved/TfIdf_twitter.pkl"
if os.path.exists(Tf_Idf_twitter_file):
    # NOTE(review): pickle.load on a local cache file — acceptable only while
    # the file is produced by this script / trusted tooling.
    with open(Tf_Idf_twitter_file, "rb") as fr:
        tv = pickle.load(fr)
else:
    lemma = Lemma_Factory()
    corpus = [" ".join(lemma(txt)) for data in [few_shot_set, old_domain, new_domain]
                                    for ID in data.data_ID for txt in data.data[ID]['text']]
    tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
    _ = tv.fit_transform(corpus)
    with open(Tf_Idf_twitter_file, "wb") as fw:
        pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)

# Fold the labelled few-shot samples into the new-domain pool.
new_domain = Merge_data(few_shot_set, new_domain)
model = obtain_model(tv)
new_domain_name = new_domain.data[new_domain.data_ID[0]]['event']
new_domain_label = torch.tensor(new_domain.data_y).argmax(dim=1)  # gold labels, taken before WeakLabeling mutates data_y
model.load_model(BiGCN1_Paths[domain_ID])

###======select hard samples===========###
# Weak-label the merged pool, then take the few_shot_cnt least confident
# samples (smallest max-probability via ascending argsort) and restore their
# gold labels; pinning their logits to exactly 1.0 makes WeakLabeling skip
# them in later rounds (it only relabels entries whose logits != 1.0).
entrophy, preds, logits = WeakLabeling(model, new_domain)
hard_idxs = logits.argsort()[:few_shot_cnt]
logits[hard_idxs] = 1.0
hard_labels = new_domain_label[hard_idxs]
# back to per-class rows [1-y, y], the format stored in data_y
hard_labels = torch.stack([1-hard_labels, hard_labels]).T.tolist()
for i, idx in enumerate(hard_idxs):
    new_domain.data_y[idx] = hard_labels[i]
###==================================###
###==================================###

threshold = 0.7  # NOTE(review): never read — SelfTrain hard-codes 0.9; confirm intent
# Co-training-style loop: two models alternately self-train on the pool and
# re-generate pseudo labels for each other.
for iterate in range(100):
    # --- model 1: warm-start from its own previous round after round 0 ---
    if iterate != 0:
        model.load_model(f"./{log_dir}/model1_{new_domain_name}")
    SelfTrain(model, new_domain, logits, new_domain_label, f"model1_{new_domain_name}", iterate+1)
    model.save_model(f"./{log_dir}/model1_{new_domain_name}")
    entrophy, preds, logits = WeakLabeling(model, new_domain, logits)

    # --- model 2: starts from its pre-trained checkpoint on round 0 ---
    if iterate == 0:
        model.load_model(BiGCN2_Paths[domain_ID])
    else:
        model.load_model(f"./{log_dir}/model2_{new_domain_name}")
    # BUG FIX: model 2's metrics were logged under the "model1_..." suffix,
    # overwriting model 1's fitlog entries every round; use "model2_...".
    SelfTrain(model, new_domain, logits, new_domain_label, f"model2_{new_domain_name}", iterate+1)
    model.save_model(f"./{log_dir}/model2_{new_domain_name}")
    if iterate != 99:  # skip the final (unused) relabeling pass
        entrophy, preds, logits = WeakLabeling(model, new_domain, logits)




