import sys
sys.path.append("../../")
sys.path.append("../")
from Dataloader.weiboloader import TreeWeiboSet, topics
from Dataloader.dataloader_utils import Sample_data, Merge_data, Lemma_Factory
from PropModel.GraphPropagation import TD_Transformer, BU_Transformer
from SentModel.Sent2Vec import W2V_Transformer, BertVec
from RumdetecFramework.GraphRumorDect import TransformerRumorDetec
from RumdetecFramework.BaseRumorFramework import RumorDetection
from sklearn.metrics import accuracy_score, precision_score, \
            recall_score, f1_score,precision_recall_fscore_support
import pickle
import torch
import torch.nn as nn
import os

def pred_Logits(model:RumorDetection, data, idxs=None, batch_size=20):
    """Run the detector over `data` in mini-batches and return stacked logits.

    Args:
        model: trained rumor-detection model exposing `forward(batch)`.
        data: dataset exposing `__len__`, `__getitem__` and
            `collate_raw_batch(samples)`.
        idxs: optional subset of sample indices; defaults to the whole set.
        batch_size: number of samples collated per forward pass.

    Returns:
        Tensor of shape (len(idxs), num_classes) with the raw model outputs.
    """
    if idxs is None:
        idxs = list(range(len(data)))
    chunks = []
    # Inference only — no autograd graph needed.
    with torch.no_grad():
        for start in range(0, len(idxs), batch_size):
            selected = idxs[start:start + batch_size]
            raw_batch = data.collate_raw_batch([data[j] for j in selected])
            chunks.append(model.forward(raw_batch))
    return torch.cat(chunks)

def prediction(model:RumorDetection, data, idxs=None, batch_size=20):
    """Predict the class of each selected sample together with its confidence.

    Args:
        model: trained rumor-detection model.
        data: dataset compatible with `pred_Logits`.
        idxs: optional subset of sample indices; defaults to the whole set.
        batch_size: forward-pass batch size.

    Returns:
        (pred, conf): predicted class index and the corresponding top score
        for every sample, each of shape (len(idxs),).
    """
    pred_tensor = pred_Logits(model, data, idxs, batch_size)
    # The original used sort(dim=1) and took column 1, which is only the
    # argmax/max when there are exactly 2 classes; max(dim=1) is equivalent
    # for the binary classifier used here and correct for any class count.
    conf, pred = pred_tensor.max(dim=1)
    return pred, conf

def expandPseaudoSet(model1, model2, unlabeled, skip_idxs=None, threshold=0.95, max_cnt=50):
    """Pick unlabeled samples to pseudo-label by co-training agreement.

    A sample qualifies when both models predict the same class and both do so
    with confidence above `threshold`. At most `max_cnt` samples are kept,
    ranked by the harmonic mean of the two confidences.

    Args:
        model1, model2: the two co-trained detectors.
        unlabeled: dataset of unlabeled samples.
        skip_idxs: indices already pseudo-labeled (excluded from selection).
        threshold: per-model confidence cut-off.
        max_cnt: upper bound on the number of returned indices.

    Returns:
        List of dataset indices selected for pseudo-labeling.
    """
    if skip_idxs is None:
        c_idxs = list(range(len(unlabeled)))
    else:
        c_idxs = list(set(range(len(unlabeled))) - set(skip_idxs))
    pred_1, conf_1 = prediction(model1, unlabeled, c_idxs)
    pred_2, conf_2 = prediction(model2, unlabeled, c_idxs)
    # Plain operators instead of calling __eq__/__gt__ dunders directly; for
    # the integer class predictions (a - b).abs() == 0 is exactly a == b.
    agree = pred_1 == pred_2
    valid = agree & (conf_1 > threshold) & (conf_2 > threshold)
    expand_idxs = torch.tensor(c_idxs, device=valid.device)[valid]
    if len(expand_idxs) > max_cnt:
        # Harmonic mean (F1-style) of the two confidences; keep the
        # max_cnt highest-scoring candidates (argsort is ascending).
        conf_f1 = 2 * conf_1 * conf_2 / (conf_1 + conf_2)
        keep = conf_f1[valid].argsort()[-max_cnt:]
        expand_idxs = expand_idxs[keep]
    return expand_idxs.tolist()

def acc_P_R_F1(y_true, y_pred):
    """Return (accuracy, (precision, recall, f1, support)) for the predictions.

    `y_pred` is a torch tensor (possibly on GPU); it is moved to CPU once
    before handing it to sklearn.
    """
    y_hat = y_pred.cpu()
    accuracy = accuracy_score(y_true, y_hat)
    prf_support = precision_recall_fscore_support(y_true, y_hat)
    return accuracy, prf_support

def Perf(model:RumorDetection, data, label, idxs=None, batch_size=20):
    """Evaluate `model` on (a subset of) `data` against ground-truth `label`.

    Args:
        model: trained rumor-detection model.
        data: dataset compatible with `prediction`.
        label: ground-truth labels, indexable by `idxs` (tensor/array-like).
        idxs: optional subset of sample indices; defaults to the whole set.
        batch_size: forward-pass batch size.

    Returns:
        (accuracy, (precision, recall, f1, support)) from `acc_P_R_F1`.
    """
    predicted, _ = prediction(model, data, idxs=idxs, batch_size=batch_size)
    truth = label if idxs is None else label[idxs]
    return acc_P_R_F1(truth, predicted)

def obtain_model():
    """Assemble the rumor detector: BERT sentence encoder, top-down
    propagation transformer, and a linear 2-way classification head.
    """
    # Alternative GloVe/word2vec sentence encoder, kept for reference:
    # sentModel = W2V_Transformer(w2v_dir="../../saved/glove_en/",
    #                             config_file="./SentTransformer.json",
    #                             emb_update=False)
    sentence_encoder = BertVec(bert_dir="../../../bert_en/",
                               bert_parallel=False, para_update=False)
    propagation_model = TD_Transformer("./PropTransformer.json")
    classifier = nn.Linear(768, 2)  # 768-d BERT features -> 2 classes
    return TransformerRumorDetec(sentence_encoder, propagation_model,
                                 classifier, batch_size=5, grad_accum_cnt=4)

def obtain_Domain_set(fs_prefix, od_prefix, nd_prefix, max_seq_len=20):
    """Load the few-shot, old-domain and new-domain datasets.

    Each prefix is passed to `TreeWeiboSet.load_data_fast`; every loaded set
    is then trimmed to at most `max_seq_len` posts per thread.

    Returns:
        (fs_set, od_set, nd_set) in the same order as the prefixes.
    """
    loaded = []
    for prefix in (fs_prefix, od_prefix, nd_prefix):
        dataset = TreeWeiboSet()
        dataset.load_data_fast(data_prefix=prefix)
        dataset.trim_long_seq(max_seq_len)
        loaded.append(dataset)
    return tuple(loaded)

import shutil

# Per-script log directory named "<script path without .py>", recreated fresh
# on every run so stale logs are discarded.
# BUGFIX: the original str(__file__).rstrip(".py") strips any trailing run of
# the characters '.', 'p', 'y' (e.g. "copy.py" -> "co"), not the ".py" suffix;
# os.path.splitext removes exactly the extension. Also replaced the
# os.system("mkdir"/"rm -rf") shell calls with portable stdlib equivalents.
log_dir = os.path.splitext(str(__file__))[0]
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)  # wipe logs left over from a previous run
os.makedirs(log_dir)

# ---- experiment configuration -------------------------------------------
domain_ID = 1        # which topic is held out as the target domain
few_shot_cnt = 100   # few-shot budget (kept for reference)

test_event_name = topics[domain_ID]

# NOTE(review): the "weibo_tr_*" prefix is bound to `dev` and "weibo_dev_*"
# to `tr` — this looks deliberate for the cross-domain setup, but worth
# confirming against obtain_Domain_set's intended argument order.
dev, tr, te = obtain_Domain_set("../../data/weibo_tr_%s" % test_event_name,
                                "../../data/weibo_dev_%s" % test_event_name,
                                "../../data/weibo_te_%s" % test_event_name)

print("%s : (dev event)/(test event)/(train event) = %3d/%3d/%3d" % (
test_event_name, len(dev), len(te), len(tr)))
print("\n\n===========%s Train===========\n\n" % test_event_name)

model = obtain_model()
model.train_iters(tr, dev, te, max_epochs=20,
                  log_dir=log_dir, log_suffix=test_event_name,
                  model_file="../../saved/TD_Transformer_%s.pkl" % test_event_name)
