import sys
sys.path.append("../../")
sys.path.append("../")

import os
import pickle
import shutil

import fitlog
import torch
import torch.nn as nn
from sklearn.feature_extraction.text import TfidfVectorizer
from torch.utils.data import DataLoader

from Dataloader.dataloader_utils import Sample_data, Merge_data, Lemma_Factory
from Dataloader.twitterloader import TwitterSet, BiGCNTwitterSet
from SentModel.Sent2Vec import TFIDFBasedVec, W2VRDMVec
from PropModel.GraphPropagation import BiGCN
from PropModel.SeqPropagation import GRUModel
from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec
from RumdetecFramework.BaseRumorFramework import RumorDetection
from RumdetecFramework.TransferRumorFramework import InstanceReweighting

def WeakLabeling(model: "RumorDetection", data, convert_fn=None, batch_size=20):
    """Pseudo-label `data` with `model` and return per-sample prediction entropy.

    Side effect: overwrites ``data.data_y`` in place with weak labels obtained
    by thresholding each class score at 0.5 (one row of 0/1 flags per sample).

    Args:
        model: trained detector; ``model.forward(batch)`` is assumed to return
            per-class probabilities in (0, 1] — TODO confirm, the entropy
            formula below relies on it.
        data: dataset exposing ``collate_raw_batch``; relabeled in place.
        convert_fn: optional converter applied to `data` first
            (e.g. ``Convert_2_BiGCNFormat``).
        batch_size: inference batch size.

    Returns:
        1-D tensor with one entropy value per sample; callers use the inverse
        as a confidence weight.
    """
    if convert_fn is not None:
        data = convert_fn(data)
    data_loader = DataLoader(data,
                             batch_size=batch_size,
                             shuffle=False,
                             collate_fn=data.collate_raw_batch)
    preds = []
    with torch.no_grad():  # pure inference; no gradient tracking needed
        for batch in data_loader:
            preds.append(model.forward(batch))
    # Concatenate once (the original built this tensor twice).
    pred_tensor = torch.cat(preds)
    data.data_y = (pred_tensor > 0.5).long().tolist()
    # H(p) = -sum(p * log p); |log p| == -log p for p in (0, 1].
    entropy = (pred_tensor.log().abs() * pred_tensor).sum(dim=1)
    return entropy

def obtain_Domain_set(tr_prefix, dev_prefix, te_prefix):
    """Load the source-domain training set and build the target ("new") domain.

    The dev and test splits are merged to form the new domain used for
    transfer; only the training split applies the minimum-length filter.
    """
    source_domain = TwitterSet()
    source_domain.load_data_fast(data_prefix=tr_prefix, min_len=5)
    dev_split, te_split = TwitterSet(), TwitterSet()
    dev_split.load_data_fast(data_prefix=dev_prefix)
    te_split.load_data_fast(data_prefix=te_prefix)
    return source_domain, Merge_data(dev_split, te_split)

def Convert_2_BiGCNFormat(data):
    """Re-wrap a TwitterSet-like dataset as a BiGCNTwitterSet.

    This is a shallow copy: only references are transferred, so the converted
    set shares its underlying storage with `data`.
    """
    converted = BiGCNTwitterSet()
    for attr in ("data", "data_ID", "data_len", "data_y"):
        setattr(converted, attr, getattr(data, attr))
    return converted


# Pre-trained RDM (GRU-based) checkpoints, keyed by Twitter event name;
# presumably each was trained with that event held out, and the numeric
# suffix is its validation accuracy — TODO confirm against the training runs.
RDM_Paths = {
    "charliehebdo": "../../saved/RDM_charliehebdo_0.83.pkl",
    "ferguson": "../../saved/RDM_ferguson_0.78.pkl",
    "germanwings-crash": "../../saved/RDM_germanwings-crash_0.70.pkl",
    "ottawashooting": "../../saved/RDM_ottawashooting_0.69.pkl",
    "sydneysiege": "../../saved/RDM_sydneysiege_0.72.pkl"
}

# Pre-trained TF-IDF BiGCN checkpoints, keyed the same way as RDM_Paths.
BiGCN_Paths = {
    "charliehebdo": "../../saved/TFIDF_BiGCN_charliehebdo_0.80.pkl",
    "ferguson": "../../saved/TFIDF_BiGCN_ferguson_0.74.pkl",
    "germanwings-crash": "../../saved/TFIDF_BiGCN_germanwings-crash_0.75.pkl",
    "ottawashooting": "../../saved/TFIDF_BiGCN_ottawashooting_0.69.pkl",
    "sydneysiege": "../../saved/TFIDF_BiGCN_sydneysiege_0.72.pkl"
}

# TF-IDF vectorizer: load from the on-disk cache when it exists, otherwise
# fit it on the lemmatized tweet corpus of split 1 and cache the result.
Tf_Idf_twitter_file = "../../saved/TfIdf_twitter.pkl"
if not os.path.exists(Tf_Idf_twitter_file):
    i = 1
    tr, dev = obtain_Domain_set("../../data/twitter_tr%d" % i, "../../data/twitter_dev%d" % i,
                                  "../../data/twitter_te%d" % i)
    lemma = Lemma_Factory()
    corpus = []
    for dataset in (tr, dev):
        for ID in dataset.data_ID:
            for txt in dataset.data[ID]['text']:
                corpus.append(" ".join(lemma(txt)))
    tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
    tv.fit_transform(corpus)
    with open(Tf_Idf_twitter_file, "wb") as fw:
        pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)
else:
    # NOTE: unpickling is only acceptable because this cache is produced
    # locally by the branch above, never by untrusted input.
    with open(Tf_Idf_twitter_file, "rb") as fr:
        tv = pickle.load(fr)

# Per-script log directory (script path minus its extension), recreated
# from scratch on every run.
# BUGFIX: str.rstrip(".py") strips *characters* from the right, mangling
# names such as "copy.py" -> "co"; os.path.splitext drops the suffix safely.
log_dir = os.path.splitext(str(__file__))[0]
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)  # stdlib replacement for shell "rm -rf"
os.makedirs(log_dir)        # stdlib replacement for shell "mkdir"

# --- BiGCN branch: TF-IDF sentence vectors + bidirectional GCN propagation ---
lvec = TFIDFBasedVec(tv, 20, embedding_size=300, w2v_dir="../../saved/glove_en/")
prop = BiGCN(300, 256)
cls = nn.Linear(1024, 2)  # NOTE(review): 1024-dim input presumably concatenated graph features — confirm against BiGCN's output size
BiGCN_model = BiGCNRumorDetec(lvec, prop, cls, batch_size=20, grad_accum_cnt=1)

# --- RDM branch: GloVe word vectors + single-layer GRU propagation ---
lvec = W2VRDMVec("../../saved/glove_en/", 300, seg=None, emb_update=False)
prop = GRUModel(300, 256, 1, 0.2)
cls = nn.Linear(256, 2)
RDM = RumorDetection(lvec, prop, cls, batch_size=20, grad_accum_cnt=1)
few_shot_cnt = 50  # labeled target-domain examples "leaked" for meta-training

# For each of the 5 leave-one-event-out splits: evaluate both pre-trained
# models on the new domain, then co-train for 30 rounds — each model
# weak-labels the target set for the other, with instances weighted by the
# inverse of the labeler's prediction entropy (low entropy = high confidence).
for i in range(5):
    old_domain, new_domain = obtain_Domain_set("../../data/twitter_tr%d"%i,
                                               "../../data/twitter_dev%d"%i,
                                               "../../data/twitter_te%d"%i)
    # Snapshot ground-truth labels before WeakLabeling overwrites data_y.
    # NOTE(review): taken before the split, so it covers few_shot_set and
    # new_domain together — verify InstanceReweighting expects that.
    new_domain_label = new_domain.data_y
    leakage_frac = few_shot_cnt*1.0/len(new_domain)
    few_shot_set, new_domain = new_domain.split(percent=[leakage_frac, 1.0])
    new_domain_name = new_domain.data[new_domain.data_ID[0]]['event']

    # BUGFIX: the checkpoint dicts are keyed by event name, not loop index;
    # BiGCN_Paths[i] / RDM_Paths[i] raised KeyError.
    BiGCN_model.load_model(BiGCN_Paths[new_domain_name])
    Bi_new_domain = Convert_2_BiGCNFormat(new_domain)
    new_domain_loader = DataLoader(Bi_new_domain, batch_size=20, shuffle=False,
                                   collate_fn=Bi_new_domain.collate_raw_batch)
    rst_BiGCN = BiGCN_model.valid(new_domain_loader, all_metrics=True)
    fitlog.add_best_metric({"BiGCN_Original_%s"%new_domain_name:
                                            {"valid_acc": rst_BiGCN[0],
                                             "valid_loss": rst_BiGCN[1],
                                             "valid_prec": rst_BiGCN[2],
                                             "valid_recall": rst_BiGCN[3],
                                             "valid_f1": rst_BiGCN[4]
                                               }})
    RDM.load_model(RDM_Paths[new_domain_name])  # BUGFIX: was RDM_Paths[i]
    new_domain_loader = DataLoader(new_domain, batch_size=20, shuffle=False,
                                   collate_fn=new_domain.collate_raw_batch)
    rst_RDM = RDM.valid(new_domain_loader, all_metrics=True)
    fitlog.add_best_metric({"RDM_Original_%s"%new_domain_name:
                                        {"valid_acc": rst_RDM[0],
                                        "valid_loss": rst_RDM[1],
                                        "valid_prec": rst_RDM[2],
                                        "valid_recall": rst_RDM[3],
                                        "valid_f1": rst_RDM[4]
                                               }})
    for epoch in range(30):
        # RDM weak-labels the target set; train BiGCN on it.
        e_arr = WeakLabeling(RDM, new_domain)
        IR_weighting = InstanceReweighting(BiGCN_model, new_domain, few_shot_set, new_domain_label,
                                           convey_fn=Convert_2_BiGCNFormat,
                                           weak_set_weights= 1.0 / e_arr.cpu().numpy(), lr4model=2e-2, batch_size=20)

        IR_weighting.Meta_Training(max_iters=1000, model_file="../../saved/MetaBiGCN_%s.pkl"%new_domain_name)
        BiGCN_model.load_model("../../saved/MetaBiGCN_%s.pkl"%new_domain_name)
        rst_BiGCN = IR_weighting.Valid(all_metrics=True)
        fitlog.add_metric({"BiGCN_%s"%new_domain_name:
                                    {"valid_acc": rst_BiGCN[0],
                                     "valid_loss": rst_BiGCN[1],
                                     "valid_prec": rst_BiGCN[2],
                                     "valid_recall": rst_BiGCN[3],
                                     "valid_f1": rst_BiGCN[4]
                                    }},
                          step=epoch
                          )

        # BiGCN weak-labels the target set; train RDM on it.
        e_arr = WeakLabeling(BiGCN_model, new_domain)
        IR_weighting = InstanceReweighting(RDM, new_domain, few_shot_set, new_domain_label,
                                           convey_fn=None,
                                           weak_set_weights= 1.0 / e_arr.cpu().numpy(), lr4model=2e-2, batch_size=20)
        IR_weighting.Meta_Training(max_iters=1000, model_file="../../saved/MetaRDM_%s.pkl"%new_domain_name)

        # BUGFIX: reload the RDM checkpoint just trained above; the original
        # loaded "MetaBiGCN_*.pkl" into the RDM model.
        RDM.load_model("../../saved/MetaRDM_%s.pkl"%new_domain_name)
        rst_RDM = IR_weighting.Valid(all_metrics=True)
        fitlog.add_metric({"RDM_%s"%new_domain_name:
                                    {"valid_acc": rst_RDM[0],
                                    "valid_loss": rst_RDM[1],
                                    "valid_prec": rst_RDM[2],
                                    "valid_recall": rst_RDM[3],
                                    "valid_f1": rst_RDM[4]
                                    }},
                          step=epoch
                          )
