import os
import sys
sys.path.append("..")
sys.path.append("../..")
from Dataloader.dataloader_utils import Merge_data
from Dataloader.twitterloader import TwitterSet
import torch.nn as nn
from SentModel.Sent2Vec import W2VRDMVec
from PropModel.SeqPropagation import GRUModel
from RumdetecFramework.AdverRumorFramework import EnhancedSentAdver
from torch.utils.data import DataLoader
import pandas as pd
import numpy as np
import torch

def obtain_general_set(tr_prefix, dev_prefix, te_prefix):
    """Load the train/dev/test ``TwitterSet`` splits from their data prefixes.

    Only the training split filters out short samples (``min_len=5``); the
    dev and test splits are loaded with the loader's defaults.

    Returns:
        (train_set, dev_set, test_set) tuple of ``TwitterSet`` instances.
    """
    def _load(prefix, **load_kwargs):
        # One TwitterSet per split, populated via the fast loader.
        split = TwitterSet()
        split.load_data_fast(data_prefix=prefix, **load_kwargs)
        return split

    return _load(tr_prefix, min_len=5), _load(dev_prefix), _load(te_prefix)

def Entrophy(model, dataset, csv_file):
    """Evaluate `model` on `dataset` and dump per-sample results to CSV.

    Writes one row per sample with columns: ID, one-hot-decoded label,
    prediction entropy, class probabilities, argmax prediction, and the
    ground-truth labels returned by the data loader.

    Args:
        model: project model exposing ``eval()``, ``forward(batch_x)`` and
            (optionally) ``load_model(path)``.
        dataset: project dataset exposing ``data_ID``, ``data_y`` (one-hot)
            and ``collate_raw_batch``.
        csv_file: destination path for the CSV dump.

    Returns:
        The populated ``pandas.DataFrame`` (also written to ``csv_file``).
    """
    def valid(model, data_loader: DataLoader, pretrained_file=None):
        # Run inference over the whole loader, concatenating predictions
        # and labels into two tensors.
        model.eval()
        if pretrained_file is not None and os.path.exists(pretrained_file):
            model.load_model(pretrained_file)
        labels = []
        preds = []
        with torch.no_grad():
            for batch in data_loader:
                pred = model.forward(batch[0])
                # Release cached GPU memory between batches (no-op on CPU).
                torch.cuda.empty_cache()
                preds.append(pred)
                labels.append(batch[2])
            pred_tensor = torch.cat(preds, dim=0)
            label_tensor = torch.cat(labels, dim=0)
        return pred_tensor, label_tensor

    data_frame = pd.DataFrame()
    data_frame['ID'] = np.array(dataset.data_ID)
    data_frame['label'] = np.array(dataset.data_y).argmax(axis=1)
    # BUG FIX: was `collate_fn=dev.collate_raw_batch`, which silently grabbed
    # the global `dev` from the training loop instead of the dataset actually
    # being evaluated (and raises NameError when called standalone).
    data_loader = DataLoader(dataset, batch_size=20, shuffle=False,
                             collate_fn=dataset.collate_raw_batch)
    p, y = valid(model, data_loader)
    # Shannon entropy of the predicted distribution per sample.
    # NOTE(review): produces NaN if any predicted probability is exactly 0;
    # assumes `p` holds probabilities (not logits) — confirm against model.
    entrophy = (p.log() * p * (-1)).sum(dim=1)
    data_frame['entrophy'] = entrophy.cpu().numpy()
    data_frame['prediction_0'] = p[:, 0].cpu().numpy()
    data_frame['prediction_1'] = p[:, 1].cpu().numpy()
    data_frame['prediction'] = p.cpu().numpy().argmax(axis=1)
    data_frame['ground_truth'] = y.cpu().numpy()
    data_frame.to_csv(csv_file, index=False)
    return data_frame

# log_dir = str(__file__).split(".")[0]
log_dir = "SentAdverRDM"
# Recreate the log directory from scratch on every run.  Use stdlib calls
# instead of shelling out via os.system("mkdir"/"rm -rf"): portable, raises
# on failure instead of silently ignoring a bad exit status.
import shutil
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)
os.makedirs(log_dir)


# Best saved checkpoint per held-out test event (leave-one-event-out);
# the numeric suffix in each filename is the accuracy at save time.
best_model_file_dic = {
    event: "SentAdverRDM_%s_%s.pkl" % (event, acc)
    for event, acc in [
        ("ottawashooting", "0.66"),
        ("sydneysiege", "0.73"),
        ("germanwings-crash", "0.67"),
        ("ferguson", "0.76"),
        ("charliehebdo", "0.83"),
    ]
}

# Evaluate each of the 5 leave-one-event-out folds: load the fold's splits,
# rebuild the model, restore the fold's best checkpoint, and dump the dev-set
# entropy table to a per-event CSV.
for fold in range(5):
    # NOTE: `tr`/`dev`/`te` are module-level on purpose; other code in this
    # file reads the global `dev`.
    tr, dev, te = obtain_general_set("../../data/twitter_tr%d" % fold,
                                     "../../data/twitter_dev%d" % fold,
                                     "../../data/twitter_te%d" % fold)
    test_event_name = te.data[te.data_ID[0]]['event']
    print("%s : (dev event)/(test event)/(train event) = %3d/%3d/%3d" % (
                test_event_name, len(dev), len(te), len(tr)))
    print("\n\n===========%s Rumor PreTrain===========\n\n" % test_event_name)

    # Sentence encoder (frozen GloVe embeddings), GRU propagation model,
    # and a 2-way rumor/non-rumor classifier head.
    sent_encoder = W2VRDMVec("../../saved/glove_en/", 300, seg=None, emb_update=False)
    propagation = GRUModel(300, 256, 1, 0.2)
    classifier = nn.Linear(256, 2)

    model = EnhancedSentAdver(sent_encoder, propagation, classifier,
                              topic_label_num=5, lr_discount=0.1,
                              batch_size=20, grad_accum_cnt=1)
    model.load_model("../../saved/%s" % best_model_file_dic[test_event_name])
    Entrophy(model, dev, "./%s_entrophy.csv" % test_event_name)

