import sys

# The path hack must run before any project-local import below.
sys.path.append("..")
sys.path.append("../..")

import os
import pickle
import shutil

import torch.nn as nn
from sklearn.feature_extraction.text import TfidfVectorizer
from torch.utils.data import DataLoader

from Dataloader.dataloader_utils import Lemma_Factory, Sort_data, shuffle_data
from Dataloader.twitterloader import TwitterSet, SentiReader
from PropModel.SeqPropagation import GRUModel
from RumdetecFramework.BaseRumorFramework import SubjEnhancedFramework
from SentModel.Sent2Vec import TFIDFBasedVec
from SentModel.Sent2Vec import W2VRDMVec

# Optionally pin the process to specific GPU(s): the first CLI argument, if
# present, is exported as CUDA_VISIBLE_DEVICES (e.g. `python script.py 0,1`).
# `> 1` instead of `!= 1` avoids an IndexError in the (embedded-interpreter)
# case where sys.argv is empty.
if len(sys.argv) > 1:
    os.environ['CUDA_VISIBLE_DEVICES'] = sys.argv[1]

def obtain_LSTMRD(pretrained_vectorizer):
    """Assemble the subjectivity-enhanced rumor-detection framework.

    Args:
        pretrained_vectorizer: a fitted TF-IDF vectorizer consumed by
            TFIDFBasedVec as the token-weighting backend.

    Returns:
        A SubjEnhancedFramework wiring a TF-IDF sentence encoder, a GloVe-based
        sentence encoder, a single-layer GRU propagation model, and a 2-class
        MLP head (256 -> 512 -> 2).
    """
    # 2-way classification head on top of the 256-dim GRU state.
    classifier = nn.Sequential(
        nn.Linear(256, 512),
        nn.ReLU(),
        nn.Linear(512, 2),
    )
    tfidf_encoder = TFIDFBasedVec(pretrained_vectorizer, 20, embedding_size=300,
                                  w2v_dir="../../saved/glove_en/", emb_update=True)
    glove_encoder = W2VRDMVec("../../saved/glove_en/", 300)
    propagation = GRUModel(300, 256, 1, 0.2)
    return SubjEnhancedFramework(tfidf_encoder, glove_encoder, propagation,
                                 classifier, batch_size=20, grad_accum_cnt=1)

def obtain_Sort_set(tr_prefix, dev_prefix, te_prefix):
    """Load the train/dev/test TwitterSet triple from the given data prefixes.

    Only the training split is filtered with min_len=5; dev and test are
    loaded unfiltered.

    Returns:
        (train_set, dev_set, test_set) as TwitterSet instances.
    """
    def _load(prefix, **kwargs):
        # One TwitterSet per split, populated via the fast loader.
        split = TwitterSet()
        split.load_data_fast(data_prefix=prefix, **kwargs)
        return split

    return _load(tr_prefix, min_len=5), _load(dev_prefix), _load(te_prefix)

model_Paths = [""]
i = 2
# Fold 2 of the data is used only to (re)build the TF-IDF vocabulary below;
# the evaluation sections later reload the splits they actually score.
tr, dev, te = obtain_Sort_set(
    "../../data/twitter_tr%d" % i,
    "../../data/twitter_dev%d" % i,
    "../../data/twitter_te%d" % i,
)

# Cache the fitted TfidfVectorizer on disk so repeated runs skip the
# (expensive) lemmatization + fit pass.
Tf_Idf_twitter_file = "../../saved/TfIdf_twitter.pkl"
if not os.path.exists(Tf_Idf_twitter_file):
    lemma = Lemma_Factory()
    # One lemmatized, space-joined document per post text, across all splits.
    corpus = []
    for data in (tr, dev, te):
        for ID in data.data_ID:
            for txt in data.data[ID]['text']:
                corpus.append(" ".join(lemma(txt)))
    tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
    tv.fit_transform(corpus)
    with open(Tf_Idf_twitter_file, "wb") as fw:
        pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)
else:
    # NOTE(review): pickle.load on a local cache file; fine as long as the
    # saved/ directory is trusted.
    with open(Tf_Idf_twitter_file, "rb") as fr:
        tv = pickle.load(fr)


subj_tr = SentiReader("../../data/sub_tr_EN.csv")
subj_dev = SentiReader("../../data/sub_dev_EN.csv")
subj_te = SentiReader("../../data/sub_te_EN.csv")

# Derive a per-script log directory from this script's own filename.
# BUGFIX: the previous `str(__file__).rstrip(".py")` stripped any trailing
# run of '.', 'p', 'y' characters (e.g. "copy.py" -> "co"); splitext removes
# exactly the extension. Also replaces shelling out (`os.system("mkdir/rm")`)
# with portable stdlib calls.
log_dir = os.path.splitext(str(__file__))[0]
# Always start from an empty directory.
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)
os.makedirs(log_dir)

def _validate_checkpoints(te_set, ckpt_tag, title):
    """Evaluate every matching checkpoint in ../../saved on a test split.

    Args:
        te_set: TwitterSet test split providing data and collate_raw_batch.
        ckpt_tag: substring selecting checkpoint files under ../../saved
            (files containing "sent.pkl" or "_Weibo_" are always skipped).
        title: label printed in the section banner.

    Prints accuracy/loss/precision/recall/F1 for each checkpoint. If loading
    a checkpoint raises FileNotFoundError, retries once with the sentence
    encoder's embedding update disabled (restored afterwards) — presumably
    the checkpoint lacks a matching embedding file; verify against
    SubjEnhancedFramework.valid.
    """
    print("\n\n===========%s Test===========\n\n" % title)
    model = obtain_LSTMRD(tv)
    te_loader = DataLoader(te_set, batch_size=5, shuffle=False,
                           collate_fn=te_set.collate_raw_batch)
    pkl_list = ["../../saved/" + pkl_file for pkl_file in os.listdir("../../saved")
                if ((ckpt_tag in pkl_file) and
                    ("sent.pkl" not in pkl_file) and
                    ("_Weibo_" not in pkl_file))]
    for pkl_file in pkl_list:
        print("\n" + pkl_file + " : ")
        try:
            rst = model.valid(te_loader, pretrained_file=pkl_file, all_metrics=True)
        except FileNotFoundError:
            model.sent2vec.emb_update = False
            rst = model.valid(te_loader, pretrained_file=pkl_file, all_metrics=True)
            model.sent2vec.emb_update = True
        print(
            "##Validation On Test Dataset####  te_acc:%3.4f, te_loss:%3.4f, te_prec:%3.4f, te_recall:%3.4f, te_f1:%3.4f" % rst)


# --- Sort test: chronologically sorted splits -------------------------------
tr, dev, te = obtain_Sort_set("../../data/twitter_tr_sort",
                              "../../data/twitter_dev_sort",
                              "../../data/twitter_te_sort")
_validate_checkpoints(te, "TFIDF_SITER-GRURD_Sort", "Sort")

# --- General test: standard random splits -----------------------------------
tr, dev, te = obtain_Sort_set("../../data/twitter_tr",
                              "../../data/twitter_dev",
                              "../../data/twitter_te")
_validate_checkpoints(te, "TFIDF_SITER-GRURD_General", "General")

# --- Leave-one-event-out test: folds 0..4, keyed by each fold's event name --
for i in range(5):
    tr, dev, te = obtain_Sort_set("../../data/twitter_tr%d" % i,
                                  "../../data/twitter_dev%d" % i,
                                  "../../data/twitter_te%d" % i)
    test_event_name = te.data[te.data_ID[0]]['event']
    _validate_checkpoints(te, "TFIDF_SITER-GRURD_%s" % test_event_name,
                          test_event_name)