import os
import pickle
import shutil
import sys

# The sys.path hacks must run before the project-local imports below resolve.
sys.path.append("..")
sys.path.append("../..")

import nltk
import torch
import torch.nn as nn
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer

from RumdetecFramework.GraphRumorDect import BiGCNRumorDetec
from Dataloader.twitterloader import BiGCNTwitterSet
from Dataloader.dataloader_utils import Sort_data, shuffle_data, Merge_data
from SentModel.Sent2Vec import TFIDFBasedVec
from PropModel.GraphPropagation import BiGCN

# Optionally pin the visible GPU(s) from the first CLI argument,
# e.g. `python train.py 0` -> CUDA_VISIBLE_DEVICES=0.
# Robustness fix: `> 1` (instead of `!= 1`) avoids an IndexError when
# sys.argv is empty (embedded interpreters strip argv entirely).
if len(sys.argv) > 1:
    os.environ['CUDA_VISIBLE_DEVICES'] = sys.argv[1]

def Lemma_Factory():
    """Return a closure that lemmatizes a list of word tokens, POS-aware.

    The closure tags the tokens with NLTK's POS tagger, then lemmatizes each
    token using the WordNet POS code matching its Penn Treebank tag prefix;
    tokens with other tags are passed through unchanged.
    """
    lemmatizer = WordNetLemmatizer()
    # Penn Treebank tag prefix -> WordNet POS code
    # (J=adjective, V=verb, N=noun, R=adverb).
    prefix_to_wn_pos = {'J': 'a', 'V': 'v', 'N': 'n', 'R': 'r'}

    def lemma(word_tokens):
        lemmas = []
        for token, tag in nltk.pos_tag(word_tokens):
            wn_pos = prefix_to_wn_pos.get(tag[:1])
            lemmas.append(lemmatizer.lemmatize(token, wn_pos) if wn_pos else token)
        return lemmas

    return lemma

def obtain_BiGCN(pretrained_vectorizer):
    """Assemble a fresh BiGCN rumor-detection model.

    Wires together a TF-IDF based sentence encoder (300-d embeddings backed
    by the local GloVe directory), a BiGCN propagation module (300 -> 256),
    and a binary linear classifier head.
    """
    sentence_encoder = TFIDFBasedVec(
        pretrained_vectorizer,
        20,
        embedding_size=300,
        w2v_dir="../../saved/glove_en/",
        emb_update=True,
    )
    propagation = BiGCN(300, 256)
    classifier = nn.Linear(1024, 2)
    return BiGCNRumorDetec(sentence_encoder, propagation, classifier,
                           batch_size=20, grad_accum_cnt=1)

def obtain_Domain_set(tr_prefix, dev_prefix, te_prefix):
    """Load train/dev/test sets and merge dev+test into one new-domain set.

    Returns (train_set, merged_dev_test_set).
    """
    def _load(prefix):
        # Each split is loaded from its cached on-disk prefix.
        dataset = BiGCNTwitterSet()
        dataset.load_data_fast(data_prefix=prefix)
        return dataset

    train_set = _load(tr_prefix)
    merged = Merge_data(_load(dev_prefix), _load(te_prefix))
    return train_set, merged

# Per-run log directory named after this script ("train.py" -> "train").
# BUG FIX: the original used str.rstrip(".py"), which strips the trailing
# *characters* '.', 'p', 'y' — not the suffix — and would mangle any script
# name ending in those letters (e.g. "copy.py" -> "co").  os.path.splitext
# removes exactly the extension.
log_dir = os.path.splitext(str(__file__))[0]
# Recreate the directory from scratch, without shelling out (os.system with
# an interpolated path breaks on spaces and is needlessly shell-dependent).
shutil.rmtree(log_dir, ignore_errors=True)
os.makedirs(log_dir, exist_ok=True)

# Load the pre-fitted TF-IDF vectorizer shared by all folds below; it is
# passed into obtain_BiGCN as the sentence encoder's vocabulary/weights.
# NOTE(review): pickle.load executes arbitrary code from the file — acceptable
# only because this artifact is produced locally, never from untrusted input.
with open("../../saved/TfIdf_twitter.pkl", "rb") as fr:
    tv = pickle.load(fr)
# for t in range(2):
#     # i = 1
#     # tr, dev, te = obtain_Sort_set("../../data/twitter_tr%d" % i,
#     #                               "../../data/twitter_dev%d" % i,
#     #                               "../../data/twitter_te%d" % i)
#     # tr, dev, te = Sort_data(tr, dev, te)
#     # print("%s : (dev event)/(test event)/(train event) = %3d/%3d/%3d" % (te.data[te.data_ID[0]]['event'], len(dev), len(te), len(tr)))
#     # print("\n\n===========Sort Train===========\n\n")
#     # model = obtain_BiGCN(tv)
#     # model.train_iters(tr, dev, te, max_epochs=50,
#     #                 log_dir=log_dir, log_suffix="Sort",
#     #                 model_file="../../saved/TFIDF_BiGCN_Sort.pkl")
#     # # te_loader = DataLoader(te, batch_size=5, shuffle=False, collate_fn=te.collate_raw_batch)
#     # # rst = model.valid(te_loader, pretrained_file="../../saved/TFIDF_BiGCN_Sort.pkl", all_metrics=True)
#     # # print("##Validation On Test Dataset####  te_acc:%3.4f, te_loss:%3.4f, te_prec:%3.4f, te_recall:%3.4f, te_f1:%3.4f"%rst)
#     # tr, dev, te = obtain_Sort_set("../../data/twitter_tr%d" % i,
#     #                               "../../data/twitter_dev%d" % i,
#     #                               "../../data/twitter_te%d" % i)
#     # tr, dev, te = shuffle_data(tr, dev, te)
#     # print("%s : (dev event)/(test event)/(train event) = %3d/%3d/%3d" % (te.data[te.data_ID[0]]['event'], len(dev), len(te), len(tr)))
#     # print("\n\n===========Sort Train===========\n\n")
#     # model = obtain_BiGCN(tv)
#     # model.train_iters(tr, dev, te, max_epochs=50,
#     #                 log_dir=log_dir, log_suffix="General",
#     #                 model_file="../../saved/TFIDF_BiGCN_Sort.pkl")
#
#     for i in [1, 4]:
#         tr, dev = obtain_Domain_set("../../data/twitter_tr%d" % i, "../../data/twitter_dev%d" % i, "../../data/twitter_te%d" % i)
#         dev, te = dev.BalancedSplit(percent=[0.2, 1.0])
#         test_event_name = te.data[te.data_ID[0]]['event']
#         print("positive instance in dev: %3d / %3d"%(torch.tensor(dev.data_y).argmax(dim=1).sum(), len(dev)))
#         print("positive instance in te: %3d / %3d"%(torch.tensor(te.data_y).argmax(dim=1).sum(), len(te)))
#         print("%s : (dev event)/(test event)/(train event) = %3d/%3d/%3d" % (
#         test_event_name, len(dev), len(te), len(tr)))
#         print("\n\n===========%s Train===========\n\n"%te.data[te.data_ID[0]]['event'])
#         model = obtain_BiGCN(tv)
#         model.train_iters(tr, dev, te, max_epochs=20,
#                           log_dir=log_dir, log_suffix=test_event_name,
#                           model_file="../../saved/TFIDF_BiGCN_%s.pkl" %test_event_name)

def _obtain_split_set(tr_prefix, dev_prefix, te_prefix):
    """Load the cached train/dev/test BiGCNTwitterSet triple for one fold."""
    splits = []
    for prefix in (tr_prefix, dev_prefix, te_prefix):
        split = BiGCNTwitterSet()
        split.load_data_fast(data_prefix=prefix)
        splits.append(split)
    return splits

# Leave-one-event-out training over the five twitter folds.
for i in range(5):
    # BUG FIX: the original called obtain_Sort_set, which is not defined in
    # this file (it survives only inside the commented-out experiment block
    # above) and would raise NameError on the first iteration.  Load the
    # three splits directly instead, mirroring obtain_Domain_set's logic.
    tr, dev, te = _obtain_split_set("../../data/twitter_tr%d" % i,
                                    "../../data/twitter_dev%d" % i,
                                    "../../data/twitter_te%d" % i)
    # The held-out event name tags the log files and the saved model.
    test_event_name = te.data[te.data_ID[0]]['event']
    print("%s : (dev event)/(test event)/(train event) = %3d/%3d/%3d" % (
        test_event_name, len(dev), len(te), len(tr)))
    print("\n\n===========%s Train===========\n\n" % test_event_name)
    model = obtain_BiGCN(tv)
    model.train_iters(tr, dev, te, max_epochs=20,
                      log_dir=log_dir, log_suffix=test_event_name,
                      model_file="../../saved/TFIDF_BiGCN_%s.pkl" % test_event_name)

