import sys
sys.path.append("..")
sys.path.append("../..")
from RumdetecFramework.BaseRumorFramework import RumorDetection
from Dataloader.weiboloader import WeiboSet, topics
from Dataloader.dataloader_utils import *
from SentModel.Sent2Vec import *
from PropModel.SeqPropagation import *
from nltk.stem import WordNetLemmatizer
import torch.nn as nn
from torch.utils.data import DataLoader
import nltk
from sklearn.feature_extraction.text import TfidfVectorizer
import os
import pickle

# An optional first CLI argument selects which GPU(s) CUDA may see.
# (sys.argv always contains at least the script name, so > 1 means
# an extra argument was supplied.)
if len(sys.argv) > 1:
    os.environ['CUDA_VISIBLE_DEVICES'] = sys.argv[1]

def Lemma_Factory():
    """Build and return a POS-aware lemmatizing function.

    Returns:
        A callable ``lemma(word_tokens)`` that POS-tags the given list of
        word tokens with ``nltk.pos_tag`` and lemmatizes each token with
        the WordNet POS matching its tag prefix (J->adjective, V->verb,
        N->noun, R->adverb). Tokens with any other tag are returned
        unchanged. The WordNetLemmatizer is created once and shared by
        every call via the closure.
    """
    lemmatizer = WordNetLemmatizer()
    # First letter of a Penn Treebank tag -> WordNet POS code.
    _TAG_TO_POS = {'J': 'a', 'V': 'v', 'N': 'n', 'R': 'r'}

    def lemma(word_tokens):
        new_words = []
        for word, tag in nltk.pos_tag(word_tokens):
            pos = _TAG_TO_POS.get(tag[:1])
            # Tags outside the mapping (punctuation, pronouns, ...) pass
            # through untouched, matching the original if/elif fallthrough.
            new_words.append(lemmatizer.lemmatize(word, pos) if pos else word)
        return new_words
    return lemma

def obtain_LSTMRD(pretrained_vectorizer):
    """Assemble the TF-IDF + LSTM rumor-detection model.

    Args:
        pretrained_vectorizer: a fitted TF-IDF vectorizer handed to the
            Chinese sentence encoder.

    Returns:
        A RumorDetection instance wiring sentence encoder -> LSTM
        propagation model -> binary linear classifier.
    """
    sentence_encoder = TFIDFBasedVec_CN(
        pretrained_vectorizer, 20, embedding_size=300,
        w2v_file="../../word2vec_CN_WeiboBi.pkl")
    propagation_model = LSTMModel(
        sent_hidden_size=300, prop_hidden_size=100,
        num_layers=1, dropout_prob=0.2)
    classifier = nn.Linear(100, 2)
    return RumorDetection(sentence_encoder, propagation_model, classifier,
                          batch_size=20, grad_accum_cnt=1)

def obtain_Sort_set(tr_prefix, dev_prefix, te_prefix):
    """Load the train/dev/test Weibo splits from their on-disk prefixes.

    Args:
        tr_prefix: data prefix for the training split.
        dev_prefix: data prefix for the development split.
        te_prefix: data prefix for the test split.

    Returns:
        A ``(tr_set, dev_set, te_set)`` tuple of WeiboSet instances.
    """
    def _load(prefix):
        # One WeiboSet per split, populated via the fast loader.
        dataset = WeiboSet()
        dataset.load_data_fast(data_prefix=prefix)
        return dataset

    return _load(tr_prefix), _load(dev_prefix), _load(te_prefix)

# Topic index into `topics` selecting which Weibo split files to load.
i = 1
tr, dev, te = obtain_Sort_set("../../data/weibo_tr_%s" %topics[i], "../../data/weibo_dev_%s" %topics[i], "../../data/weibo_te_%s" %topics[i])

# Fit a TF-IDF vectorizer over the lemmatized text of every post in all
# three splits, then pickle it so later runs can skip refitting.
# NOTE(review): WordNetLemmatizer/pos_tag target English tokens, yet this
# corpus is Weibo data — confirm the text here is actually English (or that
# the pass-through behavior on non-English tokens is intended).
lemma = Lemma_Factory()
corpus = [" ".join(lemma(txt)) for data in [tr, dev, te] for ID in data.data_ID for txt in data.data[ID]['text']]
tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
_ = tv.fit_transform(corpus)
with open("../../saved/TfIdf_weibo.pkl", "wb") as fw:
    pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)

# with open("../../saved/TfIdf_weibo.pkl", "rb") as fr:
#     tv = pickle.load(fr)

# Sort the splits (project helper) and bound the sequence lengths:
# drop training threads shorter than 5 posts, cap train threads at 10
# posts and dev/test threads at 20.
tr, dev, te = Sort_data(tr, dev, te)
tr.filter_short_seq(min_len=5)
tr.trim_long_seq(10)
dev.trim_long_seq(20)
te.trim_long_seq(20)


print("Weibo Sort: (dev event)/(test event)/(train event) = %3d/%3d/%3d" % (len(dev), len(te), len(tr)))
print("\n\n===========Sort Train===========\n\n")
# Build the model and evaluate a previously trained checkpoint on the test
# split (the train_iters call that produced the checkpoint is commented out).
model = obtain_LSTMRD(tv)
# model.train_iters(tr, dev, te, max_epochs=100,
#                 log_dir="../../logs/", log_suffix="_LSTMRD_weibo_Sort",
#                 model_file="../../saved/TFIDF_LSTMRD_weibo_Sort.pkl")
te_loader = DataLoader(te, batch_size=5, shuffle=False, collate_fn=te.collate_raw_batch)
# assumes model.valid with all_metrics=True returns a 5-tuple
# (acc, loss, precision, recall, f1) — TODO confirm against RumorDetection.
rst = model.valid(te_loader, pretrained_file="../../saved/TFIDF_LSTMRD_weibo_Sort.pkl", all_metrics=True)
print("##Validation On Test Dataset####  te_acc:%3.4f, te_loss:%3.4f, te_prec:%3.4f, te_recall:%3.4f, te_f1:%3.4f"%rst)
print(rst)

# for i in [1, 5, 6]:
#     tr, dev, te = obtain_Sort_set("../../data/weibo_tr_%s" %topics[i], "../../data/weibo_dev_%s" %topics[i], "../../data/weibo_te_%s" %topics[i])
#     tr.filter_short_seq(min_len=5)
#     tr.trim_long_seq(10)
#     print("%s : (dev event)/(test event)/(train event) = %3d/%3d/%3d" % (topics[i], len(dev), len(te), len(tr)))
#     print("\n\n===========%s Train===========\n\n"%topics[i])
#     model = obtain_LSTMRD(tv)
#     model.train_iters(tr, dev, te, max_epochs=100,
#                       log_dir="../../logs/", log_suffix="TFIDF_LSTMRD_%s" %topics[i],
#                       model_file="../../saved/TFIDF_LSTMRD_%s.pkl" %topics[i])
