import os
import pickle
import shutil
import sys

sys.path.append("..")
sys.path.append("../..")

import pandas as pd
import torch.nn as nn
from sklearn.feature_extraction.text import TfidfVectorizer
from torch.utils.data import DataLoader

from RumdetecFramework.GraphRumorDect import RvNNRumorDetec
from Dataloader.weiboloader import TreeWeiboSet, topics
from Dataloader.dataloader_utils import shuffle_data, Lemma_Factory, Sort_data
from SentModel.Sent2Vec import TFIDFBasedVec_CN
from PropModel.GraphPropagation import TD_RvNN

# Optional first CLI argument pins the visible CUDA device id(s) for this run.
if sys.argv[1:]:
    os.environ['CUDA_VISIBLE_DEVICES'] = sys.argv[1]

def obtain_model(pretrained_vectorizer):
    """Build a fresh TD-RvNN rumor detector on top of a fitted TF-IDF vectorizer.

    pretrained_vectorizer -- an already-fitted sklearn TfidfVectorizer used by
                             the Chinese sentence encoder.
    Returns an untrained RvNNRumorDetec wrapping encoder, propagation net and
    a binary linear classifier head.
    """
    sentence_encoder = TFIDFBasedVec_CN(pretrained_vectorizer, 20, embedding_size=300,
                                        w2v_file="../../word2vec_CN_WeiboBi.pkl",
                                        emb_update=True)
    propagation_net = TD_RvNN(300, 256)
    classifier_head = nn.Linear(256, 2)
    return RvNNRumorDetec(sentence_encoder, propagation_net, classifier_head,
                          batch_size=20, grad_accum_cnt=1)

def obtain_Sort_set(test_topic_index, prefix_tuple=None):
    """Load the (train, dev, test) TreeWeiboSet splits, preferring cached files.

    test_topic_index -- index into `topics`; selects the per-topic cache
                        prefixes when `prefix_tuple` is not supplied.
    prefix_tuple     -- optional (train, dev, test) cache-path prefixes that
                        override the topic-derived defaults.
    Returns (tr_set, dev_set, te_set). If all three "<prefix>_dict.txt" caches
    exist they are loaded fast; otherwise the splits are rebuilt from the raw
    weibo_ids.csv (category==1 rows halved into dev/test) and re-cached.
    """
    if prefix_tuple is not None:
        tr_prefix, dev_prefix, te_prefix = prefix_tuple
    else:
        topic = topics[test_topic_index]
        tr_prefix = "../../data/weibo_tr_%s" % topic
        dev_prefix = "../../data/weibo_dev_%s" % topic
        te_prefix = "../../data/weibo_te_%s" % topic

    prefixes = (tr_prefix, dev_prefix, te_prefix)
    splits = tuple(TreeWeiboSet() for _ in prefixes)

    if all(os.path.exists("%s_dict.txt" % p) for p in prefixes):
        # Every cache file is present: skip the expensive raw parse.
        for split, prefix in zip(splits, prefixes):
            split.load_data_fast(data_prefix=prefix)
    else:
        big_df = pd.read_csv("../../data/weibo_ids.csv")
        dev_te = big_df[big_df['category'] == 1]
        half = len(dev_te) // 2
        # Train on everything outside category 1; split category 1 in half
        # for dev (first half) and test (second half).
        frames = (big_df[big_df['category'] != 1],
                  dev_te.iloc[:half],
                  dev_te.iloc[half:])
        for split, frame, prefix in zip(splits, frames, prefixes):
            split.load_data(weibo_dir="../../data/Weibo",
                            weibo_df=frame,
                            cached_prefix=prefix)

    tr_set, dev_set, te_set = splits
    return tr_set, dev_set, te_set

# Placeholder for checkpoint paths; kept as-is for compatibility (unused here).
model_Paths = [""]

Tf_Idf_weibo_file = "../../saved/TfIdf_weibo.pkl"
if not os.path.exists(Tf_Idf_weibo_file):
    # No cached vectorizer: fit one over the lemmatized text of every split,
    # then persist it so later runs can skip this pass.
    lemma = Lemma_Factory()
    i = 2
    tr, dev, te = obtain_Sort_set(i)
    corpus = []
    for data in [tr, dev, te]:
        for ID in data.data_ID:
            for txt in data.data[ID]['text']:
                corpus.append(" ".join(lemma(txt)))
    tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
    _ = tv.fit_transform(corpus)
    with open(Tf_Idf_weibo_file, "wb") as fw:
        pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)
else:
    # NOTE(review): unpickling a local cache file; assumed trusted.
    with open(Tf_Idf_weibo_file, "rb") as fr:
        tv = pickle.load(fr)

# Derive a per-script log directory from this file's name.
# BUG FIX: str.rstrip(".py") strips any trailing run of '.', 'p', 'y'
# characters (e.g. "copy.py" -> "co"), not the ".py" suffix; use
# os.path.splitext to drop the extension correctly.
log_dir = os.path.splitext(str(__file__))[0]
# Recreate the directory from scratch each run. shutil/os calls replace the
# shell-string os.system("rm -rf ..."/"mkdir ...") invocations, which were
# fragile with special characters in the path.
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)
os.mkdir(log_dir)

def _train_and_validate(tag, tr, dev, te, model_file, validate=True):
    """Train a fresh detector on one data split; optionally score it on te.

    tag        -- label used in the progress prints and as train_iters' log suffix
    tr/dev/te  -- TreeWeiboSet splits returned by obtain_Sort_set
    model_file -- checkpoint path passed to train_iters and reloaded for validation
    validate   -- when True, reload the checkpoint and print test metrics
    """
    print("%s : (dev event)/(test event)/(train event) = %3d/%3d/%3d" % (
        tag, len(dev), len(te), len(tr)))
    print("\n\n===========%s Train===========\n\n" % tag)
    model = obtain_model(tv)
    model.train_iters(tr, dev, te, max_epochs=20,
                      log_dir=log_dir, log_suffix=tag,
                      model_file=model_file)
    if validate:
        te_loader = DataLoader(te, batch_size=5, shuffle=False,
                               collate_fn=te.collate_raw_batch)
        rst = model.valid(te_loader, pretrained_file=model_file, all_metrics=True)
        print("##Validation On Test Dataset####  te_acc:%3.4f, te_loss:%3.4f,"
              " te_prec:%3.4f, te_recall:%3.4f, te_f1:%3.4f" % rst)


# Repeat every experiment 10 times to average out run-to-run variance.
for t in range(10):
    i = 2  # topic index forwarded to obtain_Sort_set (ignored when prefixes given)
    tr, dev, te = obtain_Sort_set(i, prefix_tuple=(
        "../../data/weibo_tr_sort", "../../data/weibo_dev_sort",
        "../../data/weibo_te_sort"))
    _train_and_validate("Sort", tr, dev, te,
                        "../../saved/TFIDF_TDRvNN_Weibo_Sort.pkl")

    # BUG FIX: the original printed the "General" split sizes *before*
    # reloading the splits, so it reported the Sort sizes. Load first,
    # then report.
    tr, dev, te = obtain_Sort_set(i, prefix_tuple=(
        "../../data/weibo_tr", "../../data/weibo_dev", "../../data/weibo_te"))
    _train_and_validate("General", tr, dev, te,
                        "../../saved/TFIDF_TDRvNN_Weibo_General.pkl")

    # Leave-one-topic-out runs: train only, no separate test-set validation.
    for i in [1, 5, 6]:
        tr, dev, te = obtain_Sort_set(i)
        _train_and_validate(topics[i], tr, dev, te,
                            "../../saved/TFIDF_TDRvNN_Weibo_%s.pkl" % topics[i],
                            validate=False)