import sys
sys.path.append("..")
sys.path.append("../..")
from RumdetecFramework.GraphRumorDect import SITERBiGCN
from Dataloader.weiboloader import BiGCNWeiboSet, topics
from Dataloader.twitterloader import SentiReader
from Dataloader.dataloader_utils import Sort_data, shuffle_data, Lemma_Factory
from SentModel.Sent2Vec import TFIDFBasedVec_CN, W2VRDMVec
from PropModel.GraphPropagation import BiGCN
import torch.nn as nn
import pandas as pd
import pkuseg
from torch.utils.data import DataLoader
from sklearn.feature_extraction.text import TfidfVectorizer
import os
import pickle

# Optional first CLI argument selects which GPU(s) this process may see.
# len(sys.argv) is always >= 1, so "> 1" is equivalent to the original "!= 1".
if len(sys.argv) > 1:
    os.environ['CUDA_VISIBLE_DEVICES'] = sys.argv[1]

def obtain_model(pretrained_vectorizer):
    """Assemble a SITER-BiGCN rumor-detection model.

    pretrained_vectorizer: a fitted TF-IDF vectorizer wrapped by TFIDFBasedVec_CN
    to produce sentence embeddings; a second word2vec-based embedder is built
    around a pkuseg segmenter. Returns the composed SITERBiGCN model.
    """
    tfidf_embedder = TFIDFBasedVec_CN(
        pretrained_vectorizer,
        20,
        embedding_size=300,
        w2v_file="../../word2vec_CN_WeiboBi.pkl",
        emb_update=True,
    )
    propagation = BiGCN(300, 256)
    classifier = nn.Linear(1024, 2)
    # Empty user dictionary: rely entirely on pkuseg's built-in vocabulary.
    segmenter = pkuseg.pkuseg(user_dict=[])
    w2v_embedder = W2VRDMVec("../../saved/word2vec_cn", 300, seg=segmenter)
    return SITERBiGCN(
        tfidf_embedder, w2v_embedder, propagation, classifier,
        batch_size=20, grad_accum_cnt=1,
    )

def obtain_Sort_set(test_topic_index, prefix_tuple=None):
    """Return (train, dev, test) BiGCNWeiboSet instances.

    When cached "<prefix>_dict.txt" files exist for all three prefixes, load
    via the fast path; otherwise rebuild from the raw csv (category == 1 rows
    are split in half into dev/test, the rest is training data) and cache.

    test_topic_index: index into `topics` used to derive the default prefixes.
    prefix_tuple: optional (train, dev, test) prefix override.
    """
    if prefix_tuple is not None:
        tr_prefix, dev_prefix, te_prefix = prefix_tuple
    else:
        topic = topics[test_topic_index]
        tr_prefix = "../../data/weibo_tr_%s" % topic
        dev_prefix = "../../data/weibo_dev_%s" % topic
        te_prefix = "../../data/weibo_te_%s" % topic

    prefixes = (tr_prefix, dev_prefix, te_prefix)
    tr_set, dev_set, te_set = BiGCNWeiboSet(), BiGCNWeiboSet(), BiGCNWeiboSet()
    datasets = (tr_set, dev_set, te_set)

    if all(os.path.exists("%s_dict.txt" % p) for p in prefixes):
        for dataset, prefix in zip(datasets, prefixes):
            dataset.load_data_fast(data_prefix=prefix)
    else:
        big_df = pd.read_csv("../../data/weibo_ids.csv")
        dev_te = big_df[big_df['category'] == 1]
        half = len(dev_te) // 2
        frames = (big_df[big_df['category'] != 1],  # train: everything not category 1
                  dev_te.iloc[:half],               # dev: first half of category 1
                  dev_te.iloc[half:])               # test: second half of category 1
        for dataset, frame, prefix in zip(datasets, frames, prefixes):
            dataset.load_data(weibo_dir="../../data/Weibo",
                              weibo_df=frame,
                              cached_prefix=prefix)
    return tr_set, dev_set, te_set

import shutil

# Derive the log directory name from this script's path ("foo.py" -> "foo").
# BUG FIX: the original used str(__file__).rstrip(".py"), but rstrip strips a
# *character set* {'.', 'p', 'y'}, corrupting any name ending in those letters
# (e.g. "happy.py" -> "ha"). splitext removes only the real extension.
log_dir = os.path.splitext(str(__file__))[0]
# Start from a clean, empty log directory; portable replacements for the
# shell-dependent `os.system("rm -rf ...")` / `os.system("mkdir ...")` calls.
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)
os.makedirs(log_dir)

# Placeholder list of checkpoint paths (appears unused later in this script).
model_Paths = [""]

# Subjectivity-annotated Chinese sentence sets: train / dev / test splits.
subj_tr, subj_dev, subj_te = (
    SentiReader("../../data/sub_%s_CN.csv" % split)
    for split in ("tr", "dev", "te")
)

# Load a cached TF-IDF vectorizer, or fit one over the full Weibo corpus
# (train + dev + test texts, lemmatized) and cache it for future runs.
Tf_Idf_weibo_file = "../../saved/TfIdf_weibo.pkl"
if not os.path.exists(Tf_Idf_weibo_file):
    i = 1
    tr, dev, te = obtain_Sort_set(i)
    lemma = Lemma_Factory()
    corpus = [
        " ".join(lemma(txt))
        for data in [tr, dev, te]
        for ID in data.data_ID
        for txt in data.data[ID]['text']
    ]
    tv = TfidfVectorizer(use_idf=True, smooth_idf=True, norm=None)
    tv.fit_transform(corpus)
    with open(Tf_Idf_weibo_file, "wb") as fw:
        pickle.dump(tv, fw, protocol=pickle.HIGHEST_PROTOCOL)
else:
    with open(Tf_Idf_weibo_file, "rb") as fr:
        tv = pickle.load(fr)

i = 1
tr, dev, te = obtain_Sort_set(i, prefix_tuple=(
    "../../data/weibo_tr_sort", "../../data/weibo_dev_sort", "../../data/weibo_te_sort"
))
print("\n\n===========Sort Test===========\n\n")
model = obtain_model(tv)
te_loader = DataLoader(te, batch_size=5, shuffle=False, collate_fn=te.collate_raw_batch)
# Every saved Sort-experiment checkpoint, excluding sentence-embedding files.
pkl_list = [
    "../../saved/" + name
    for name in os.listdir("../../saved")
    if "TFIDF_BiGCN-SITER_Weibo_Sort" in name and "sent.pkl" not in name
]
for pkl_file in pkl_list:
    print("\n" + pkl_file + " : ")
    try:
        rst = model.valid(te_loader, pretrained_file=pkl_file, all_metrics=True)
    except FileNotFoundError:
        # Checkpoint lacks the sentence-embedding file: retry with embedding
        # updates disabled, then restore the flag for the next checkpoint.
        model.sent2vec.emb_update = False
        rst = model.valid(te_loader, pretrained_file=pkl_file, all_metrics=True)
        model.sent2vec.emb_update = True
    print(
        "##Validation On Test Dataset####  te_acc:%3.4f, te_loss:%3.4f, te_prec:%3.4f, te_recall:%3.4f, te_f1:%3.4f" % rst)


print("\n\n===========General Test===========\n\n")
tr, dev, te = obtain_Sort_set(i, prefix_tuple=(
    "../../data/weibo_tr", "../../data/weibo_dev", "../../data/weibo_te"
))
model = obtain_model(tv)
te_loader = DataLoader(te, batch_size=5, shuffle=False, collate_fn=te.collate_raw_batch)
# Every saved General-experiment checkpoint, excluding sentence-embedding files.
pkl_list = [
    "../../saved/" + name
    for name in os.listdir("../../saved")
    if "TFIDF_BiGCN-SITER_Weibo_Genera" in name and "sent.pkl" not in name
]
for pkl_file in pkl_list:
    print("\n" + pkl_file + " : ")
    try:
        rst = model.valid(te_loader, pretrained_file=pkl_file, all_metrics=True)
    except FileNotFoundError:
        # Checkpoint lacks the sentence-embedding file: retry with embedding
        # updates disabled, then restore the flag for the next checkpoint.
        model.sent2vec.emb_update = False
        rst = model.valid(te_loader, pretrained_file=pkl_file, all_metrics=True)
        model.sent2vec.emb_update = True
    print(
        "##Validation On Test Dataset####  te_acc:%3.4f, te_loss:%3.4f, te_prec:%3.4f, te_recall:%3.4f, te_f1:%3.4f" % rst)


# Per-topic leave-one-out evaluation over the first five Weibo topics.
for i in range(5):
    tr, dev, te = obtain_Sort_set(i)
    test_file = "TFIDF_BiGCN-SITER_Weibo_%s" % topics[i]
    print("\n\n===========%s Test===========\n\n" % topics[i])
    model = obtain_model(tv)
    te_loader = DataLoader(te, batch_size=5, shuffle=False, collate_fn=te.collate_raw_batch)
    # All checkpoints for this topic, excluding sentence-embedding files.
    pkl_list = [
        "../../saved/" + name
        for name in os.listdir("../../saved")
        if test_file in name and "sent.pkl" not in name
    ]
    for pkl_file in pkl_list:
        print("\n" + pkl_file + " : ")
        try:
            rst = model.valid(te_loader, pretrained_file=pkl_file, all_metrics=True)
        except FileNotFoundError:
            # Checkpoint lacks the sentence-embedding file: retry with embedding
            # updates disabled, then restore the flag for the next checkpoint.
            model.sent2vec.emb_update = False
            rst = model.valid(te_loader, pretrained_file=pkl_file, all_metrics=True)
            model.sent2vec.emb_update = True
        print(
            "##Validation On Test Dataset####  te_acc:%3.4f, te_loss:%3.4f, te_prec:%3.4f, te_recall:%3.4f, te_f1:%3.4f" % rst)
