import sys
sys.path.append("..")
sys.path.append("../..")
from Dataloader.weiboloader import WeiboSet, topics
from Dataloader.twitterloader import SentiReader
from RumdetecFramework.BaseRumorFramework import SubjEnhancedFramework
from SentModel.Sent2Vec import W2VRDMVec
from PropModel.SeqPropagation import GRUModel
import pkuseg
import os
import pandas as pd
import torch.nn as nn
from torch.utils.data import DataLoader

# Optional GPU pin: if a CLI argument is supplied, treat the first one as
# the CUDA device index to expose to this process.
if len(sys.argv) > 1:
    os.environ['CUDA_VISIBLE_DEVICES'] = sys.argv[1]

def obtain_RDM():
    """Build the subjectivity-enhanced rumor-detection framework.

    Two 300-dim word2vec sentence encoders share one pkuseg segmenter
    (empty user dictionary); a single-layer GRU (hidden size 256,
    dropout 0.2) consumes the propagation sequence, and a two-layer MLP
    maps the 256-dim state to 2-class logits.
    """
    segmenter = pkuseg.pkuseg(user_dict=[])
    encoder_a = W2VRDMVec("../../saved/word2vec_cn", 300, seg=segmenter)
    encoder_b = W2VRDMVec("../../saved/word2vec_cn", 300, seg=segmenter)
    propagator = GRUModel(300, 256, 1, 0.2)
    classifier = nn.Sequential(
        nn.Linear(256, 512),
        nn.ReLU(),
        nn.Linear(512, 2),
    )
    return SubjEnhancedFramework(encoder_a, encoder_b, propagator, classifier,
                                 batch_size=20, grad_accum_cnt=1)

def obtain_Sort_set(test_topic_index, prefix_tuple=None):
    """Return (train, dev, test) WeiboSet splits, using on-disk caches.

    When *prefix_tuple* is None the cache prefixes are derived from the
    topic name at *test_topic_index*; otherwise the three prefixes are
    taken verbatim from the tuple. If all three "<prefix>_dict.txt"
    cache files exist, the splits are loaded fast from them; otherwise
    they are rebuilt from ../../data/weibo_ids.csv (rows with
    category == 1 are halved into dev/test, the remainder is train) and
    written back under the same prefixes.
    """
    if prefix_tuple is None:
        topic = topics[test_topic_index]
        tr_prefix = "../../data/weibo_tr_%s" % topic
        dev_prefix = "../../data/weibo_dev_%s" % topic
        te_prefix = "../../data/weibo_te_%s" % topic
    else:
        tr_prefix, dev_prefix, te_prefix = prefix_tuple

    tr_set, dev_set, te_set = WeiboSet(), WeiboSet(), WeiboSet()
    prefixes = (tr_prefix, dev_prefix, te_prefix)
    subsets = (tr_set, dev_set, te_set)

    if all(os.path.exists("%s_dict.txt" % p) for p in prefixes):
        # All three caches are present: load directly.
        for subset, prefix in zip(subsets, prefixes):
            subset.load_data_fast(data_prefix=prefix)
    else:
        # Rebuild the splits from the master id table and cache them.
        big_df = pd.read_csv("../../data/weibo_ids.csv")
        held_out = big_df[big_df['category'] == 1]
        half = len(held_out) // 2
        frames = (big_df[big_df['category'] != 1],
                  held_out.iloc[:half],
                  held_out.iloc[half:])
        for subset, frame, prefix in zip(subsets, frames, prefixes):
            subset.load_data(weibo_dir="../../data/Weibo",
                             weibo_df=frame,
                             cached_prefix=prefix)
    return tr_set, dev_set, te_set

# Subjectivity corpora (Chinese) — presumably consumed by the framework's
# auxiliary sentiment task; not referenced again in this file, so verify
# against SubjEnhancedFramework before removing.
subj_tr = SentiReader("../../data/sub_tr_CN.csv")
subj_dev = SentiReader("../../data/sub_dev_CN.csv")
subj_te = SentiReader("../../data/sub_te_CN.csv")

# Per-run log directory named after this script (e.g. foo.py -> foo/),
# recreated empty on every run.
# BUG FIX: the original used str(__file__).rstrip(".py"), but rstrip strips
# any trailing run of the *characters* '.', 'p', 'y' — a script named
# "copy.py" would yield "co". os.path.splitext removes exactly the extension.
log_dir = os.path.splitext(__file__)[0]
if os.path.exists(log_dir):
    # NOTE(review): shell removal kept from the original; shutil.rmtree
    # would be the portable alternative.
    os.system("rm -rf %s" % log_dir)
os.makedirs(log_dir)

def _evaluate_checkpoints(title, match_token, topic_index, prefix_tuple=None):
    """Evaluate every matching saved checkpoint on one test split.

    Loads the (tr, dev, te) splits via obtain_Sort_set, builds a fresh
    model, then runs model.valid() on the test loader for each file in
    ../../saved whose name contains *match_token*, "SITER_" and
    "_Weibo_" but not "sent.pkl", printing accuracy/loss/precision/
    recall/F1 for each.

    Extracted because the original script repeated this ~22-line loop
    three times verbatim (Sort / General / per-topic tests).
    """
    tr, dev, te = obtain_Sort_set(topic_index, prefix_tuple=prefix_tuple)
    print("\n\n===========%s===========\n\n" % title)
    model = obtain_RDM()
    te_loader = DataLoader(te, batch_size=5, shuffle=False,
                           collate_fn=te.collate_raw_batch)
    pkl_list = ["../../saved/" + pkl_file for pkl_file in os.listdir("../../saved")
                if ((match_token in pkl_file) and
                    ("sent.pkl" not in pkl_file) and
                    ("SITER_" in pkl_file) and
                    ("_Weibo_" in pkl_file))]
    for pkl_file in pkl_list:
        print("\n" + pkl_file + " : ")
        try:
            rst = model.valid(te_loader, pretrained_file=pkl_file, all_metrics=True)
        except FileNotFoundError:
            # Some checkpoints apparently lack the embedding file; retry with
            # embedding updates disabled, then restore the flag.
            model.sent2vec.emb_update = False
            rst = model.valid(te_loader, pretrained_file=pkl_file, all_metrics=True)
            model.sent2vec.emb_update = True
        print(
            "##Validation On Test Dataset####  te_acc:%3.4f, te_loss:%3.4f, te_prec:%3.4f, te_recall:%3.4f, te_f1:%3.4f" % rst)


# 1) Topic-sorted split, 2) general split, 3) per-topic held-out splits.
_evaluate_checkpoints("Sort Test", "RDM_Sort", 1, prefix_tuple=(
    "../../data/weibo_tr_sort", "../../data/weibo_dev_sort", "../../data/weibo_te_sort"
))
_evaluate_checkpoints("General Test", "RDM_General", 1, prefix_tuple=(
    "../../data/weibo_tr", "../../data/weibo_dev", "../../data/weibo_te"
))
for i in [1, 5, 6]:
    _evaluate_checkpoints("%s Test" % topics[i], "RDM_Weibo_%s" % topics[i], i)