import os
import shutil
import sys

# Make sibling project packages importable before the project imports below.
sys.path.append("..")
sys.path.append("../..")

import torch.nn as nn

from Dataloader.twitterloader import SentiReader
from Dataloader.twitterloader import TwitterSet
from Dataloader.dataloader_utils import shuffle_data, Sort_data
from SentModel.Sent2Vec import W2VRDMVec
from PropModel.SeqPropagation import GRUModel
from RumdetecFramework.BaseRumorFramework import SubjEnhancedFramework


def obtain_general_set(tr_prefix, dev_prefix, te_prefix):
    """Load the train/dev/test TwitterSet splits from the given path prefixes.

    The training split is loaded with ``min_len=5``; the dev and test splits
    use the loader's defaults.
    """
    def _load(prefix, **loader_kwargs):
        # One fresh TwitterSet per split, populated via the fast loader.
        split = TwitterSet()
        split.load_data_fast(data_prefix=prefix, **loader_kwargs)
        return split

    return _load(tr_prefix, min_len=5), _load(dev_prefix), _load(te_prefix)

def obtain_model():
    """Build a fresh SubjEnhancedFramework: two GloVe sentence encoders,
    a GRU propagation module, and a two-way MLP rumor classifier head.
    """
    glove_dir = "../../saved/glove_en/"
    emb_dim = 300
    # Separate encoder instances for the rumor and subjectivity branches.
    encoder_rumor = W2VRDMVec(glove_dir, emb_dim)
    encoder_subj = W2VRDMVec(glove_dir, emb_dim)
    propagation = GRUModel(emb_dim, 256, 1, 0.2)
    # 256 -> 512 -> 2 classifier over the propagation output.
    classifier = nn.Sequential(
        nn.Linear(256, 512),
        nn.ReLU(),
        nn.Linear(512, 2),
    )
    return SubjEnhancedFramework(encoder_rumor, encoder_subj, propagation,
                                 classifier, batch_size=20, grad_accum_cnt=1)


# Subjectivity (sentiment) corpora shared by every training run below.
subj_tr = SentiReader("../../data/sub_train.csv")
subj_dev = SentiReader("../../data/sub_dev.csv")
subj_te = SentiReader("../../data/sub_test.csv")

# Log directory named after this script with its extension removed.
# BUG FIX: the original `str(__file__).rstrip(".pkl")` strips trailing
# *characters* from the set {'.', 'p', 'k', 'l'}, not the ".pkl" suffix,
# so it mangles any name ending in those characters and does not reliably
# drop an extension. splitext removes exactly the final extension.
log_dir = os.path.splitext(str(__file__))[0]
# Recreate the log directory from scratch using the stdlib instead of
# shelling out: os.system("rm -rf %s" / "mkdir %s") breaks on paths with
# spaces or shell metacharacters and ignores failures silently.
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)
os.makedirs(log_dir)


# Run the full experiment suite ten times (repeated trials).
for t in range(10):
    # NOTE(review): i is hard-coded to 1, so the "Sort" and "General"
    # regimes below always train on split twitter_tr1/dev1/te1, and the
    # trial counter t is never used. Possibly `i = t` (or `i = t % 5`)
    # was intended — confirm against the experiment design.
    i = 1
    # Regime 1 ("Sort"): length-sorted data ordering.
    tr, dev, te = obtain_general_set("../../data/twitter_tr%d" % i,
                                     "../../data/twitter_dev%d" % i,
                                     "../../data/twitter_te%d" % i)
    tr, dev, te = Sort_data(tr, dev, te)
    tr.filter_short_seq(min_len=5)
    tr.ResortSample(6)
    model = obtain_model()
    model.joint_train_iters(tr, dev, te, subj_tr, subj_dev, subj_te,
                            valid_every=100, max_epochs=100, lr_discount=1.0,
                            best_valid_acc=0.0, best_test_acc=0.0, best_valid_test_acc=0.0,
                            log_dir=log_dir, log_suffix="Sort", model_file="../../saved/SubRDM_Sort.pkl")

    # Regime 2 ("General"): shuffled data ordering on the same split.
    tr, dev, te = obtain_general_set("../../data/twitter_tr%d" % i,
                                     "../../data/twitter_dev%d" % i,
                                     "../../data/twitter_te%d" % i)
    tr, dev, te = shuffle_data(tr, dev, te)
    tr.filter_short_seq(min_len=5)
    tr.ResortSample(6)
    model = obtain_model()
    model.joint_train_iters(tr, dev, te, subj_tr, subj_dev, subj_te,
                            valid_every=100, max_epochs=100, lr_discount=1.0,
                            best_valid_acc=0.0, best_test_acc=0.0, best_valid_test_acc=0.0,
                            log_dir=log_dir, log_suffix="General", model_file="../../saved/SubRDM_General.pkl")

    # Regime 3: sorted training over all five splits (presumably a
    # leave-one-event-out setup — the test split's event names each run).
    # NOTE(review): this inner loop rebinds i, shadowing the i = 1 above.
    for i in range(5):
        tr, dev, te = obtain_general_set("../../data/twitter_tr%d"%i,
                                         "../../data/twitter_dev%d"%i,
                                         "../../data/twitter_te%d"%i)
        tr, dev, te = Sort_data(tr, dev, te)
        tr.filter_short_seq(min_len=5)
        tr.ResortSample(6)
        # Event name of the first test sample, used to tag logs/checkpoints.
        test_event_name = te.data[te.data_ID[0]]['event']
        model = obtain_model()
        model.joint_train_iters(tr, dev, te, subj_tr, subj_dev, subj_te,
                            valid_every=100, max_epochs=100, lr_discount=1.0,
                            best_valid_acc=0.0, best_test_acc=0.0, best_valid_test_acc=0.0,
                            log_dir=log_dir, log_suffix=test_event_name,
                            model_file="../../saved/SubRDM_%s.pkl"%test_event_name)