import sys
from Dataloader.twitterloader import SentiReader
from Dataloader.dataloader_utils import shuffle_data
from Dataloader.twitterloader import TwitterSet
from SentModel.Sent2Vec import W2VRDMVec
from PropModel.SeqPropagation import GRUModel
from RumdetecFramework.BaseRumorFramework import SubjMTLFrameWork

import torch.nn as nn


def obtain_general_set(tr_prefix, dev_prefix, te_prefix, min_len=5):
    """Load the train / dev / test Twitter datasets from their file prefixes.

    Only the *training* set is length-filtered at load time (sequences
    shorter than ``min_len`` are dropped); dev and test are loaded in full
    so evaluation covers every example.

    Args:
        tr_prefix: path prefix of the training split data files.
        dev_prefix: path prefix of the dev split data files.
        te_prefix: path prefix of the test split data files.
        min_len: minimum sequence length kept in the training set
            (default 5, matching the original hard-coded value).

    Returns:
        Tuple ``(tr_set, dev_set, te_set)`` of loaded ``TwitterSet`` objects.
    """
    tr_set = TwitterSet()
    # min_len filter applies to training data only, by design.
    tr_set.load_data_fast(data_prefix=tr_prefix, min_len=min_len)
    dev_set = TwitterSet()
    dev_set.load_data_fast(data_prefix=dev_prefix)
    te_set = TwitterSet()
    te_set.load_data_fast(data_prefix=te_prefix)
    return tr_set, dev_set, te_set



# --- Auxiliary subjectivity-classification data (for the MTL framework) ---
# NOTE(review): presumably sentence-level subjectivity CSVs used as the
# auxiliary task of SubjMTLFrameWork — confirm against SentiReader.
subj_tr = SentiReader("../../data/sub_train.csv")
subj_dev = SentiReader("../../data/sub_dev.csv")
subj_te = SentiReader("../../data/sub_test.csv")

# Data split index baked into the file names (twitter_tr1 / twitter_dev1 /
# twitter_te1). NOTE(review): looks like a cross-validation fold selector —
# confirm; only split 1 is run here.
i = 1
tr, dev, te = obtain_general_set("../../data/twitter_tr%d"%i,
                                 "../../data/twitter_dev%d"%i,
                                 "../../data/twitter_te%d"%i)
# Shuffle all three splits (see Dataloader.dataloader_utils.shuffle_data).
tr, dev, te = shuffle_data(tr, dev, te)
# Drop short sequences from the training set. NOTE(review): likely redundant —
# load_data_fast was already called with min_len=5 for the training split.
tr.filter_short_seq(min_len=5)
tr.ResortSample(6)

# Model components:
#   sent2vec — sentence encoder over 300-d embeddings loaded from the glove_en
#              directory (presumably GloVe vectors — confirm).
#   prop     — GRU propagation model: input 300, hidden 256, 1 layer,
#              dropout 0.2 (argument order assumed from GRUModel — verify).
#   rdm_cls  — binary rumor/non-rumor classification head on the 256-d state.
sent2vec = W2VRDMVec("../../saved/glove_en/", 300)
prop = GRUModel(300, 256, 1, 0.2)
rdm_cls = nn.Linear(256, 2)

# Multi-task rumor-detection framework; grad_accum_cnt=1 means no gradient
# accumulation (update every batch).
model = SubjMTLFrameWork(sent2vec, prop, rdm_cls, batch_size=20, grad_accum_cnt=1)
# model.joint_train_iters(tr, dev, te, subj_tr, subj_dev, subj_te, model_file="tmp.pkl")
# Plain (non-joint) training: validate every 100 steps, up to 100 epochs;
# best-* metrics start at 0.0 so any result becomes the initial best.
model.train_iters(tr, dev, te,
                    valid_every=100, max_epochs=100, lr_discount=1.0,
                    best_valid_acc=0.0, best_test_acc=0.0, best_valid_test_acc=0.0,
                    log_dir="./", log_suffix="_RumorDetection", model_file="SelfAttn_twitter_general.pkl")