import os
import shutil
import sys

# Make the project packages importable before the project-local imports below.
sys.path.append("..")
sys.path.append("../..")

import torch.nn as nn

from RumdetecFramework.BaseRumorFramework import RumorDetection
from Dataloader.twitterloader import *
from Dataloader.dataloader_utils import *
from SentModel.Sent2Vec import *
from PropModel.SeqPropagation import GRUModel

def obtain_BertRD():
    """Assemble a fresh BERT-based rumor-detection model.

    The model is composed of three parts: a BERT sentence encoder
    (768-d output), a GRU propagation module (768 -> 256, 1 layer,
    dropout 0.2), and a linear head mapping to 2 classes.
    """
    encoder = BertVec("../../../bert_en/", bert_parallel=True)
    propagation = GRUModel(768, 256, 1, 0.2)
    classifier = nn.Linear(256, 2)
    return RumorDetection(encoder, propagation, classifier)

def obtain_general_set(tr_prefix, dev_prefix, te_prefix):
    """Load the train/dev/test Twitter splits from the given data prefixes.

    Only the training split is length-filtered at load time
    (min_len=5); the dev and test splits are loaded as-is.
    Returns (train_set, dev_set, test_set).
    """
    splits = []
    for idx, prefix in enumerate((tr_prefix, dev_prefix, te_prefix)):
        split = TwitterSet()
        if idx == 0:
            # Training split: discard very short sequences up front.
            split.load_data_fast(data_prefix=prefix, min_len=5)
        else:
            split.load_data_fast(data_prefix=prefix)
        splits.append(split)
    return splits[0], splits[1], splits[2]

# Derive a per-script log directory from this file's name with the ".py"
# extension removed.
# BUG FIX: the original used str.rstrip(".py"), which strips any run of the
# characters '.', 'p', 'y' from the end of the name (e.g. "copy.py" -> "co"),
# not the ".py" suffix. os.path.splitext removes exactly the extension.
log_dir = os.path.splitext(str(__file__))[0]
# Start every run from an empty log directory. shutil/os calls replace the
# shell-dependent os.system("mkdir ...")/os.system("rm -rf ...") pair and
# are robust to spaces or shell metacharacters in the path.
if os.path.exists(log_dir):
    shutil.rmtree(log_dir)
os.makedirs(log_dir)

# Repeat the full experiment 10 times.
# NOTE(review): the loop variable t is never used and i is reset to 1 each
# iteration, so all 10 repetitions train on the exact same fold (split 1).
# Confirm whether t was meant to select the fold (i = t?) or this is a
# deliberate multi-seed rerun.
for t in range(10):
    i=1
    # ---- Setting 1: "Sort" — splits are reordered via Sort_data before
    # training (semantics of Sort_data live in Dataloader.dataloader_utils;
    # presumably it orders sequences — TODO confirm).
    tr, dev, te = obtain_general_set("../../data/twitter_tr%d"%i, "../../data/twitter_dev%d"%i, "../../data/twitter_te%d"%i)
    tr, dev, te = Sort_data(tr, dev, te)
    # Drop training sequences shorter than 5 posts (already filtered at load
    # time inside obtain_general_set — this second pass looks redundant but is
    # harmless) and cap sequence length at 10.
    tr.filter_short_seq(min_len=5)
    tr.trim_long_seq(10)
    print("%s : (dev event)/(test event)/(train event) = %3d/%3d/%3d" % (te.data[te.data_ID[0]]['event'], len(dev), len(te), len(tr)))
    # Train a fresh model for up to 20 epochs; checkpoint written to model_file.
    model = obtain_BertRD()
    model.train_iters(tr, dev, te, max_epochs=20,
                    log_dir=log_dir, log_suffix="Sort",
                    model_file="../../saved/BertRD_Sort_twitter.pkl")
    # te_loader = DataLoader(te, batch_size=5, shuffle=False, collate_fn=te.collate_raw_batch)
    # rst = model.valid(te_loader, pretrained_file="../../saved/BertRD_Sort_twitter.pkl", all_metrics=True)
    # print("##Validation On Test Dataset####  te_acc:%3.4f, te_loss:%3.4f, te_prec:%3.4f, te_recall:%3.4f, te_f1:%3.4f"%rst)

    # ---- Setting 2: "General" — same fold, but shuffled via shuffle_data
    # instead of sorted; trained from scratch with a fresh model.
    tr, dev, te = obtain_general_set("../../data/twitter_tr%d"%i, "../../data/twitter_dev%d"%i, "../../data/twitter_te%d"%i)
    tr, dev, te = shuffle_data(tr, dev, te)
    tr.filter_short_seq(min_len=5)
    tr.trim_long_seq(10)
    print("%s : (dev event)/(test event)/(train event) = %3d/%3d/%3d" % (te.data[te.data_ID[0]]['event'], len(dev), len(te), len(tr)))
    model = obtain_BertRD()
    model.train_iters(tr, dev, te, max_epochs=20,
                    log_dir=log_dir, log_suffix="General",
                    model_file="../../saved/BertRD_General_twitter.pkl")

    # ---- Setting 3: per-event sub-models — one model per fold 0..4, each
    # logged/saved under the test split's event name.
    # NOTE(review): this reuses i as the inner loop variable (shadowing the
    # i=1 above; harmless only because i is reset each outer iteration) and
    # loads from "../data/..." while the runs above use "../../data/..." —
    # verify the path difference is intentional.
    for i in range(5):
        tr, dev, te = obtain_general_set("../data/twitter_tr%d"%i, "../data/twitter_dev%d"%i, "../data/twitter_te%d"%i)
        tr.filter_short_seq(min_len=5)
        tr.trim_long_seq(10)
        print("%s : (dev event)/(test event)/(train event) = %3d/%3d/%3d" % (te.data[te.data_ID[0]]['event'], len(dev), len(te), len(tr)))
        print("\n\n===========SubRDM Train===========\n\n")
        model = obtain_BertRD()
        model.train_iters(tr, dev, te, max_epochs=20,
                        log_dir=log_dir, log_suffix=te.data[te.data_ID[0]]['event'],
                        model_file="BertRD_%s.pkl"% (te.data[te.data_ID[0]]['event']))
