import os
import pickle
import random

from torch.utils.data import DataLoader

from RumdetecFramework.SubRDM import SubRDM, RDM
from RumdetecFramework.SubRRD import SubRRD
from RumdetecFramework.AdverRRD import BertRD
from Dataloader.twitterloader import TwitterSet, shuffle_data, SubReader, BertEmbedding
from SentModel.Sent2Vec import *


def obtain_general_set(w2v_flag=False):
    """Load the cached train/dev/test Twitter splits.

    The training split drops sequences shorter than 5 items at load time.
    When ``w2v_flag`` is True, one shared BertEmbedding instance is attached
    to all three splits.
    """
    split_specs = (
        ("../data/twitter_tr4", {"min_len": 5}),
        ("../data/twitter_dev4", {}),
        ("../data/twitter_te4", {}),
    )
    splits = []
    for prefix, extra in split_specs:
        dataset = TwitterSet()
        dataset.load_data_fast(data_prefix=prefix, **extra)
        splits.append(dataset)
    if w2v_flag:
        shared_embedding = BertEmbedding("../../bert_en", "../saved/bert_embedding.pkl")
        for dataset in splits:
            dataset.w2v = shared_embedding
    return tuple(splits)

def obtain_normal_set():
    """Load the standard splits and return them after a joint shuffle."""
    splits = obtain_general_set()
    tr_set, dev_set, te_set = shuffle_data(*splits)
    return tr_set, dev_set, te_set

def NormalTrain_SubRDM(tr_set, dev_set, te_set):
    """Train a SubRDM detector on the standard splits.

    Warm-starts the subjectivity sub-model from a pretrained checkpoint and
    prints its validation score before the main training loop.
    """
    subjectivity_data = SubReader("../data/sub_train.csv")
    detector = SubRDM(dev_set, te_set)
    detector.subj_model.load_model(pretrained_file="../saved/best_sub_model.pkl")
    print("Sentiment Valid:", detector.subj_model.valid())
    detector.train(tr_set, subjectivity_data, saved_model_file="../saved/SubRDM_Normal.pkl")

def NormalTrain_SimpleRD(tr_set, dev_set, te_set):
    """Run the plain (SimpleTrain) RRD training loop on the standard splits."""
    config_path = "../saved/config.pkl"
    with open(config_path, "rb") as handle:
        config = pickle.load(handle)
    detector = SubRRD(dev_set, te_set, config, bert_dir="../../bert_en/")
    detector.SimpleTrain(tr_set, max_iters=10000, saved_model_file="../saved/SimpleRRD_Normal.pkl")

def NormalTrain_SubRRD(tr_set, dev_set, te_set):
    """Train the subjectivity-aware RRD model on the standard splits.

    Loads a cached subjectivity encoder when one exists; otherwise trains it
    from scratch before the main rumor-detection training.
    """
    # NOTE(review): all three subjectivity readers load the *training* CSV —
    # confirm whether dedicated dev/test subjectivity files were intended.
    subjectivity_csv = "../data/sub_train.csv"
    sub_tr = SubReader(subjectivity_csv)
    sub_dev = SubReader(subjectivity_csv)
    sub_te = SubReader(subjectivity_csv)

    with open("../saved/config.pkl", "rb") as handle:
        config = pickle.load(handle)
    # Override attention hyper-parameters stored in the cached config.
    config.num_attention_heads = 1
    config.attn_hidden_size = 256
    config.hidden_dropout_prob = 0.2

    detector = SubRRD(dev_set, te_set, config, bert_dir="../../bert_en/")
    cached_encoder = "../saved/subjModel.pkl"
    if os.path.exists(cached_encoder):
        detector.sent2vec.load_model(cached_encoder)
    else:
        detector.sent2vec.SentimenTrain(sub_tr, sub_dev, sub_te, learning_rate=2e-3)
    detector.sent2vec.SentiValid(sub_te)
    detector.trainIters(tr_set, sub_tr, max_iters=10000, saved_model_file="../saved/SubRRD_Normal.pkl")

def NormalTrain_RDM(tr_set, dev_set, te_set):
    """Train the baseline RDM with mini-batched DataLoaders over the splits."""
    model = RDM()
    tr_set.filter_short_seq(min_len=5)
    print("Training Setting: tr/dev/te = %3d/%3d/%3d"%(len(tr_set), len(dev_set), len(te_set)))

    def make_loader(dataset, shuffle):
        # Each split batches with its own collate function.
        return DataLoader(dataset, batch_size=20, shuffle=shuffle,
                          collate_fn=dataset.collate_df_batch)

    model.train(make_loader(tr_set, True),
                make_loader(dev_set, False),
                make_loader(te_set, False),
                saved_model_file="../saved/RDM_Normal.pkl")

def LoopTrainRDM(max_training_num=-1):
    """Leave-one-event-out training of the baseline RDM over the PHEME events.

    For each event directory, the held-out event is split in half into
    dev/test sets, and all remaining events form the training set.  When
    ``max_training_num`` != -1 the training set is subsampled to that size.
    """
    data_dir = "../../pheme-rnr-dataset"  # renamed: `dir` shadowed the builtin
    events = [os.path.join(data_dir, item) for item in os.listdir(data_dir)]
    events = [e for e in events if os.path.isdir(e)]
    for i, held_out in enumerate(events):
        event_name = os.path.basename(held_out)  # portable vs. split('/')[-1]
        te = TwitterSet()
        te.load_event_list([held_out], cached_pkl_file='./data/pheme.pkl')
        sets = te.split([0.5, 1.0])  # first half -> dev, second half -> test
        tr = TwitterSet()
        tr.load_event_list([e for j, e in enumerate(events) if j != i],
                           cached_pkl_file='./data/pheme.pkl')
        tr.filter_short_seq(min_len=5)
        if max_training_num != -1:
            # Subsample the parallel ID/len/label lists with one shared index set.
            idxs = random.sample(range(len(tr)), max_training_num)
            tr.data_ID = [tr.data_ID[idx] for idx in idxs]
            tr.data_len = [tr.data_len[idx] for idx in idxs]
            tr.data_y = [tr.data_y[idx] for idx in idxs]
        print("%s : (test event)/(train event) = %3d/%3d" % (event_name, len(te), len(tr)))
        print("\n\n ---------------  TestSet:  %s  ----------------- \n\n" % event_name)
        model = RDM(sets[0], sets[1])
        model.train(tr, saved_model_file="RDM_General_Event%d.pkl" % i)

def LoopTrainSubRDM(max_training_num=-1, sub_data_file="../data/sub_train.csv"):
    """Leave-one-event-out training of SubRDM over the PHEME events.

    Mirrors LoopTrainRDM, with a shared subjectivity reader passed to every
    per-event training run.  When ``max_training_num`` != -1 the training set
    is subsampled to that size.
    """
    sub_loader = SubReader(sub_data_file)
    data_dir = "../../pheme-rnr-dataset"  # renamed: `dir` shadowed the builtin
    events = [os.path.join(data_dir, item) for item in os.listdir(data_dir)]
    events = [e for e in events if os.path.isdir(e)]
    for i, held_out in enumerate(events):
        event_name = os.path.basename(held_out)  # portable vs. split('/')[-1]
        te = TwitterSet()
        te.load_event_list([held_out], cached_pkl_file='./data/pheme.pkl')
        sets = te.split([0.5, 1.0])  # first half -> dev, second half -> test
        tr = TwitterSet()
        tr.load_event_list([e for j, e in enumerate(events) if j != i],
                           cached_pkl_file='./data/pheme.pkl')
        tr.filter_short_seq(min_len=5)
        if max_training_num != -1:
            # Subsample the parallel ID/len/label lists with one shared index set.
            idxs = random.sample(range(len(tr)), max_training_num)
            tr.data_ID = [tr.data_ID[idx] for idx in idxs]
            tr.data_len = [tr.data_len[idx] for idx in idxs]
            tr.data_y = [tr.data_y[idx] for idx in idxs]
        print("%s : (test event)/(train event) = %3d/%3d" % (event_name, len(te), len(tr)))
        print("\n\n ---------------  TestSet:  %s  ----------------- \n\n" % event_name)
        model = SubRDM(sets[0], sets[1])
        model.train(tr, sub_loader=sub_loader,
                    saved_model_file="SubRDM_General_Event%d.pkl" % i,
                    test_event=event_name)

def Comparison(max_training_num=-1, sub_data_file="../data/sub_train.csv", repeat=1):
    """Leave-one-event-out comparison of SubRDM vs. plain RDM on PHEME.

    Repeats the whole sweep ``repeat`` times.  For each held-out event a
    per-event log directory is created, then SubRDM and RDM are trained on
    the remaining events and evaluated on the held-out split.
    """
    sub_loader = SubReader(sub_data_file)
    data_dir = "../../pheme-rnr-dataset"  # renamed: `dir` shadowed the builtin
    events = [os.path.join(data_dir, item) for item in os.listdir(data_dir)]
    events = [e for e in events if os.path.isdir(e)]
    for _ in range(repeat):
        for i, held_out in enumerate(events):
            event_name = os.path.basename(held_out)  # portable vs. split('/')[-1]
            log_dir = "./%s_logs/" % event_name
            # os.makedirs is portable and shell-injection-safe, unlike
            # os.system("mkdir %s" % ...).
            os.makedirs(log_dir, exist_ok=True)
            te = TwitterSet()
            te.load_event_list([held_out], cached_pkl_file='./data/pheme.pkl')
            sets = te.split([0.5, 1.0])  # first half -> dev, second half -> test
            tr = TwitterSet()
            tr.load_event_list([e for j, e in enumerate(events) if j != i],
                               cached_pkl_file='./data/pheme.pkl')
            tr.filter_short_seq(min_len=5)
            if max_training_num != -1:
                # Subsample the parallel ID/len/label lists with one shared index set.
                idxs = random.sample(range(len(tr)), max_training_num)
                tr.data_ID = [tr.data_ID[idx] for idx in idxs]
                tr.data_len = [tr.data_len[idx] for idx in idxs]
                tr.data_y = [tr.data_y[idx] for idx in idxs]
            print("%s : (test event)/(train event) = %3d/%3d" % (event_name, len(te), len(tr)))
            print("\n\n ---------------  TestSet:  %s  ----------------- \n\n" % event_name)
            # Removed a `try/except: raise/else: pass` wrapper: the bare
            # except only re-raised, so it was dead scaffolding.
            print("\n\n===========SubRDM Train===========\n\n")
            model = SubRDM(sets[0], sets[1])
            model.train(tr, sub_loader=sub_loader,
                        saved_model_file="SubRDM_General_Event%d.pkl" % i,
                        test_event=event_name)
            print("\n\n===========RDM Train===========\n\n")
            model = RDM(sets[0], sets[1])
            # NOTE(review): this overwrites the SubRDM checkpoint above
            # (same filename) — likely meant "RDM_General_Event%d.pkl"; left
            # unchanged to preserve behavior, confirm intent.
            model.train(tr, saved_model_file="SubRDM_General_Event%d.pkl" % i,
                        test_event=event_name)

if __name__ == '__main__':
    # Script entry point: load the standard splits, cap training sequence
    # length, and run a plain (non-adversarial) BertRD training pass.
    tr, dev, te = obtain_general_set()
    # tr.filter_short_seq(5)
    tr.trim_long_seq(10)  # presumably caps each training sequence at 10 items — confirm against TwitterSet
    # tr.ResortSample(6)
    with open("../saved/config.pkl", "rb") as fr:
        config = pickle.load(fr)  # cached model config produced by an earlier run
    print("===============Simple Train================")
    BRD = BertRD(config, bert_dir="../../bert_en/")
    BRD.SimpleTrain(tr, dev, te, max_epochs=55, saved_model_file="../saved/BertRD_Simple_4.pkl")
    print("===============Adversarial Train================")
    # Adversarial training path currently disabled; banner above is printed anyway.
    # BRD = BertRD(config, bert_dir="../../bert_en/")
    # BRD.trainIters(tr, dev, te, max_epochs=15)
    # NormalTrain_RDM(tr, dev, te)