import sys
sys.path.append("..")
from Dataloader.dataloader_utils import shuffle_data
from Dataloader.twitterloader import TopicReader, TwitterSet
from SentModel.sentence_trainer import TopicTrainer, TopicGANTrainer, LMTrainer
from RumdetecFramework.AdverRumorFramework import TopicAdverRumorDetection
from SentModel.Sent2Vec import BertVec
from PropModel.SeqPropagation import GRUModel
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

def obtain_general_set(tr_prefix, dev_prefix, te_prefix):
    """Load the train/dev/test Twitter splits from their file prefixes.

    Only the training split filters out short sequences (min_len=5);
    dev and test are loaded with the loader's defaults.

    Args:
        tr_prefix: data-file prefix for the training split.
        dev_prefix: data-file prefix for the dev split.
        te_prefix: data-file prefix for the test split.

    Returns:
        (train_set, dev_set, test_set) as TwitterSet instances.
    """
    split_configs = (
        (tr_prefix, {"min_len": 5}),
        (dev_prefix, {}),
        (te_prefix, {}),
    )
    splits = []
    for prefix, extra_kwargs in split_configs:
        dataset = TwitterSet()
        dataset.load_data_fast(data_prefix=prefix, **extra_kwargs)
        splits.append(dataset)
    return tuple(splits)

def obtain_AdverBertRD():
    """Assemble the adversarial BERT rumor detector and its trainers.

    Builds a single shared BertVec sentence encoder (data-parallel), a
    language-model trainer, a topic-GAN trainer, and the GRU-based
    rumor-detection framework on top of that encoder. The detector's
    topic classifier is tied to the GAN trainer's classifier so both
    refer to the same module.

    Returns:
        (detector, topic_gan_trainer, lm_trainer)
    """
    sent2vec = BertVec("../../bert_en/", bert_parallel=True)

    # bert_parallel=True wraps the encoder in DataParallel, so the HF
    # config lives behind `.module`.
    lm_trainer = LMTrainer(sent2vec, sent2vec.bert.module.config)

    topic_gan = TopicGANTrainer(
        sent2vec, topic_label_num=5, sent_hidden_size=768, grl=False
    )

    propagation = GRUModel(768, 256, 1, 0.2)
    rumor_head = nn.Linear(256, 2)
    detector = TopicAdverRumorDetection(
        sent2vec, propagation, rumor_head, topic_label_num=5
    )
    # Share the GAN trainer's topic classifier with the detector.
    detector.topic_cls = topic_gan.topic_cls
    return detector, topic_gan, lm_trainer

def pretrained_BertRD(pretrained_file):
    """Rebuild the rumor detector from a saved checkpoint.

    Args:
        pretrained_file: path to a torch checkpoint containing the keys
            'sent2vec', 'prop_model' and 'rdm_cls'.

    Returns:
        A TopicAdverRumorDetection with restored weights; the BERT
        encoder is wrapped in DataParallel across all visible GPUs.
    """
    encoder = BertVec("../../bert_en/", bert_parallel=False)
    gru = GRUModel(768, 256, 1, 0.2)
    head = nn.Linear(256, 2)

    # NOTE(review): no map_location is given, so loading assumes the
    # checkpoint's original device is reachable — confirm for CPU-only runs.
    checkpoint = torch.load(pretrained_file)
    encoder.load_state_dict(checkpoint['sent2vec'])
    # Parallelize only after the non-parallel state dict is restored,
    # so the checkpoint keys match the unwrapped module.
    encoder.bert = nn.DataParallel(
        encoder.bert, device_ids=list(range(torch.cuda.device_count()))
    )
    gru.load_state_dict(checkpoint['prop_model'])
    head.load_state_dict(checkpoint['rdm_cls'])
    return TopicAdverRumorDetection(encoder, gru, head, topic_label_num=5)

def collate_sents(batch):
    """Flatten a batch into one flat list of sentences.

    Each batch item is a sequence whose first element is that item's
    list of sentences; the per-item lists are concatenated in order.

    Args:
        batch: iterable of items where item[0] is a list of sentences.

    Returns:
        A single list containing every sentence from every item.
    """
    flat = []
    for item in batch:
        flat.extend(item[0])
    return flat


# bvec = BertVec("../../bert_en", bert_parallel=True)
# lvec = W2VLSTMVec("../saved/word2vec_cn/", 300, 1, None, False)
# tr_loader = DataLoader(tr, batch_size=8, shuffle=True, collate_fn=collate_sents)
# dev_loader = DataLoader(dev, batch_size=20, shuffle=False, collate_fn=collate_sents)
# te_loader = DataLoader(te, batch_size=20, shuffle=False, collate_fn=collate_sents)

# ch = torch.load("../saved/bert_cn_weibolm.pkl")
# bvec.load_state_dict(ch['sent2vec'])

# trainer = TopicTrainer(bvec, topic_label_num=5, sent_hidden_size=768)
# trainer = TopicGANTrainer(bvec, topic_label_num=5, sent_hidden_size=768, grl=False)
# trainer.AdversarialTrain(tr, dev, te,
#                          max_train_iters=1000, min_step=10, max_step=1,
#                          valid_every=100, learning_rate=2e-3,
#                          model_file="TopicGAN_BERT.pkl")

# Build the adversarial BERT rumor detector plus its topic-GAN and
# language-model trainers (all three share one BertVec encoder).
model, topicGAN, lm_trainer = obtain_AdverBertRD()

# topic_tr = TopicReader("../data/TwitterTopic_tr.csv")
# topic_dev = TopicReader("../data/TwitterTopic_dev.csv")
# topic_te = TopicReader("../data/TwitterTopic_te.csv")
#
# topicGAN.TopicGANTrain(topic_tr, topic_dev, topic_te,
#                      max_train_iters=1000, min_step=10, max_step=1,
#                      valid_every=100, learning_rate=2e-3,
#                      model_file="TopicGAN_BERT.pkl")

# Language-model fine-tuning: five rounds, each over a freshly
# shuffled copy of split 1.
i = 1
tr, dev, te = obtain_general_set(
    "../data/twitter_tr%d" % i,
    "../data/twitter_dev%d" % i,
    "../data/twitter_te%d" % i,
)

for round_idx in range(5):
    shuf_tr, shuf_dev, shuf_te = shuffle_data(tr, dev, te)
    shuf_tr.filter_short_seq(min_len=5)
    shuf_tr.ResortSample(6)
    train_loader = DataLoader(shuf_tr, batch_size=4, shuffle=True, collate_fn=collate_sents)
    valid_loader = DataLoader(shuf_dev, batch_size=4, shuffle=False, collate_fn=collate_sents)
    test_loader = DataLoader(shuf_te, batch_size=4, shuffle=False, collate_fn=collate_sents)
    # NOTE(review): checkpoint name says "cn_weibo" but the data is
    # English Twitter — confirm the filename is intentional.
    lm_trainer.LMTrain(train_loader, valid_loader, test_loader,
                       max_epoch=2, learning_rate=2e-4,
                       model_file="bert_cn_weibolm.pkl")

# topic_tr = TopicReader("../data/TwitterTopic_tr.csv")
# topic_dev = TopicReader("../data/TwitterTopic_dev.csv")
# topic_te = TopicReader("../data/TwitterTopic_te.csv")
#
# topicGAN.TopicGANTrain(topic_tr, topic_dev, topic_te,
#                      max_train_iters=1000, min_step=10, max_step=1,
#                      valid_every=100, learning_rate=2e-3,
#                      model_file="TopicGAN_BERT.pkl")

# The held-out event name keys both the log line and the checkpoint file.
held_out_event = te.data[te.data_ID[0]]['event']
print("%s : (dev event)/(test event)/(train event) = %3d/%3d/%3d"
      % (held_out_event, len(dev), len(te), len(tr)))
print("\n\n===========Train Settings, GPU:%d===========\n\n" % torch.cuda.device_count())
model.train_iters(tr, dev, te, max_epochs=100,
                  log_dir="../logs/", log_suffix="_BertRD",
                  model_file="BertRD_%s.pkl" % held_out_event)
