"""Train a topic model on Twitter topic data using BERT sentence vectors."""
import sys

# BUG FIX: the parent directory must be on sys.path BEFORE the project-local
# imports below, otherwise they cannot resolve when this script is run from
# its own directory.
sys.path.append("..")

from Dataloader.twitterloader import TopicReader
from SentModel.sentence_trainer import TopicTrainer, TopicGANTrainer
from SentModel.Sent2Vec import BertVec

# Train / validation / test splits of the Twitter topic dataset.
tr = TopicReader("../data/TwitterTopic_tr.csv")
dev = TopicReader("../data/TwitterTopic_dev.csv")
te = TopicReader("../data/TwitterTopic_te.csv")

def collate_sents(batch):
    """Collate function: keep only the sentence (first element) of each sample."""
    return [sample[0] for sample in batch]

# BERT sentence encoder loaded from a local English BERT checkpoint.
# bert_parallel=True presumably enables multi-GPU wrapping (the commented-out
# LMTrainer line below accesses `bvec.bert.module`, which suggests
# nn.DataParallel) — confirm in BertVec.
bvec = BertVec("../../bert_en", bert_parallel=True)

# NOTE(review): commented-out experiment history — Chinese word2vec+LSTM
# encoder, DataLoader-based LM pre-training, and checkpoint restore. Kept for
# reference; delete once the BERT/GAN pipeline is settled.
# lvec = W2VLSTMVec("../saved/word2vec_cn/", 300, 1, None, False)
# tr_loader = DataLoader(tr, batch_size=8, shuffle=True, collate_fn=collate_sents)
# dev_loader = DataLoader(dev, batch_size=20, shuffle=False, collate_fn=collate_sents)
# te_loader = DataLoader(te, batch_size=20, shuffle=False, collate_fn=collate_sents)
# lm_trainer = LMTrainer(bvec, bvec.bert.module.config)
# lm_trainer.LMTrain(tr_loader, dev_loader, te_loader, max_epoch=1, learning_rate=2e-4, model_file="bert_cn_weibolm.pkl")

# ch = torch.load("../saved/bert_cn_weibolm.pkl")
# bvec.load_state_dict(ch['sent2vec'])

# GAN-based topic trainer over 5 topic labels; 768 matches the BERT hidden
# size. grl=False presumably disables a gradient-reversal layer — confirm in
# TopicGANTrainer.
# trainer = TopicTrainer(bvec, topic_label_num=5, sent_hidden_size=768)
trainer = TopicGANTrainer(bvec, topic_label_num=5, sent_hidden_size=768, grl=False)
# NOTE(review): min_step=10 > max_step=1 looks inverted — verify the intended
# semantics of min_step/max_step in TopicGANTrain. valid_every=100 equals
# max_train_iters=100, so validation may only run once at the very end.
trainer.TopicGANTrain(tr, dev, te,
                         max_train_iters=100, min_step=10, max_step=1,
                         valid_every=100, learning_rate=2e-3,
                         model_file="TopicGAN_BERT.pkl")

