import torch
from Dataloader.twitterloader import LMReader
from SentModel.sentence_trainer import LMTrainer
from SentModel.Sent2Vec import BertVec
from torch.utils.data import DataLoader

# Twitter corpora for language-model pretraining (train / dev / test splits).
tr = LMReader("../data/twitter_LM_tr.txt")
dev = LMReader("../data/twitter_LM_dev.txt")
te = LMReader("../data/twitter_LM_te.txt")

# Scale the effective batch size with the number of visible GPUs.
# Fall back to 1 on CPU-only machines: device_count() == 0 would otherwise
# produce batch_size=0, which DataLoader rejects with a ValueError.
batchsize_GPU = 16
GPU_Count = max(1, torch.cuda.device_count())
train_batchsize = batchsize_GPU * GPU_Count
tr_loader = DataLoader(tr, batch_size=train_batchsize, shuffle=True)
# Evaluation metrics are order-independent, so the dev/test loaders
# do not need (and should not pay for) shuffling.
dev_loader = DataLoader(dev, batch_size=100, shuffle=False)
te_loader = DataLoader(te, batch_size=100, shuffle=False)

# Sentence encoder backed by a local BERT checkpoint.
# NOTE(review): `model.bert.module` implies bert_parallel=True wraps the
# encoder in nn.DataParallel — confirm against BertVec's implementation.
model = BertVec("../../bert_en/", bert_parallel=True)
trainer = LMTrainer(model, model.bert.module.config)
trainer.LMTrain(
    tr_loader,
    dev_loader,
    te_loader,
    max_epoch=5,
    print_every=10,
    valid_every=10000,
    learning_rate=2e-3,
    model_file="../saved/bert_lm.pkl",
)