from .Sent2Vec import Sent2Vec, BertVec, LSTMVec
from .sentence_trainer import TopicTrainer, SentimentTrainer, Distill_LM_Trainer, LMTrainer
import pickle
import os
import sys
import torch
from Dataloader.twitterloader import LMReader, SubReader, AirlineSenti, TopicReader
from torch.utils.data import DataLoader

def TrainTopicModel():
    """Fine-tune a Sent2Vec model on the 5-class topic dataset.

    Loads the train/dev/test topic CSVs, restores the saved BERT config
    (overriding it to a single attention head with a 300-dim attention
    hidden size), builds the Sent2Vec encoder, and runs TopicTrainer.
    """
    splits = [
        TopicReader("../data/topic_tr.csv"),
        TopicReader("../data/topic_dev.csv"),
        TopicReader("../data/topic_te.csv"),
    ]
    # 32-item batches in file order; each reader supplies its own collation.
    loaders = [
        DataLoader(ds, batch_size=32, shuffle=False, collate_fn=ds.collate_fn)
        for ds in splits
    ]

    with open("../saved/config.pkl", "rb") as cfg_file:
        config = pickle.load(cfg_file)
    # Shrink the attention block for this downstream task.
    config.num_attention_heads = 1
    config.attn_hidden_size = 300

    model = Sent2Vec("../saved/bert_embedding.pkl", config, bert_dir="../../bert_en/")
    # 5 = number of topic classes.
    TopicTrainer(model, 5, config.attn_hidden_size).TopicTrain(*loaders)

def TrainSentimentModel():
    """Fine-tune a Sent2Vec model on 3-class airline-tweet sentiment.

    Train/dev come from a 97/3 split of Tweets.csv; the held-out test
    set is read from test.csv. Restores the saved BERT config with a
    single attention head and 300-dim attention hidden size.
    """
    test_set = SubReader(data_file="../data/test.csv")
    airline = AirlineSenti()
    airline.load_data("../data/Tweets.csv")
    train_set, dev_set = airline.split([0.97, 1.0])

    def batches(ds):
        # 32-item batches, raw-text collation, original order preserved.
        return DataLoader(ds, batch_size=32, shuffle=False, collate_fn=ds.collate_raw_fn)

    with open("../saved/config.pkl", "rb") as cfg_file:
        config = pickle.load(cfg_file)
    config.num_attention_heads = 1
    config.attn_hidden_size = 300

    model = Sent2Vec("../saved/bert_embedding.pkl", config, bert_dir="../../bert_en/")
    trainer = SentimentTrainer(model, config, 3)  # 3 sentiment classes
    # NOTE: "SentimenTrain" (no final 't') is the trainer's actual method name.
    trainer.SentimenTrain(batches(train_set), batches(dev_set), batches(test_set))

def TrainLMModel():
    """Train the BERT language model (the distillation teacher) on the
    Twitter LM corpus and checkpoint it to ../saved/bert_lm.pkl."""
    train_data = LMReader("../data/twitter_LM_tr.txt")
    dev_data = LMReader("../data/twitter_LM_dev.txt")
    test_data = LMReader("../data/twitter_LM_te.txt")
    # Smaller batches for training; larger (100) for the eval passes.
    train_batches = DataLoader(train_data, batch_size=32, shuffle=True)
    dev_batches = DataLoader(dev_data, batch_size=100, shuffle=True)
    test_batches = DataLoader(test_data, batch_size=100, shuffle=True)

    lm = BertVec("../../bert_en/")
    LMTrainer(lm, lm.bert.config).LMTrain(
        train_batches,
        dev_batches,
        test_batches,
        print_every=10,
        model_file="../saved/bert_lm.pkl",
    )

def TrainDistillLMModel():
    """Distill the pretrained BERT LM (teacher) into an LSTM student.

    Requires the teacher checkpoint produced by TrainLMModel() at
    ../saved/bert_lm.pkl; aborts with a non-zero exit status if it is
    missing. The distilled student is saved to
    ../saved/distill_LSTM_LM.pkl.
    """
    tr = LMReader("../data/twitter_LM_tr.txt")
    dev = LMReader("../data/twitter_LM_dev.txt")
    te = LMReader("../data/twitter_LM_te.txt")
    tr_loader = DataLoader(tr, batch_size=32, shuffle=True)
    dev_loader = DataLoader(dev, batch_size=100, shuffle=True)
    te_loader = DataLoader(te, batch_size=100, shuffle=True)

    teacher_ckpt = "../saved/bert_lm.pkl"
    model = BertVec("../../bert_en/")
    if os.path.exists(teacher_ckpt):
        ch = torch.load(teacher_ckpt)
        model.load_state_dict(ch['sent2vec'])
    else:
        # Bug fix: this error previously exited with status 0, which signals
        # success to shells and job schedulers. Report on stderr and exit 1.
        print("Error! No Teacher Model Pretrained!", file=sys.stderr)
        sys.exit(1)
    print("<========Distill Train=======>")
    s_model = LSTMVec("../saved/bert_embedding.pkl", "../../bert_en/")
    trainer = Distill_LM_Trainer(model, s_model, model.bert.config)
    trainer.DistillLM_Train(tr_loader, dev_loader, te_loader, print_every=10, model_file="../saved/distill_LSTM_LM.pkl")