import time
import torch.optim
from torch.utils.data import DataLoader
from data import NERDataset, build_corpus
from model.BiLSTM import BiLSTM
from model.config import LSTMConfig, TrainingConfig
from utils import save_model
from sklearn.metrics import f1_score

if __name__ == "__main__":
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    train_data, train_tag, word_2_idx, tag_2_idx = build_corpus("train", make_vocab=True)
    dev_data, dev_tag = build_corpus("dev", make_vocab=False)

    corpus_num = len(word_2_idx)
    class_num = len(tag_2_idx)

    # 参数获取
    epoch = TrainingConfig.epoch
    train_batch_size = TrainingConfig.train_batch_size
    dev_batch_size = TrainingConfig.dev_batch_size
    lr = TrainingConfig.lr
    embedding_num = LSTMConfig.embedding_num
    hidden_num = LSTMConfig.hidden_num
    bi = True

    start = time.time()
    train_dataset = NERDataset(train_data, train_tag, word_2_idx, tag_2_idx)
    train_dataloader = DataLoader(train_dataset, train_batch_size, shuffle=False,
                                  collate_fn=train_dataset.pro_batch_data)

    dev_dataset = NERDataset(dev_data, dev_tag, word_2_idx, tag_2_idx)
    dev_dataloader = DataLoader(dev_dataset, batch_size=dev_batch_size, shuffle=False,
                                collate_fn=dev_dataset.pro_batch_data)

    model = BiLSTM(embedding_num, hidden_num, corpus_num, class_num, word_2_idx["<PAD>"], bi)
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    model = model.to(device)
    for e in range(epoch):
        model.train()
        for batch_data, batch_tag, batch_len in train_dataloader:
            train_loss = model.forward(batch_data,batch_len, batch_tag)
            train_loss.backward()
            opt.step()
            opt.zero_grad()
        # print(f"train loss:{loss:.4f}")

        model.eval()
        all_pre = []
        all_tag = []
        for dev_batch_data, dev_batch_tag, dev_batch_len in dev_dataloader:
            dev_loss = model.forward(dev_batch_data, dev_batch_len, dev_batch_tag)
            all_pre.extend(model.pre.detach().cpu().numpy().tolist())
            all_tag.extend(dev_batch_tag.detach().cpu().numpy().reshape(-1).tolist())
        score = f1_score(all_tag, all_pre, average="micro")
        print(f"{e},f1_score:{score:.5f},dev_loss:{dev_loss:.5f},train_loss:{train_loss:.5f}")

    save_model(model, "./ckpts/bi_lstm.pkl")
    print("训练完毕，共用时{:.2f}s，模型已保存" .format(time.time()-start))
