from model import BiLSTM_CNN_CRF
from fastNLP import SpanFPreRecMetric
from fastNLP import prepare_torch_dataloader
from torch import optim
from fastNLP import cache_results, FitlogCallback, EarlyStopCallback, CheckpointCallback
import fitlog
import sys

# Make the repository root importable so the sibling package
# `bilstm_crf_with_char` resolves when this script is run from its own dir.
sys.path.append("../../")
from bilstm_crf_with_char.data_loader import load_and_process

# Direct all fitlog records for this run into char_logs/ (must be set
# before any fitlog.add_* calls below).
fitlog.set_log_dir("char_logs/")

from fastNLP import (
    Trainer,
    LoadBestModelCallback,
    TorchWarmupCallback,
    RichCallback,
    TorchGradClipCallback,
)


# Trainer callbacks. The monitor string "f#f" means: the "f" (f-score) field
# of the metric registered under the key "f" in the `metrics` dict below.
callbacks = [
    LoadBestModelCallback(),  # restore the best monitored weights after training
    TorchWarmupCallback(),  # LR warmup — NOTE(review): default schedule/steps, confirm intent
    RichCallback(print_every=10),  # rich progress output every 10 steps
    FitlogCallback(),  # mirror losses/metrics into fitlog
    EarlyStopCallback("f#f"),  # stop early when "f#f" stops improving
    TorchGradClipCallback(clip_value=5.0, clip_type="norm"),  # clip grad norm at 5.0
    CheckpointCallback("../checkpoint", monitor="f#f", topk=1),  # keep top-1 ckpt by "f#f"
]

# --- Hyper-parameters for this run ---
batch_size = 128
lr = 0.01
epoches = 50  # NOTE(review): misspelling of "epochs" kept — it is also logged as a hyper key
model_name = "bilstm_cnn_crf"
# NOTE(review): despite the "_path" suffix this is a bool flag forwarded verbatim
# to the model; presumably it toggles loading pretrained word embeddings —
# confirm against BiLSTM_CNN_CRF in model.py.
pretrain_embedding_path = True

# Record hyper-parameters (and a copy of this file) with fitlog.
fitlog.add_hyper(
    {"batch_size": batch_size, "lr": lr, "epoches": epoches, "model_name": model_name}
)
fitlog.add_hyper({"dropout": 0.5, "pretrain_embedding_path": pretrain_embedding_path})
fitlog.add_hyper_in_file(__file__)
# Load + preprocess the dataset; the bundle exposes "words", "chars" and
# "target" vocabularies plus train/dev splits (see data_loader.load_and_process).
databundle = load_and_process()
word_vocab_size = len(databundle.get_vocab("words"))
char_vocab_size = len(databundle.get_vocab("chars"))
target_vocab = databundle.get_vocab("target")

# One torch DataLoader per dataset split (keyed "train", "dev", ...).
dataloaders = prepare_torch_dataloader(databundle, batch_size=batch_size, num_workers=4)


# Sequence tagger: word embeddings + char-level CNN features -> BiLSTM -> CRF.
model = BiLSTM_CNN_CRF(
    word_embedding_dim=50,
    char_embedding_dim=30,
    hidden_size=100,  # BiLSTM hidden size — per-direction vs. total not visible here, confirm in model.py
    char_vocab_size=char_vocab_size,
    word_vocab_size=word_vocab_size,
    num_classes=len(target_vocab),
    pretrain_embedding_path=pretrain_embedding_path,  # bool flag, see note at its definition
    dropout=0.5,  # keep in sync with the value logged via fitlog.add_hyper above
    filters=30,  # number of char-CNN filters
    window_size=3,  # char-CNN kernel width
    target_vocab=target_vocab,
)

# Evaluation metrics. The key "f" combined with SpanFPreRecMetric's f-score
# field yields the monitor name "f#f" used by the callbacks and the Trainer.
metrics = {
    "f": SpanFPreRecMetric(tag_vocab=target_vocab),
}

optimizer = optim.Adam(model.parameters(), lr=lr)
# NOTE(review): device is hard-coded to the second GPU ("cuda:1") — this will
# fail on single-GPU machines; consider making it configurable.
trainer = Trainer(
    model=model,
    train_dataloader=dataloaders["train"],
    optimizers=optimizer,
    evaluate_dataloaders=dataloaders["dev"],  # dev split used for per-epoch evaluation
    metrics=metrics,
    n_epochs=epoches,
    callbacks=callbacks,
    device="cuda:1",
    monitor="f#f",  # f-score of the "f" metric; drives checkpointing/early-stop
)
if __name__ == "__main__":
    # Sanity-check: show the first few processed training samples before
    # launching training. (Removed stale commented-out debug prints.)
    print(databundle.get_dataset("train")[:4])
    trainer.run()
