from data_loader import load_and_process
from model import BiLSTM_CRF_CHAR
from fastNLP import SpanFPreRecMetric
from fastNLP import prepare_torch_dataloader
from torch import optim
from fastNLP import cache_results, FitlogCallback, EarlyStopCallback, CheckpointCallback
import fitlog

# Direct all fitlog experiment records for this run to the char-model log directory.
fitlog.set_log_dir("char_logs/")

from fastNLP import (
    Trainer,
    LoadBestModelCallback,
    TorchWarmupCallback,
    RichCallback,
    TorchGradClipCallback
)


# Training callbacks, registered in order:
#   - restore the best checkpoint when training ends
#   - learning-rate warmup
#   - rich progress output every 10 steps
#   - mirror evaluation metrics into fitlog
#   - early-stop on the "f#f" monitor
#   - clip gradients by global norm at 5.0
#   - keep only the top-1 checkpoint (by "f#f") under ../checkpoint
callbacks = []
callbacks.append(LoadBestModelCallback())
callbacks.append(TorchWarmupCallback())
callbacks.append(RichCallback(print_every=10))
callbacks.append(FitlogCallback())
callbacks.append(EarlyStopCallback("f#f"))
callbacks.append(TorchGradClipCallback(clip_value=5.0, clip_type='norm'))
callbacks.append(CheckpointCallback('../checkpoint', monitor='f#f', topk=1))

# Core hyper-parameters for this run.
batch_size = 128
lr = 0.01
epoches = 50  # NOTE(review): conventional spelling is "epochs"; name kept as-is because it is reused below

# Record hyper-parameters with fitlog so the experiment is reproducible.
fitlog.add_hyper({"batch_size": batch_size, "lr": lr, "epoches": epoches})
fitlog.add_hyper({"dropout": 0.5, "pretrain_embedding_path": 'senna_embedding'})
fitlog.add_hyper_in_file(__file__)
# Load the dataset bundle and look up its vocabularies (words, chars, targets).
databundle = load_and_process()
word_vocab_size = len(databundle.get_vocab("words"))
char_vocab_size = len(databundle.get_vocab("chars"))
target_vocab = databundle.get_vocab("target")

# Wrap every split in the bundle in a torch DataLoader.
dataloaders = prepare_torch_dataloader(databundle, batch_size=batch_size, num_workers=4)


# BiLSTM-CRF sequence labeler with character-level features, sized from the
# vocabularies loaded above.
model = BiLSTM_CRF_CHAR(
    word_embedding_dim=50,
    char_embedding_dim=50,
    hidden_size=100,
    char_state_dim=25,
    char_vocab_size=char_vocab_size,
    word_vocab_size=word_vocab_size,
    num_classes=len(target_vocab),
    # NOTE(review): True looks like a flag, but the parameter name says "path"
    # and the fitlog hypers record 'senna_embedding' as the pretrained
    # embedding path — confirm against BiLSTM_CRF_CHAR's signature whether this
    # should be a path string instead of a boolean.
    pretrain_embedding_path=True,
    dropout=0.5,
    num_layers=1,
    target_vocab=target_vocab,
)

# Span-level F/precision/recall over the target tag vocabulary; the key "f"
# presumably becomes the metric's name prefix used by the "f#f" monitor string.
metrics = dict(f=SpanFPreRecMetric(tag_vocab=target_vocab))

# Adam over all model parameters at the configured learning rate.
optimizer = optim.Adam(model.parameters(), lr=lr)
trainer = Trainer(
    model=model,
    train_dataloader=dataloaders["train"],
    optimizers=optimizer,
    evaluate_dataloaders=dataloaders["dev"],  # evaluation runs on the dev split
    metrics=metrics,
    n_epochs=epoches,
    callbacks=callbacks,
    device="cuda:1",  # NOTE(review): hard-coded to GPU 1 — confirm this device exists on the target machine
    monitor="f#f",  # presumably the f-score reported by the metric registered under key "f"
)
if __name__ == "__main__":
    # print(data_bundle)
    # Sanity-check a few training samples before launching training.
    print(databundle.get_dataset("train")[:4])
    trainer.run()
    # print(databundle.get_dataset("dev")[0])
    # print(databundle.get_dataset("dev")[0])
