from fastNLP import cache_results, FitlogCallback, EarlyStopCallback
from pipe import BertPipe

from fastNLP.io.loader import Conll2003NERLoader, WeiboNERLoader, MsraNERLoader
from fastNLP.io.pipe import Conll2003NERPipe, WeiboNERPipe, MsraNERPipe
from torch import optim
from fastNLP import (
    Trainer,
    LoadBestModelCallback,
    TorchWarmupCallback,
    RichCallback,
    LRSchedCallback,
)
from fastNLP import SpanFPreRecMetric
from models.bert import BertNER
from models.bilstm_crf import BiLSTM_CRF_NER
from fastNLP import prepare_torch_dataloader
import argparse
import fitlog
import os

# Make CUDA kernel launches synchronous so errors surface at the launching
# call site (debugging aid; slows GPU execution). The canonical value is the
# integer string "1" — the original "1." was a typo.
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"

fitlog.set_log_dir("logs/")

parser = argparse.ArgumentParser()
parser.add_argument("--dataset_name", type=str, default="conll2003")
parser.add_argument("--model_name", type=str, default="bert-base-uncased")
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--lr", type=float, default=2e-5)
# NOTE: the spelling "epoches" is kept for CLI backward compatibility.
parser.add_argument("--epoches", type=int, default=20)
parser.add_argument("--device", type=str, default="cuda:1")

args = parser.parse_args()

dataset_name = args.dataset_name
model_name = args.model_name

# Per-model default hyperparameters.
# NOTE(review): these overrides silently clobber any --lr/--batch_size/
# --epoches passed on the command line for the matching models — confirm
# this is intentional before relying on CLI values here.
if model_name == "bilstm_crf":
    args.lr = 0.01
    args.batch_size = 64
    args.epoches = 50
    args.hidden_size = 300
    args.embedding_dim = 300
elif model_name == "bert-base-chinese":
    args.batch_size = 16

batch_size = args.batch_size
lr = args.lr
epoches = args.epoches
dropout = 0.5
fc_dropout = 0.4

# Record hyperparameters (and a snapshot of this file) with fitlog.
fitlog.add_hyper(args)
fitlog.add_hyper_in_file(__file__)

# Cache file for the preprocessed data bundle, keyed by dataset and model.
cache_fn = f"caches/data_bundle_{dataset_name}_{model_name}.pkl"


@cache_results(cache_fn, _refresh=False)
def get_data():
    """Load and preprocess the dataset selected by module-level globals.

    Uses ``dataset_name`` to pick the loader/pipe and ``model_name`` to decide
    whether to run the BERT tokenization pipe.

    Returns:
        ``(data_bundle, tokenizer)`` when ``model_name`` contains "bert",
        otherwise just ``data_bundle``.

    Raises:
        ValueError: if ``dataset_name`` is not one of the supported datasets.
    """
    if dataset_name == "conll2003":
        path = "~/datasets/englishNER/conll2003"
        data_bundle = Conll2003NERLoader().load(path)
        pipe = Conll2003NERPipe()
        data_bundle.rename_field("ner", "target")
    elif dataset_name == "weibo":
        data_bundle = WeiboNERLoader().load()
        pipe = WeiboNERPipe()
    elif dataset_name == "msra":
        data_bundle = MsraNERLoader().load()
        pipe = MsraNERPipe()
    else:
        # Previously an unknown name fell through and crashed later with an
        # opaque NameError on `pipe`/`data_bundle`; fail fast instead.
        raise ValueError(f"Unsupported dataset_name: {dataset_name!r}")

    if "bert" in model_name:
        # Chinese datasets expose raw text as `raw_chars`; unify the field
        # name so BertPipe can consume it as `raw_words`.
        if dataset_name in ("weibo", "msra"):
            data_bundle.rename_field("raw_chars", "raw_words")
        pipe = BertPipe(model_name)
        data_bundle, tokenizer = pipe.process(
            data_bundle, input_field_name="raw_words", target_field_name="target"
        )
        return data_bundle, tokenizer

    data_bundle = pipe.process(data_bundle)
    return data_bundle


# BERT pipelines also return the tokenizer (needed below for pad ids);
# other pipelines return only the (cached) data bundle.
if "bert" in model_name:
    data_bundle, tokenizer = get_data()
else:
    data_bundle = get_data()


# Chinese datasets carry their raw input in "chars"; models compute on
# "words", so expose the field under that name.
if dataset_name in ("weibo", "msra"):
    data_bundle.rename_field("chars", "words")

print(data_bundle)
print(data_bundle.get_dataset("train")[:4])

# One torch DataLoader per split (train/dev/test) with shared batch size.
dataloaders = prepare_torch_dataloader(
    data_bundle, batch_size=batch_size, num_workers=4
)

# Label vocabulary; its size determines the classifier output dimension.
target_vocab = data_bundle.get_vocab("target")
target_vocab_size = len(target_vocab)

# 使用embedding 的模型需要设置vocab_size 和 target_vocab_size
# Embedding-based models need the input vocabulary and the target vocab size.
if model_name == "bilstm_crf":
    input_vocab = data_bundle.get_vocab("words")

    # Chinese datasets use the pretrained CTB word vectors; English ones use
    # a GloVe identifier resolved by fastNLP's embedding loader.
    if dataset_name in ("weibo", "msra"):
        # Plain string — the original used an f-string with no placeholders.
        embedding_file_path = "~/datasets/embeddings/ctb.50d.vec"
    else:
        embedding_file_path = "en-glove-42b-300d"

    fitlog.add_hyper(embedding_file_path, name="embedding_file_path")

    model = BiLSTM_CRF_NER(
        vocab=input_vocab,
        num_classes=target_vocab_size,
        embedding_dim=args.embedding_dim,
        hidden_size=args.hidden_size,
        target_vocab=target_vocab,
        embedding_path=embedding_file_path,
    )
elif "bert" in model_name:
    model = BertNER(model_name, target_vocab_size)
    # BERT batches must be padded with the tokenizer's pad id; targets are
    # padded with -100 so padded positions are ignored downstream.
    for dl in dataloaders.values():
        # set_pad customizes the padding value for a specific field.
        dl.set_pad("input_ids", pad_val=tokenizer.pad_token_id)
        dl.set_pad("target", pad_val=-100)
else:
    # Previously an unknown model name left `model` undefined and crashed
    # later with a NameError; fail fast with a clear message instead.
    raise ValueError(f"Unsupported model_name: {model_name!r}")

# Adam over all trainable parameters with the (possibly model-specific) lr.
adam = optim.Adam(model.parameters(), lr=lr)

# Training callbacks: reload the best checkpoint at the end, warm up the
# learning rate, render rich progress output, log metrics to fitlog, and
# stop early after 5 evaluations without improvement.
callback_list = [
    LoadBestModelCallback(),
    TorchWarmupCallback(),
    RichCallback(print_every=10),
    FitlogCallback(),
    EarlyStopCallback(5),
]

# Span-level precision/recall/F1 against the target tag vocabulary.
eval_metrics = {"f": SpanFPreRecMetric(tag_vocab=target_vocab)}

trainer = Trainer(
    model=model,
    optimizers=adam,
    train_dataloader=dataloaders["train"],
    evaluate_dataloaders=dataloaders["dev"],
    metrics=eval_metrics,
    callbacks=callback_list,
    n_epochs=epoches,
    device=args.device,
    monitor="f#f",  # the "f" score of the metric registered under key "f"
)

if __name__ == "__main__":
    # Start training; evaluation runs on the dev dataloader during the loop.
    trainer.run()
