import sys
sys.path.append('/data/zhuoran/code/cognlp')
from cognlp import *
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import RandomSampler
from transformers import BertTokenizer

from cognlp.io.loader.ner.conll2003 import Conll2003NERLoader
from cognlp.io.processor.ner.conll2003 import NERProcessor
from cognlp.core.metrics import SpanFPreRecMetric
from cognlp.core.trainer import Trainer
from cognlp.core.dataset import NerDataset
from cognlp.models.ner.bert_ner import Bert4Ner
from cognlp.config.ner.conll2003 import args

if __name__ == '__main__':
    # Hard-coded to GPU 3; NOTE(review): Trainer below also receives
    # device_ids=[3, 4, 5] — confirm the multi-GPU setup expects rank 3 as primary.
    torch.cuda.set_device(3)
    device = torch.device("cuda")

    # Load the three CoNLL-2003 splits and build the BERT tokenizer/processor.
    loader = Conll2003NERLoader()
    train_set, dev_set, test_set = loader.load(args.input_dir)
    tokenizer = BertTokenizer.from_pretrained(args.bert_model)
    process = NERProcessor(path="../data/ner/conll2003/data")

    # Training data: process, wrap in a Dataset, sample randomly each epoch.
    train_data = process.process(train_set, device=device)
    train_data = NerDataset(train_data)
    train_sampler = RandomSampler(train_data)

    # Validation data. BUG FIX: the original processed `test_set` here, which
    # validated on the held-out test split during training; use `dev_set`.
    dev_data = process.process(dev_set, device=device)
    dev_data = NerDataset(dev_data)
    dev_sampler = RandomSampler(dev_data)

    # Model, span-level F1 metric, CE loss (index 0 = padding tag), Adam.
    model = Bert4Ner(len(process.vocabulary), embedding_size=1024, device=device)
    metric = SpanFPreRecMetric(tag_vocab=process.vocabulary)
    loss = nn.CrossEntropyLoss(ignore_index=0)
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)

    trainer = Trainer(train_data=train_data, model=model, optimizer=optimizer, loss=loss,
                      batch_size=64, train_sampler=train_sampler, drop_last=False, gradient_accumulation_steps=1,
                      num_workers=5, n_epochs=1, print_every=None, scheduler=None, dev_sampler=dev_sampler,
                      dev_data=dev_data, metrics=metric, metric_key=None, validate_steps=None,
                      save_path="../data/ner/conll2003/model", save_file=None, save_steps=None, use_tqdm=True,
                      device=device, writer_path='../data/ner/conll2003/tensorboard',
                      callbacks=None, check_code_level=0, grad_norm=None, device_ids=[3, 4, 5])
    trainer.train()

    # Smoke-test inference on a single sentence.
    # NOTE(review): NerPredictor has no explicit import — presumably exported by
    # the `from cognlp import *` at the top; verify against the package's __all__.
    predictor = NerPredictor(model, vocabulary=process.vocabulary, device=device)
    print(predictor.predict("Japan began the defence of their Asian Cup title with."))
