import os

from torch.utils.data import DataLoader
from transformers import AutoModelForSeq2SeqLM, AutoConfig, BertTokenizer

from constants import DOMAIN_SLOT_TAG, VALUE_SLOT_TAG
from main import define_args, add_ontology_special_tokens
import torch
import torch.nn as nn

from src.dataset import TripleExtraDataset
from src.trainer import Trainer
from src.utils import load_model_and_parallel


def test():
    """Evaluate the best saved checkpoint on the test set and print micro-F1.

    Reads all configuration from CLI args (``define_args``), rebuilds the
    model/tokenizer exactly as in training (including ontology special
    tokens), loads the highest-scoring ``model_<score>.bin`` checkpoint from
    ``args.output_dir``, and runs ``Trainer.eval`` over ``test.json``.

    Raises:
        FileNotFoundError: if no ``*.bin`` checkpoint exists in the output dir.
    """
    args = define_args()

    # NOTE(review): the original mixed `args.model_path_or_name` and
    # `args.model_name_or_path` for the same argument — unified here to the
    # HuggingFace-conventional `model_name_or_path`; confirm against define_args.
    config = AutoConfig.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        decoder_start_token_id=0,
        early_stopping=False,
        no_repeat_ngram_size=0,
        dropout=args.dropout
    )

    model = AutoModelForSeq2SeqLM.from_pretrained(
        args.model_name_or_path,
        config=config
    )

    tokenizer = BertTokenizer.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path
    )

    # Re-add the ontology special tokens so the vocabulary (and therefore the
    # embedding matrix size) matches the checkpoint produced during training.
    ontology_special_tokens, labels_map = add_ontology_special_tokens(args.ontology)
    all_special_tokens = ontology_special_tokens + [DOMAIN_SLOT_TAG, VALUE_SLOT_TAG]
    tokenizer.add_special_tokens({'additional_special_tokens': all_special_tokens})
    model.resize_token_embeddings(len(tokenizer))

    # Select the checkpoint with the best score encoded in its filename
    # (expected pattern: model_<score>.bin). Track the filename itself instead
    # of rebuilding it from the parsed float — 'model_0.850.bin' parses to
    # 0.85, and f'model_{0.85}.bin' would point at a non-existent file.
    best_res = float('-inf')
    best_model_file = None
    for fname in os.listdir(args.output_dir):
        if fname.endswith('.bin'):
            try:
                res = float(fname.split('_')[1][:-4])  # strip trailing '.bin'
            except (IndexError, ValueError):
                continue  # ignore .bin files that don't follow the naming scheme
            if res > best_res:
                best_res = res
                best_model_file = fname

    if best_model_file is None:
        raise FileNotFoundError(
            f'no model checkpoint (*.bin) found in {args.output_dir}'
        )

    best_model_path = os.path.join(args.output_dir, best_model_file)
    model.load_state_dict(torch.load(best_model_path))
    model, device = load_model_and_parallel(args, model)

    test_path = os.path.join(args.data_dir, 'test.json')
    test_dataset = TripleExtraDataset(args, test_path, tokenizer)
    test_loader = DataLoader(test_dataset, shuffle=False, batch_size=args.t_batch_size)

    loss_function = nn.CrossEntropyLoss()
    # Trainer is built in eval-only mode: no train loader, optimizer, or scheduler.
    trainer = Trainer(args, model, tokenizer, labels_map, None, test_loader, loss_function, None, None,
                      device)
    metrics = trainer.eval()
    test_micro_f1 = metrics['micro avg']['f1-score']
    print('test micro f1 is %.5f' % test_micro_f1)


# Script entry point: run evaluation only when executed directly, not on import.
if __name__ == '__main__':
    test()
