import pandas as pd
from tqdm.auto import tqdm
import numpy as np
import torch
from transformers import AutoConfig, AutoTokenizer, AutoModelForTokenClassification


def get_models(num_folds):
    """Load the shared config/tokenizer plus one fine-tuned model per fold.

    Args:
        num_folds: number of cross-validation folds; model directories are
            expected at ``/data/code/ner_0`` .. ``/data/code/ner_{num_folds-1}``.

    Returns:
        Tuple of ``(config, tokenizer, model_list)`` where ``model_list`` holds
        one ``AutoModelForTokenClassification`` per fold.
    """
    fold_dirs = ["/data/code/ner_%d" % fold for fold in range(num_folds)]
    # All folds share the same architecture, so config/tokenizer come from fold 0.
    config = AutoConfig.from_pretrained(fold_dirs[0])
    tokenizer = AutoTokenizer.from_pretrained(fold_dirs[0], config=config)
    models = [
        AutoModelForTokenClassification.from_pretrained(fold_dir, config=config)
        for fold_dir in fold_dirs
    ]
    return config, tokenizer, models


def inference():
    """Run 5-fold ensemble NER inference and write the predictions to CSV.

    Reads ``/data/raw_data/evaluation_public.csv`` (first 1000 rows), predicts a
    tag per character by averaging the logits of all fold models, and writes the
    result to ``/data/predict_result/result.csv``.
    """
    config, tokenizer, model_list = get_models(num_folds=5)

    device = torch.device(0)
    for model in model_list:
        model.to(device)
        model.eval()  # disable dropout etc. — this is inference, not training

    test_df = pd.read_csv("/data/raw_data/evaluation_public.csv").iloc[:1000]
    # .copy() gives an independent frame so the column assignments below do not
    # trigger pandas' SettingWithCopyWarning / chained-assignment pitfalls.
    sub_df = test_df.dropna().copy()

    tags = []
    seq_len = 510  # leave room for [CLS]/[SEP] within the model's 512-token limit
    with torch.no_grad():  # no autograd bookkeeping needed for inference
        for start_idx in tqdm(range(0, len(sub_df), seq_len)):
            example = sub_df.iloc[start_idx: start_idx + seq_len]["character"].values.tolist()
            x = tokenizer(example, is_split_into_words=True, return_tensors="pt")
            x = {k: v.to(device) for k, v in x.items()}

            # BUG FIX: the original seeded the sum with y_list[0] and then added
            # every element including y_list[0] again, so fold 0 was counted
            # twice and the in-place `+=` also mutated its logits tensor.
            # Stacking and taking the mean yields the true ensemble average.
            fold_logits = torch.stack([model(**x).logits for model in model_list])
            mean_logits = fold_logits.mean(dim=0)

            preds = mean_logits.argmax(dim=-1).squeeze().cpu().numpy()
            pred_tag = [config.id2label[pred] for pred in preds]
            # Drop the predictions for the special [CLS]/[SEP] positions.
            tags += pred_tag[1:-1]

    sub_df["tag"] = tags
    sub_df.drop(columns=["character"], inplace=True)
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `int` is the supported spelling and behaves identically here.
    sub_df["id"] = sub_df["id"].astype(int)
    sub_df.to_csv("/data/predict_result/result.csv", index=False)


# Script entry point: run the full inference pipeline when executed directly.
if __name__ == "__main__":
    inference()