#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2022/11/18 21:37
# @Author : HuangBenHao
import torch

import config
from model import BertNER
from transformers import BertTokenizer
from tqdm import tqdm
import json


def pred(model, tokenizer, line, id2label):
    """Run NER prediction on a single input string.

    Each character of ``line`` is tokenized independently with the BERT
    tokenizer; the resulting word pieces (prefixed with ``[CLS]``) are fed
    to the model, and the CRF layer decodes one label per position.

    Args:
        model: a BertNER model; called as ``model((ids, token_starts), ...)``
            and exposing ``model.crf.decode``.
        tokenizer: a BERT tokenizer with ``tokenize`` and
            ``convert_tokens_to_ids``.
        line: input text (iterated character by character).
        id2label: mapping from label id to label string.

    Returns:
        dict with keys:
            "sent": first sub-token of every input character,
            "id_output": decoded label-id sequence for the sentence,
            "label_output": the same sequence mapped through ``id2label``.
    """
    # Tokenize each character exactly once (the original code called
    # tokenizer.tokenize twice per character with identical results).
    pieces = [tokenizer.tokenize(ch) for ch in line]

    # First sub-token per character — returned for display/inspection only.
    # NOTE(review): assumes tokenize() never returns an empty list for a
    # character; an empty result would raise IndexError (same as original).
    sent = [p[0] for p in pieces]

    # 0 marks the [CLS] slot, 1 marks each original character position.
    batch_token_starts = [0] + [1] * len(pieces)

    # Flatten sub-tokens behind a leading [CLS].
    words = ['[CLS]'] + [sub for p in pieces for sub in p]

    # Attention mask covers every word piece; the label mask skips [CLS].
    batch_masks = [[True] * len(words)]
    label_masks = [[True] * (len(words) - 1)]

    word_ids = torch.tensor([tokenizer.convert_tokens_to_ids(words)],
                            dtype=torch.long).to(config.device)
    batch_token_starts = torch.tensor([batch_token_starts],
                                      dtype=torch.long).to(config.device)
    batch_masks = torch.tensor(batch_masks).to(config.device)
    label_masks = torch.tensor(label_masks).to(config.device)

    # Forward pass yields emission scores; the CRF decodes the best path.
    batch_output = model((word_ids, batch_token_starts),
                         token_type_ids=None,
                         attention_mask=batch_masks)[0]
    batch_output = model.crf.decode(batch_output, mask=label_masks)

    return {
        "sent": sent,
        "id_output": batch_output[0],
        "label_output": [id2label.get(idx) for idx in batch_output[0]],
    }


if __name__ == "__main__":
    # Load tokenizer and trained NER model, then move the model to the
    # configured device.
    tokenizer = BertTokenizer.from_pretrained(config.bert_model,
                                              do_lower_case=True,
                                              skip_special_tokens=True)
    model = BertNER.from_pretrained(config.model_dir)
    model.to(config.device)

    # The input file is JSON-lines (one object per line). json.loads
    # replaces the original eval(), which would execute arbitrary Python
    # embedded in the data file — a security hazard.
    lines = []
    with open(r'../data/food/left_all.json', 'r', encoding='utf-8') as f:
        for raw in f:
            if raw.strip():  # skip blank lines defensively
                lines.append(json.loads(raw))

    results = []
    for item in tqdm(lines):
        line = item['text'].strip()
        # Remove space variants. NOTE(review): the first and third literals
        # render identically here — one may be a non-breaking space in the
        # original encoding; both are kept verbatim.
        line = line.replace(" ", "").replace("　", "").replace(" ", "")
        if not line:
            continue
        results.append({
            "text": line,
            "result": pred(model, tokenizer, line, config.id2label),
        })

    # NOTE: "a+" appends, so repeated runs accumulate results in the file
    # (preserved from the original behavior).
    with open("../data/food/left_all_result.json", "a+", encoding="utf-8") as fp:
        for row in results:
            json.dump(row, fp, ensure_ascii=False)
            fp.write('\n')