# encoding: utf-8

from transformers import pipeline
from transformers import BertForTokenClassification, BertTokenizer
import torch


# Path to a local copy of hfl/chinese-roberta-wwm-ext (Chinese RoBERTa-wwm
# pretrained weights).
pretrained_model = r"G:/nlp_about/pretrained_models/hfl-chinese-roberta-wwm-ext"
# NOTE(review): no num_labels/id2label is passed here, so the
# token-classification head is sized by the checkpoint's own config —
# presumably NOT the 13 BIO labels defined below, and the head weights are
# newly initialized unless this checkpoint was fine-tuned for NER. Confirm
# a fine-tuned checkpoint (or pass num_labels=len(categories),
# id2label=categories_id2label) is intended.
model = BertForTokenClassification.from_pretrained(pretrained_model)
tokenizer = BertTokenizer.from_pretrained(pretrained_model)


# Entity types for the NER task.
labels = ["BRAND", "MODEL", "NAME", "COLOR", "SPECS", "UNIT"]

# BIO tagging scheme: one outside tag plus a B-/I- pair per entity type.
categories = ["O"] + [
    prefix + label for label in labels for prefix in ("B-", "I-")
]
# Bidirectional lookup tables between tag ids and tag strings.
categories_id2label = dict(enumerate(categories))
categories_label2id = {tag: idx for idx, tag in enumerate(categories)}


sequence = "LZJVHYDAC电磁阀WSM06020W-01M-C-N-24DG/ WSM08130C-01M-C-N WSM08130C-01M-C-N-24DG"

# Encode once, then map the ids straight back to tokens so the token list is
# guaranteed to align 1:1 with the model input (the old decode-then-retokenize
# round trip can drift, e.g. around [UNK] or normalized characters).
inputs = tokenizer.encode(sequence, return_tensors="pt")
tokens = tokenizer.convert_ids_to_tokens(inputs[0])

# Inference only: disable dropout and skip autograd graph construction.
model.eval()
with torch.no_grad():
    logits = model(inputs).logits
predictions = torch.argmax(logits, dim=2)

for token, prediction in zip(tokens, predictions[0].tolist()):
    # Use the BIO mapping defined above; model.config.id2label only holds the
    # checkpoint config's generic names (e.g. LABEL_0) unless set explicitly.
    # .get with "O" guards against a head that has fewer labels than expected.
    print((token, categories_id2label.get(prediction, "O")))
