import numpy as np
from transformers import BertTokenizer
import onnxruntime as ort
import torch
import torch.nn.functional as F

# pip install onnxruntime -i https://pypi.tuna.tsinghua.edu.cn/simple
# Initialization: load the Chinese BERT tokenizer and the exported ONNX model.
tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
ort_session = ort.InferenceSession("bert_base_chinese.onnx")

# Text preprocessing: tokenize to fixed-length numpy arrays.
# FIX: truncation=True added — without it, text longer than 128 tokens
# would yield sequences longer than max_length, which the fixed-shape
# ONNX graph may reject. No effect on short inputs like this one.
text = "非常好"
inputs = tokenizer(
    text,
    return_tensors="np",
    padding="max_length",
    truncation=True,
    max_length=128,
)

# ONNX inference: session.run returns a LIST of output numpy arrays.
# Assumes the exported model has a classification head whose first output
# is logits of shape (batch, num_labels) — TODO confirm against the export.
pred_logits = ort_session.run(
    None,
    {
        "input_ids": inputs["input_ids"].astype(np.int64),
        "attention_mask": inputs["attention_mask"].astype(np.int64),
        "token_type_ids": inputs["token_type_ids"].astype(np.int64),
    },
)
# BUG FIX: the original wrapped the whole output *list* in torch.tensor,
# producing a (1, 1, num_labels) tensor; softmax over dim=1 (a size-1 axis)
# then returned all-ones "probabilities". Take the first output array only.
pred_logits = torch.tensor(pred_logits[0])  # (batch, num_labels)
print(pred_logits.shape)
pred_softmax = F.softmax(pred_logits, dim=1)  # normalize over the label axis
print(pred_softmax.shape)
n = 2
top_n = torch.topk(pred_softmax, n)  # top-n labels with their probabilities
print(top_n)
pred_ids = top_n.indices.numpy()[0]  # label indices for the first sample
names = ["负向评价", "正向评价"]  # index 0 = negative, index 1 = positive
print(pred_ids)

# With the shape fix above, pred_ids[0] is a scalar index, so the label
# lookup (previously commented out) is valid again.
print(names[pred_ids[0]])
confs = top_n.values.numpy()[0]  # probabilities matching pred_ids
print(confs)
print(confs[0] * 100)  # top-1 confidence as a percentage





# NOTE(review): stale comment — no `outputs` variable exists in this script.
# It describes a *bare* BERT export (outputs[0] = last_hidden_state of shape
# (1, 128, 768), outputs[1] = pooler_output); this script instead expects a
# model with a classification head producing (1, num_labels) logits.