import torch.onnx
from flair.models import *
from flair.data import Sentence
from ner_model_npu import *

inputs = "I want to go to Beijing."


def _sentence_to_inputs(embeddings, sentence):
    """Embed *sentence* in place and return the (tensor, lengths) pair for forward().

    The tensor stacks each token's embedding and adds a leading batch
    dimension — presumably (1, num_tokens, embedding_dim), TODO confirm
    against the model's expected input shape. ``lengths`` is a 1-element
    LongTensor holding the token count of the (single) sentence.
    """
    embeddings.embed(sentence)
    token_embeddings = [token.embedding for token in sentence]
    tensor = torch.stack(token_embeddings).unsqueeze(0)  # add batch dimension
    lengths = torch.tensor([len(sentence)], dtype=torch.long)
    return tensor, lengths


# Load the PyTorch checkpoint and run a forward pass on the test sentence.
tagger = SequenceTagger.load("./models/en-ner-conll03-v0.4.pt")
tagger_sentence = Sentence(inputs)
tagger_tensor, tagger_lengths = _sentence_to_inputs(tagger.embeddings, tagger_sentence)
features = tagger.forward(tagger_tensor, tagger_lengths)
print("PT output:")
print(type(features))
for f in features:
    print(f.shape)


# Load the exported ONNX model and run the equivalent forward pass.
tagger_npu = SequenceTaggerForNPU.load_onnx("./models/en-ner-conll03-v0.4.onnx")
tagger_npu_sentence = Sentence(inputs)
tagger_npu_tensor, tagger_npu_lengths = _sentence_to_inputs(
    tagger_npu.embeddings, tagger_npu_sentence
)
features_npu = tagger_npu.forward_onnx(tagger_npu_tensor, tagger_npu_lengths)
print("ONNX output:")
print(type(features_npu))
for fn in features_npu:
    print(fn.shape)

# Compare corresponding output tensors from the two paths.
for i, (f, fn) in enumerate(zip(features, features_npu)):
    assert f.shape == fn.shape, f"Shapes of features[{i}] and features_npu[{i}] must be the same"

    # Report the maximum element-wise absolute difference between the pair.
    max_abs_diff = torch.max(torch.abs(f - fn)).item()
    print(f"Maximum absolute difference between features[{i}] and features_npu[{i}]: {max_abs_diff}")

# End-to-end prediction through each path; labels are attached to the sentence
# objects, so printing the sentences shows the predicted tags.
tagger.predict(tagger_sentence)
print("PT label:")
print(tagger_sentence)
tagger_npu.predict_onnx(tagger_npu_sentence)
print("ONNX label:")
print(tagger_npu_sentence)
