import torch
import torch.onnx
from flair.data import Sentence
from flair.models import SequenceTagger
import onnx
import onnxruntime as ort
from typing import Union, Dict, Any, Type, cast, List, Optional
from flair.file_utils import Tqdm, load_torch_state
from pathlib import Path
import pickle


def save_non_model_content_to_pickle(model_file: Union[str, Path]) -> None:
    """Extract the non-model parts of a Flair .pt checkpoint and pickle them.

    Loads the checkpoint state dictionary, collects the configuration entries
    needed to re-instantiate a SequenceTagger (embeddings, tag dictionary,
    CRF/RNN flags, ...), and writes them next to the checkpoint as a ``.pkl``
    file with the same stem.

    Args:
        model_file: Path to the Flair ``.pt`` checkpoint (str or Path).
    """
    # Load the full checkpoint dictionary (weights + configuration).
    state = load_torch_state(model_file)

    # Collect everything that is NOT the network weights.  Missing keys fall
    # back to None except where Flair has a documented default.
    non_model_content = {
        "embeddings": state.get("embeddings"),
        "tag_dictionary": state.get("tag_dictionary"),
        "tag_format": state.get("tag_format", "BIOES"),
        "tag_type": state.get("tag_type"),
        "use_crf": state.get("use_crf"),
        "use_rnn": state.get("use_rnn"),
        "reproject_embeddings": state.get("reproject_embeddings", True),
        "init_from_state_dict": True,
    }

    # Derive the pickle path with pathlib.  This fixes a bug: the annotation
    # allows Path, but str.replace does not exist on Path — Path.replace(target)
    # RENAMES the file on disk, so a Path argument would have destructively
    # renamed the checkpoint instead of computing a sibling .pkl path.
    pickle_path = Path(model_file).with_suffix(".pkl")

    # Persist the configuration.
    with open(pickle_path, "wb") as f:
        pickle.dump(non_model_content, f)

# Derive every artifact path from the checkpoint path once, so the export,
# validation, and inference stages cannot drift apart.  (Previously the .onnx
# path was hard-coded twice below and would silently break if model_file
# changed.)
model_file = './models/en-ner-conll03-v0.4.pt'
onnx_file = model_file.replace(".pt", ".onnx")

save_non_model_content_to_pickle(model_file)

# Load the trained tagger and switch to inference mode.
tagger = SequenceTagger.load(model_file)
tagger.eval()
# print(tagger)

# Build a sample input sentence for tracing.
sentence = Sentence("This is a sample sentence.")

# Embed the sentence, then stack the per-token embedding vectors into a
# single tensor of shape (batch=1, num_tokens, embedding_dim).  detach()
# drops any autograd history before export / numpy conversion.
tagger.embeddings.embed(sentence)
tagger_tensor = torch.stack([token.embedding for token in sentence]).detach()
tagger_tensor = tagger_tensor.unsqueeze(0)  # add the batch dimension
# One sequence length per batch element — presumably what
# SequenceTagger.forward expects alongside the tensor; confirm against the
# installed flair version.
tagger_lengths = torch.tensor([len(sentence)], dtype=torch.long)

# Log the traced input shapes/dtypes for debugging.
print("Input Tensor Shape:", tagger_tensor.shape)
print("Input Tensor Type:", tagger_tensor.dtype)
print("Lengths Shape:", tagger_lengths.shape)
print("Lengths Type:", tagger_lengths.dtype)

# Export the tagger to ONNX.  Batch and sequence dimensions are declared
# dynamic so the exported graph accepts variable-length input.
torch.onnx.export(
    tagger,
    (tagger_tensor, tagger_lengths),
    onnx_file,
    input_names=["input_tensor", "lengths"],
    output_names=["features"],
    opset_version=11,
    dynamic_axes={
        "input_tensor": {0: "batch_size", 1: "sequence_length"},
        "lengths": {0: "batch_size"},
        "features": {0: "batch_size", 1: "sequence_length"},
    },
)

print("Model has been successfully converted to ONNX format.")


# Reload the exported graph and run the structural validity check.
onnx_tagger = onnx.load(onnx_file)
onnx.checker.check_model(onnx_tagger)
print("ONNX model is valid.")

# Run the exported model through ONNX Runtime with the same sample input.
ort_session_tagger = ort.InferenceSession(onnx_file)

ort_inputs_tagger = {
    'input_tensor': tagger_tensor.numpy(),
    'lengths': tagger_lengths.numpy()
}

ort_outputs_tagger = ort_session_tagger.run(None, ort_inputs_tagger)
print("ONNX tagger outputs shapes:", [f.shape for f in ort_outputs_tagger])
