{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "┌───────────────────────────────┐  \n",
    "│        1. 输入数据准备        │  \n",
    "│                               │  \n",
    "│  - 确定输入数据格式           │  \n",
    "│  - 转换输入类型               │  \n",
    "│  - 补齐输入数据到固定长度     │  \n",
    "│  - 重塑输入数据形状           │  \n",
    "└───────────────────────────────┘  \n",
    "                   │  \n",
    "                   ▼  \n",
    "┌───────────────────────────────┐  \n",
    "│         2. 加载模型           │  \n",
    "│                               │  \n",
    "│  - 使用 `onnxruntime` 加载     │  \n",
    "│    ONNX 模型                   │  \n",
    "│  - 检查模型输入输出名称       │  \n",
    "└───────────────────────────────┘  \n",
    "                   │  \n",
    "                   ▼  \n",
    "┌───────────────────────────────┐  \n",
    "│         3. 进行推理           │  \n",
    "│                               │  \n",
    "│  - 使用 `InferenceSession`     │  \n",
    "│  - 调用 `run` 方法             │  \n",
    "│  - 提供输入字典               │  \n",
    "└───────────────────────────────┘  \n",
    "                   │  \n",
    "                   ▼  \n",
    "┌───────────────────────────────┐  \n",
    "│         4. 处理输出           │  \n",
    "│                               │  \n",
    "│  - 获取推理结果               │  \n",
    "│  - 进行后续处理（如可视化）    │  \n",
    "│  - 返回结果                   │  \n",
    "└───────────────────────────────┘"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "简单的模型推理核心步骤代码\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import onnxruntime as ort\n",
    "\n",
    "def inference(model_path, input, desired_length=256):\n",
    "    \"\"\"Run a single inference pass against an ONNX model.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    model_path : str\n",
    "        Path to an exported ``.onnx`` model file.\n",
    "    input : sequence of int\n",
    "        Token ids; left-padded with zeros to ``desired_length``.\n",
    "    desired_length : int, optional\n",
    "        Fixed sequence length the model expects (default 256).\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    numpy.ndarray\n",
    "        The model's first output.\n",
    "\n",
    "    Raises\n",
    "    ------\n",
    "    ValueError\n",
    "        If ``input`` is longer than ``desired_length``.\n",
    "    \"\"\"\n",
    "    # Convert to int64; use a local name instead of shadowing the builtin.\n",
    "    tokens = np.asarray(input, dtype=np.int64)\n",
    "\n",
    "    if len(tokens) > desired_length:\n",
    "        # np.pad would otherwise raise a cryptic negative-pad-width error.\n",
    "        raise ValueError(\n",
    "            f\"input length {len(tokens)} exceeds desired_length {desired_length}\"\n",
    "        )\n",
    "\n",
    "    # Left-pad with zeros so the sequence is exactly desired_length long.\n",
    "    tokens = np.pad(tokens, (desired_length - len(tokens), 0),\n",
    "                    mode='constant', constant_values=0)\n",
    "\n",
    "    # Reshape to (N, 1) — this example assumes the model takes that shape.\n",
    "    data = np.reshape(tokens, (-1, 1))\n",
    "\n",
    "    # Load the ONNX model.\n",
    "    session = ort.InferenceSession(model_path)\n",
    "\n",
    "    # Query the real input/output names instead of hard-coding 'in'/'out',\n",
    "    # so any single-input / single-output model works.\n",
    "    input_name = session.get_inputs()[0].name\n",
    "    output_name = session.get_outputs()[0].name\n",
    "\n",
    "    # run() returns a list of outputs; return the first one.\n",
    "    return session.run([output_name], {input_name: data})[0]\n",
    "\n",
    "def main():\n",
    "    \"\"\"Example driver: run inference on a toy token sequence.\"\"\"\n",
    "    model_path = \"path/to/your/model.onnx\"  # must be an .onnx export\n",
    "    input_data = [101, 304, 993, 1008, 102]\n",
    "\n",
    "    # Call the inference function and print the result.\n",
    "    result = inference(model_path, input_data)\n",
    "    print(\"Inference Result:\", result)\n",
    "\n",
    "# Run the example.\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "模型推理核心段通用代码\n",
    "\n",
    "| 项目            | 修改内容                            | 说明                                 |  \n",
    "|-----------------|-------------------------------------|--------------------------------------|  \n",
    "| 模型路径        | `model_path = \"path/to/your/model.onnx\"`  | 替换为你的模型文件的实际路径           |  \n",
    "| 输入数据        | `input_data = [101, 304, 993, 1008, 102]`  | 替换为符合你模型要求的输入数据         |  \n",
    "| 输入长度        | `desired_length = 256`              | 根据模型要求修改为预期的输入长度       |  \n",
    "| 数据类型        | `input_array = np.array(input_data, dtype=np.float32)` | 根据模型要求修改为合适的输入数据类型 |  \n",
    "| 输入名称        | `input_name = session.get_inputs()[0].name` | 确保输入名称与模型定义保持一致         |  \n",
    "| 输出名称        | `output_name = session.get_outputs()[0].name`| 确保输出名称与模型定义保持一致         |  \n",
    "| 后续处理逻辑    | `print(\"Inference Result:\", inference_result)` | 根据需要添加结果的后续处理逻辑         |"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import onnxruntime as ort\n",
    "\n",
    "def load_model(model_path):\n",
    "    \"\"\"Load an ONNX model and return an InferenceSession.\n",
    "\n",
    "    NOTE: the file must be a genuine ONNX export (``.onnx``). Passing a\n",
    "    PyTorch checkpoint (``.pt``) fails with InvalidProtobuf, because\n",
    "    onnxruntime parses the file as an ONNX protobuf.\n",
    "    \"\"\"\n",
    "    return ort.InferenceSession(model_path)\n",
    "\n",
    "def preprocess_input(input_data, desired_length):\n",
    "    \"\"\"Preprocess raw input for the model.\n",
    "\n",
    "    - Convert to a numpy array (float32 assumed here; adjust per model).\n",
    "    - Left-pad with zeros to ``desired_length``.\n",
    "    - Reshape to (1, desired_length) for a batch-of-one input.\n",
    "\n",
    "    Raises ValueError if ``input_data`` is longer than ``desired_length``.\n",
    "    \"\"\"\n",
    "    input_array = np.asarray(input_data, dtype=np.float32)\n",
    "    if len(input_array) > desired_length:\n",
    "        # np.pad would otherwise fail with a cryptic negative-width error.\n",
    "        raise ValueError(\n",
    "            f\"input length {len(input_array)} exceeds desired_length {desired_length}\"\n",
    "        )\n",
    "    padded_input = np.pad(input_array, (desired_length - len(input_array), 0),\n",
    "                          mode='constant', constant_values=0)\n",
    "    # Reshape to (1, desired_length); adapt if the model expects otherwise.\n",
    "    return padded_input.reshape(1, -1)\n",
    "\n",
    "def run_inference(session, input_data):\n",
    "    \"\"\"Run the session on preprocessed input and return the output list.\"\"\"\n",
    "    # Query the model's actual input/output names rather than guessing.\n",
    "    input_name = session.get_inputs()[0].name\n",
    "    output_name = session.get_outputs()[0].name\n",
    "\n",
    "    # run() takes a list of output names and a feed dict keyed by input name.\n",
    "    return session.run([output_name], {input_name: input_data})\n",
    "\n",
    "def main():\n",
    "    \"\"\"End-to-end example: load model, preprocess, infer, print.\"\"\"\n",
    "    # Replace with the path to your exported .onnx file.\n",
    "    # Do NOT point this at a .pt PyTorch checkpoint — onnxruntime cannot\n",
    "    # parse it and raises InvalidProtobuf.\n",
    "    model_path = \"path/to/your/model.onnx\"\n",
    "    input_data = [101, 304, 993, 1008, 102]  # example input tokens\n",
    "    desired_length = 256  # expected model input length\n",
    "\n",
    "    # 1. Load the model.\n",
    "    session = load_model(model_path)\n",
    "\n",
    "    # 2. Preprocess the input.\n",
    "    processed_input = preprocess_input(input_data, desired_length)\n",
    "\n",
    "    # 3. Run inference.\n",
    "    inference_result = run_inference(session, processed_input)\n",
    "\n",
    "    # 4. Print the result.\n",
    "    print(\"Inference Result:\", inference_result)\n",
    "\n",
    "# Run the example.\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.20"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
