{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\Gaona\\.conda\\envs\\pytorchgpu\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From c:\\Users\\Gaona\\.conda\\envs\\pytorchgpu\\lib\\site-packages\\keras\\src\\losses.py:2976: The name tf.losses.sparse_softmax_cross_entropy is deprecated. Please use tf.compat.v1.losses.sparse_softmax_cross_entropy instead.\n",
      "\n",
      " * Serving Flask app '__main__'\n",
      " * Debug mode: off\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead.\n",
      " * Running on http://127.0.0.1:5000\n",
      "Press CTRL+C to quit\n"
     ]
    }
   ],
   "source": [
    "from collections import OrderedDict\n",
    "import threading\n",
    "from typing import Any, Dict, List\n",
    "\n",
    "from flask import Flask, jsonify, request\n",
    "import torch\n",
    "from torch import nn\n",
    "from torchvision import transforms\n",
    "from torchvision.models import resnet\n",
    "from torchvision.models.segmentation.fcn import FCN\n",
    "from transformers import  pipeline\n",
    "\n",
    "app = Flask(__name__)\n",
    "\n",
    "class FCNHead(nn.Sequential):\n",
    "    def __init__(self, in_channels: int, channels: int) -> None:\n",
    "        inter_channels = in_channels // 4\n",
    "        layers = [\n",
    "            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),\n",
    "            nn.BatchNorm2d(inter_channels),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(0.1),\n",
    "            nn.Conv2d(inter_channels, channels, 1)\n",
    "        ]\n",
    "\n",
    "        super(FCNHead, self).__init__(*layers)\n",
    "\n",
    "\n",
    "class IntermediateLayerGetter(nn.ModuleDict):\n",
    "\n",
    "    def __init__(self, model: nn.Module, return_layers: Dict[str, str]) -> None:\n",
    "        if not set(return_layers).issubset([name for name, _ in model.named_children()]):\n",
    "            raise ValueError(\"return_layers are not present in model\")\n",
    "\n",
    "        orig_return_layers = return_layers\n",
    "        return_layers = {k: v for k, v in return_layers.items()}\n",
    "        layers = OrderedDict()\n",
    "        for name, module in model.named_children():\n",
    "            layers[name] = module\n",
    "            if name in return_layers:\n",
    "                del return_layers[name]\n",
    "            if not return_layers:\n",
    "                break\n",
    "\n",
    "        super(IntermediateLayerGetter, self).__init__(layers)\n",
    "        self.return_layers = orig_return_layers\n",
    "\n",
    "    def forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:\n",
    "        out = OrderedDict()\n",
    "        for name, module in self.named_children():\n",
    "            x = module(x)\n",
    "            if name in self.return_layers:\n",
    "                out_name = self.return_layers[name]\n",
    "                out[out_name] = x\n",
    "        return out\n",
    "\n",
    "\n",
    "class FCNImageSegmenter(FCN):\n",
    "    \"\"\"FCN segmentation model on a ResNet-101 backbone.\n",
    "\n",
    "    ``layer4`` feeds the main classifier (returned under the key\n",
    "    ``\"out\"``); ``layer3`` feeds the auxiliary classifier (key\n",
    "    ``\"aux\"``).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, num_classes: int = 21, **kwargs: Any) -> None:\n",
    "        # Untrained backbone (weights=None); the checkpoint is expected to\n",
    "        # be loaded afterwards. Stride is replaced by dilation in the last\n",
    "        # two stages.\n",
    "        trunk = resnet.resnet101(\n",
    "            weights=None,\n",
    "            replace_stride_with_dilation=[False, True, True],\n",
    "        )\n",
    "        wrapped = IntermediateLayerGetter(\n",
    "            trunk, return_layers={\"layer4\": \"out\", \"layer3\": \"aux\"})\n",
    "\n",
    "        aux_head = FCNHead(1024, num_classes)   # layer3 emits 1024 channels\n",
    "        main_head = FCNHead(2048, num_classes)  # layer4 emits 2048 channels\n",
    "\n",
    "        super().__init__(wrapped, main_head, aux_head)\n",
    "\n",
    "class SegModel(nn.Module):\n",
    "    \"\"\"Thread-safe singleton wrapper around ``FCNImageSegmenter``.\n",
    "\n",
    "    The model is built and its weights are loaded from ``file_path``\n",
    "    exactly once; every later ``SegModel()`` call returns the\n",
    "    already-initialized shared instance.\n",
    "    \"\"\"\n",
    "\n",
    "    # The single shared instance, created lazily on first use.\n",
    "    _instance = None\n",
    "    # Guards instance creation when the server handles concurrent requests.\n",
    "    _lock = threading.Lock()\n",
    "\n",
    "    def __new__(cls, file_path='model.pth'):\n",
    "        \"\"\"Return the shared instance, creating it under the lock if needed.\n",
    "\n",
    "        Python automatically calls ``__init__`` on the instance returned\n",
    "        from ``__new__``, so ``__init__`` is deliberately NOT invoked here:\n",
    "        the previous explicit call made ``__init__`` run twice on creation\n",
    "        and again on every subsequent ``SegModel()`` call, reloading the\n",
    "        weights from disk each time and defeating the singleton.\n",
    "\n",
    "        :param file_path: path of the model weight file, default 'model.pth'\n",
    "        :return: the shared ``SegModel`` instance\n",
    "        \"\"\"\n",
    "        with cls._lock:  # lock so concurrent first calls create one instance\n",
    "            if cls._instance is None:\n",
    "                cls._instance = super(SegModel, cls).__new__(cls)\n",
    "        return cls._instance\n",
    "\n",
    "    def __init__(self, file_path='model.pth'):\n",
    "        \"\"\"Build the model and load its weights on the first call only.\n",
    "\n",
    "        :param file_path: path of the model weight file, default 'model.pth'\n",
    "        \"\"\"\n",
    "        # __init__ runs on every SegModel() call; initialize only once.\n",
    "        if getattr(self, '_initialized', False):\n",
    "            return\n",
    "        super(SegModel, self).__init__()\n",
    "        self.model = FCNImageSegmenter(num_classes=21)\n",
    "        # map_location='cpu' lets checkpoints saved on a GPU load on\n",
    "        # CPU-only hosts; load_state_dict then copies the values into the\n",
    "        # model's own parameters.\n",
    "        # NOTE(review): torch.load unpickles arbitrary objects -- only load\n",
    "        # trusted checkpoint files.\n",
    "        self.model.load_state_dict(torch.load(file_path, map_location='cpu'))\n",
    "        # Evaluation mode: disables dropout / BatchNorm training behavior.\n",
    "        self.model.eval()\n",
    "        self._initialized = True\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Run the wrapped segmentation model on input tensor ``x``.\"\"\"\n",
    "        return self.model(x)\n",
    "\n",
    "\n",
    "\n",
    "@app.route('/predict', methods=['POST'])\n",
    "def predict():\n",
    "    \"\"\"Segmentation prediction endpoint.\n",
    "\n",
    "    Expects a JSON body ``{\"inputs\": <nested lists convertible to a\n",
    "    float32 tensor>}`` and responds with ``{\"output\": ...}`` holding the\n",
    "    per-pixel argmax class ids. Malformed requests now get an HTTP 400\n",
    "    with an error message instead of an unhandled KeyError/TypeError\n",
    "    (which Flask would surface as HTTP 500).\n",
    "    \"\"\"\n",
    "    # silent=True returns None (instead of raising) for non-JSON bodies.\n",
    "    data = request.get_json(silent=True)\n",
    "    if not data or 'inputs' not in data:\n",
    "        return jsonify({'error': \"JSON body with an 'inputs' field is required\"}), 400\n",
    "\n",
    "    try:\n",
    "        inputs_tensor = torch.tensor(data['inputs'], dtype=torch.float32)\n",
    "    except (TypeError, ValueError) as exc:\n",
    "        return jsonify({'error': 'invalid inputs: ' + str(exc)}), 400\n",
    "\n",
    "    # Inference only: gradients are never needed here.\n",
    "    with torch.no_grad():\n",
    "        # SegModel() hands back the shared model-wrapper instance.\n",
    "        seg_model = SegModel()\n",
    "        # 'out' is the main classifier head; argmax over the class\n",
    "        # dimension yields one class id per pixel.\n",
    "        output = seg_model.model(inputs_tensor)['out']\n",
    "        output = torch.argmax(output, dim=1).squeeze().tolist()\n",
    "\n",
    "    # Return the prediction as JSON.\n",
    "    return jsonify({'output': output})\n",
    "\n",
    "\n",
    "# Start the Flask development server when this cell/script is run directly\n",
    "# (the emitted warning is right: not suitable for production deployment).\n",
    "if __name__ == '__main__':\n",
    "    app.run()\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.20"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
