{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "***Environment Setup***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-11-10T03:33:18.874973Z",
     "iopub.status.busy": "2025-11-10T03:33:18.874718Z",
     "iopub.status.idle": "2025-11-10T03:33:18.877436Z",
     "shell.execute_reply": "2025-11-10T03:33:18.876942Z",
     "shell.execute_reply.started": "2025-11-10T03:33:18.874953Z"
    },
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Upload ERNIE-develop.zip to /home/aistudio/ first and run this cell once\n",
    "!unzip -q /home/aistudio/ERNIE-develop.zip"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false,
    "execution": {
     "iopub.execute_input": "2025-11-10T03:33:19.287397Z",
     "iopub.status.busy": "2025-11-10T03:33:19.287163Z",
     "iopub.status.idle": "2025-11-10T03:33:19.289691Z",
     "shell.execute_reply": "2025-11-10T03:33:19.289297Z",
     "shell.execute_reply.started": "2025-11-10T03:33:19.287377Z"
    },
    "jupyter": {
     "outputs_hidden": false
    },
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Run this cell once to install the libraries persistently in the environment\n",
    "!mkdir /home/aistudio/external-libraries\n",
    "!python -m pip install paddlepaddle-gpu==3.2.0 -i https://www.paddlepaddle.org.cn/packages/stable/cu126/\n",
    "!python -m pip install aistudio-sdk\n",
    "!python -m pip install -r ERNIE-develop/requirements/gpu/requirements.txt\n",
    "!python -m pip install fastdeploy-gpu==2.3.0-rc0 -i https://www.paddlepaddle.org.cn/packages/stable/fastdeploy-gpu-80_90/ --extra-index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple\n",
    "!python -m pip install ngrok\n",
    "!python -m pip install pyngrok"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false,
    "execution": {
     "iopub.execute_input": "2025-11-20T20:37:07.040147Z",
     "iopub.status.busy": "2025-11-20T20:37:07.039901Z",
     "iopub.status.idle": "2025-11-20T20:37:07.042601Z",
     "shell.execute_reply": "2025-11-20T20:37:07.042157Z",
     "shell.execute_reply.started": "2025-11-20T20:37:07.040122Z"
    },
    "jupyter": {
     "outputs_hidden": false
    },
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "# If the libraries are already installed, you can import them directly by first running the following code\n",
    "import sys\n",
    "sys.path.append('/home/aistudio/external-libraries')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-11-20T20:37:08.813521Z",
     "iopub.status.busy": "2025-11-20T20:37:08.813240Z",
     "iopub.status.idle": "2025-11-20T20:37:11.538659Z",
     "shell.execute_reply": "2025-11-20T20:37:11.538020Z",
     "shell.execute_reply.started": "2025-11-20T20:37:08.813501Z"
    },
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "import paddle\n",
    "paddle.utils.run_check()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-11-10T03:39:43.779252Z",
     "iopub.status.busy": "2025-11-10T03:39:43.779004Z",
     "iopub.status.idle": "2025-11-10T03:39:43.781932Z",
     "shell.execute_reply": "2025-11-10T03:39:43.781485Z",
     "shell.execute_reply.started": "2025-11-10T03:39:43.779231Z"
    },
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Run this cell once to download the ERNIE-4.5-VL-28B-A3B-Paddle model\n",
    "!aistudio download --model PaddlePaddle/ERNIE-4.5-VL-28B-A3B-Paddle --local_dir models/ERNIE-4.5-VL-28B-A3B-Paddle"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-11-20T20:37:16.257906Z",
     "iopub.status.busy": "2025-11-20T20:37:16.257632Z",
     "iopub.status.idle": "2025-11-20T20:39:46.554252Z",
     "shell.execute_reply": "2025-11-20T20:39:46.553678Z",
     "shell.execute_reply.started": "2025-11-20T20:37:16.257887Z"
    },
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "# FastDeploy完整启动代码\n",
    "import subprocess\n",
    "import time\n",
    "import requests\n",
    "import threading\n",
    "\n",
    "def start_fastdeploy():\n",
    "    cmd = [\n",
    "        \"python\", \"-m\", \"fastdeploy.entrypoints.openai.api_server\",\n",
    "        \"--model\", \"models/ERNIE-4.5-VL-28B-A3B-Paddle\",\n",
    "        \"--port\", \"8180\",\n",
    "        \"--metrics-port\", \"8181\", \n",
    "        \"--engine-worker-queue-port\", \"8182\",\n",
    "        \"--max-model-len\", \"32768\",\n",
    "        \"--max-num-seqs\", \"32\",\n",
    "        \"--reasoning-parser\", \"ernie-45-vl\",\n",
    "        \"--quantization\", \"wint8\"\n",
    "    ]\n",
    "    \n",
    "    print(\"🚀 启动FastDeploy服务...\")\n",
    "    print(\"-\" * 50)\n",
    "    \n",
    "    process = subprocess.Popen(\n",
    "        cmd,\n",
    "        stdout=subprocess.PIPE,\n",
    "        stderr=subprocess.STDOUT,\n",
    "        universal_newlines=True,\n",
    "        bufsize=1\n",
    "    )\n",
    "    \n",
    "    print(f\"📝 PID: {process.pid}\")\n",
    "    \n",
    "    service_ready = False\n",
    "    \n",
    "    def monitor_logs():\n",
    "        nonlocal service_ready\n",
    "        try:\n",
    "            while True:\n",
    "                output = process.stdout.readline()\n",
    "                if output == '' and process.poll() is not None:\n",
    "                    break\n",
    "                if output:\n",
    "                    line = output.strip()\n",
    "                    if not \"Loading Weights:\" in line:\n",
    "                        print(f\"[日志] {line}\")\n",
    "                    \n",
    "                    if \"Loading Weights:\" in line and \"100%\" in line:\n",
    "                        print(\"✅ 权重加载完成\")\n",
    "                    elif \"Loading Layers:\" in line and \"100%\" in line:\n",
    "                        print(\"✅ 层加载完成\")\n",
    "                    elif \"Worker processes are launched\" in line:\n",
    "                        print(\"✅ 工作进程启动\")\n",
    "                    elif \"Listening at\" in line:\n",
    "                        print(\"🎉 服务启动完成！\")\n",
    "                        service_ready = True\n",
    "                        break\n",
    "        except Exception as e:\n",
    "            print(f\"日志监控错误: {e}\")\n",
    "    \n",
    "    log_thread = threading.Thread(target=monitor_logs, daemon=True)\n",
    "    log_thread.start()\n",
    "    \n",
    "    start_time = time.time()\n",
    "    while time.time() - start_time < 1800:\n",
    "        if service_ready:\n",
    "            break\n",
    "        if process.poll() is not None:\n",
    "            print(\"❌ 进程退出\")\n",
    "            return None\n",
    "        time.sleep(1)\n",
    "    \n",
    "    if not service_ready:\n",
    "        print(\"❌ 启动超时\")\n",
    "        process.terminate()\n",
    "        return None\n",
    "    \n",
    "    print(\"-\" * 50)\n",
    "    return process\n",
    "\n",
    "def test_model():\n",
    "    try:\n",
    "        import openai\n",
    "        \n",
    "        print(\"🔌 测试模型连接...\")\n",
    "        \n",
    "        client = openai.Client(base_url=\"http://localhost:8180/v1\", api_key=\"null\")\n",
    "        \n",
    "        response = client.chat.completions.create(\n",
    "            model=\"null\",\n",
    "            messages=[\n",
    "                {\"role\": \"system\", \"content\": \"你是一个有用的AI助手。\"},\n",
    "                {\"role\": \"user\", \"content\": \"你好\"}\n",
    "            ],\n",
    "            max_tokens=2048,\n",
    "            stream=False\n",
    "        )\n",
    "        \n",
    "        print(\"✅ 模型测试成功！\")\n",
    "        print(f\"🤖 回复: {response.choices[0].message.content}\")\n",
    "        return True\n",
    "        \n",
    "    except Exception as e:\n",
    "        print(f\"❌ 测试失败: {e}\")\n",
    "        return False\n",
    "\n",
    "def check_service():\n",
    "    try:\n",
    "        response = requests.get(\"http://localhost:8180/v1/models\", timeout=3)\n",
    "        return response.status_code == 200\n",
    "    except:\n",
    "        return False\n",
    "\n",
    "def setup_service():\n",
    "\n",
    "    print(\"=== ERNIE-4.5-0.3B-Paddle 服务启动 ===\")\n",
    "    \n",
    "    if check_service():\n",
    "        print(\"✅ 发现运行中的服务\")\n",
    "        if test_model():\n",
    "            print(\"🎉 服务已就绪！\")\n",
    "            return True\n",
    "        print(\"⚠️ 服务异常，重新启动\")\n",
    "    \n",
    "    process = start_fastdeploy()\n",
    "    \n",
    "    if process is None:\n",
    "        print(\"❌ 启动失败\")\n",
    "        return False\n",
    "    \n",
    "    if test_model():\n",
    "        print(\"🎊 启动成功！现在可以作为智能医疗助手为您提供疾病问诊服务！\")\n",
    "        return True\n",
    "    else:\n",
    "        print(\"❌ 启动但连接失败\")\n",
    "        return False\n",
    "\n",
    "if __name__ == \"__main__\" or True:\n",
    "    setup_service()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-11-20T21:19:09.459787Z",
     "iopub.status.busy": "2025-11-20T21:19:09.459526Z",
     "iopub.status.idle": "2025-11-20T21:19:09.471882Z",
     "shell.execute_reply": "2025-11-20T21:19:09.471414Z",
     "shell.execute_reply.started": "2025-11-20T21:19:09.459770Z"
    },
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Legacy functions for prompt creation and querying the model\n",
    "import openai\n",
    "import base64\n",
    "import json\n",
    "import requests\n",
    "\n",
    "def encode_image(image_path):\n",
    "    with open(image_path, \"rb\") as image_file:\n",
    "        return base64.b64encode(image_file.read()).decode(\"utf-8\")\n",
    "\n",
    "def create_prompt(prompt, role, conversation=None, image_path=None):\n",
    "    if conversation is None:\n",
    "        conversation = []\n",
    "    \n",
    "    message = {\n",
    "        \"role\": role,\n",
    "        \"content\": [\n",
    "            {\"type\": \"text\", \"text\": prompt},\n",
    "        ],\n",
    "    }\n",
    "\n",
    "    if image_path:\n",
    "        image_base64 = encode_image(image_path)\n",
    "        message[\"content\"].append({\"type\": \"image_url\", \"image_url\": {\"url\": f\"data:image/jpeg;base64,{image_base64}\"}})\n",
    "\n",
    "    conversation.append(message)\n",
    "\n",
    "    return conversation\n",
    "\n",
    "def assemble_prompt(system_prompt=None, user_prompt=\"\", json_structure=None, category_list=None):\n",
    "    \"\"\"\n",
    "    Assembles the complete prompt from individual components.\n",
    "    \n",
    "    Args:\n",
    "        system_prompt: System instruction for the AI\n",
    "        user_prompt: Additional user instructions\n",
    "        json_structure: Expected JSON output format\n",
    "        category_list: List of valid categories\n",
    "    \n",
    "    Returns:\n",
    "        Complete assembled prompt string\n",
    "    \"\"\"\n",
    "    if system_prompt is None:\n",
    "        system_prompt = \"You are a helpful AI assistant that extracts receipt information. Respond in a json format, other kind of output is strictly prohibited. You are not allowed to output any other text outside the json format.\"\n",
    "    \n",
    "    if json_structure is None:\n",
    "        json_structure = \"\"\"\n",
    "{\n",
    "    \"merchant\": \"Starbucks\",              // string, the name of the merchant, if not found, return null\n",
    "    \"items\": [                            // array of objects, products in the receipt, if not found, return an empty array\n",
    "        {\n",
    "            \"name\": \"Coffee\",                     // string, the name of the item\n",
    "            \"amount\": 0.00,                       // float, the price of the item\n",
    "            \"category\": \"Food & Drink\",           // string, must match the given categories\n",
    "        },\n",
    "        {\n",
    "            \"name\": \"Sandwich\",                   // string, the name of the item\n",
    "            \"amount\": 0.00,                       // float, the price of the item\n",
    "            \"category\": \"Food & Drink\",           // string, must match the given categories\n",
    "        }\n",
    "    ],\n",
    "    \"subtotal\": 0.00,                     // float, if not found, calculate the subtotal from the items\n",
    "    \"tax\": 0.00,                          // float, if not found, return 0.00\n",
    "    \"tip\": 0.00,                          // float, if not found, return 0.00\n",
    "    \"discount\": 0.00,                     // float, if not found, return 0.00\n",
    "    \"total\": 0.00,                        // float, if not found, calculate the total from subtotal + tax + tip - discount\n",
    "    \"currency\": \"USD\",                    // string, optional, if not found, return USD\n",
    "    \"note\": \"Note here\",                  // string, optional add-on information, if not found, return null\n",
    "    \"tags\": [],                           // array of strings, optional, list the tags of the receipt\n",
    "    \"fields_missing\": [],                 // array of strings, optional, list the fields that are missing (only optional fields are allowed to be missing)\n",
    "  \n",
    "    \"shared\": {                                    // optional, only output this when prompted with related feature\n",
    "        \"type\": \"none|split-even|split-custom|payer-only\",\n",
    "        \"participants\": [\n",
    "                { \"name\": \"Rena\", \"paid\": 5.45, \"owed\": 2.72 },\n",
    "                { \"name\": \"Emma\", \"paid\": 0,    \"owed\": 2.72 }\n",
    "        ]\n",
    "    }\n",
    "}\n",
    "\"\"\"\n",
    "    \n",
    "    if category_list is None:\n",
    "        category_list = [\n",
    "            \"Food & Drink\",\n",
    "            \"Groceries\",\n",
    "            \"Transportation\",\n",
    "            \"Shopping\",\n",
    "            \"Bills & Utilities\",\n",
    "            \"Entertainment\",\n",
    "            \"Education\",\n",
    "            \"Healthcare\",\n",
    "            \"Gifts & Donations\",\n",
    "            \"Travel\",\n",
    "            \"Income\",\n",
    "            \"Other\"\n",
    "        ]\n",
    "    \n",
    "    categories = \"Possible categories are: \" + ', '.join(category_list)\n",
    "    prompt = f\"{system_prompt}\\n{user_prompt}\\n{json_structure}\\n{categories}\"\n",
    "    \n",
    "    return prompt\n",
    "\n",
    "def query_model(prompt):\n",
    "    \"\"\"\n",
    "    This queries model using the OpenAI kit\n",
    "    \"\"\"\n",
    "    client = openai.Client(base_url=\"http://localhost:8180/v1\", api_key=\"null\")\n",
    "\n",
    "    response = client.chat.completions.create(\n",
    "        model=\"null\",\n",
    "        messages=prompt,\n",
    "        max_tokens=2048,\n",
    "        stream=False\n",
    "    )\n",
    "\n",
    "    return response\n",
    "\n",
    "def send_request(image_path):\n",
    "    prompt = assemble_prompt()\n",
    "    conversation = create_prompt(prompt, \"user\", image_path=image_path)\n",
    "    data = {\n",
    "        \"messages\": conversation,\n",
    "        #\"temperature\": 0.0,\n",
    "        \"metadata\": {\"enable_thinking\": False}\n",
    "\n",
    "    }\n",
    "    url = \"https://intangily-misrhymed-ellyn.ngrok-free.dev/v1/chat/completions\"\n",
    "    url = \"http://localhost:8180/v1/chat/completions\"\n",
    "    headers = {\n",
    "        \"Content-Type\": \"application/json\"\n",
    "    }\n",
    "    response = requests.post(url, headers=headers, json=data)\n",
    "    return response.json()['choices'][0]['message']['content']\n",
    "\n",
    "def send_request_enhanced(prompt=None, image_path=None, image_url=None):\n",
    "    \"\"\"\n",
    "    Enhanced version of send_request that supports image URLs and custom prompts.\n",
    "    Returns a structured JSON response.\n",
    "    \n",
    "    Args:\n",
    "        prompt: Custom prompt (if None, uses default)\n",
    "        image_path: Local path to image file\n",
    "        image_url: Direct URL to image (alternative to image_path)\n",
    "    \n",
    "    Returns:\n",
    "        Dictionary with success status and data/error\n",
    "    \"\"\"\n",
    "    try:\n",
    "        # Use default prompt if none provided\n",
    "        if prompt is None:\n",
    "            prompt = assemble_prompt()\n",
    "        \n",
    "        # Handle image URL by adding it to the message content\n",
    "        conversation = []\n",
    "        message = {\n",
    "            \"role\": \"user\",\n",
    "            \"content\": [\n",
    "                {\"type\": \"text\", \"text\": prompt},\n",
    "            ],\n",
    "        }\n",
    "        \n",
    "        if image_path:\n",
    "            # Use original function for local images\n",
    "            image_base64 = encode_image(image_path)\n",
    "            message[\"content\"].append({\n",
    "                \"type\": \"image_url\", \n",
    "                \"image_url\": {\"url\": f\"data:image/jpeg;base64,{image_base64}\"}\n",
    "            })\n",
    "        elif image_url:\n",
    "            # Add direct URL\n",
    "            message[\"content\"].append({\n",
    "                \"type\": \"image_url\", \n",
    "                \"image_url\": {\"url\": image_url}\n",
    "            })\n",
    "        \n",
    "        conversation.append(message)\n",
    "        \n",
    "        # Prepare request\n",
    "        data = {\n",
    "            \"messages\": conversation,\n",
    "            \"metadata\": {\"enable_thinking\": False}\n",
    "        }\n",
    "        \n",
    "        url = \"https://intangily-misrhymed-ellyn.ngrok-free.dev/v1/chat/completions\"\n",
    "        url = \"http://localhost:8180/v1/chat/completions\"\n",
    "        headers = {\"Content-Type\": \"application/json\"}\n",
    "        \n",
    "        response = requests.post(url, headers=headers, json=data)\n",
    "        response.raise_for_status()\n",
    "        \n",
    "        content = response.json()['choices'][0]['message']['content']\n",
    "        \n",
    "        # Try to parse as JSON\n",
    "        try:\n",
    "            parsed_content = json.loads(content)\n",
    "            return {\n",
    "                \"success\": True,\n",
    "                \"data\": parsed_content,\n",
    "                \"raw_content\": content\n",
    "            }\n",
    "        except json.JSONDecodeError:\n",
    "            return {\n",
    "                \"success\": True,\n",
    "                \"data\": None,\n",
    "                \"raw_content\": content,\n",
    "                \"warning\": \"Response is not valid JSON\"\n",
    "            }\n",
    "    \n",
    "    except Exception as e:\n",
    "        return {\n",
    "            \"success\": False,\n",
    "            \"error\": str(e),\n",
    "            \"error_type\": type(e).__name__\n",
    "        }\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-11-20T21:22:59.917659Z",
     "iopub.status.busy": "2025-11-20T21:22:59.917403Z",
     "iopub.status.idle": "2025-11-20T21:23:00.071203Z",
     "shell.execute_reply": "2025-11-20T21:23:00.070711Z",
     "shell.execute_reply.started": "2025-11-20T21:22:59.917641Z"
    },
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# FastAPI Wrapper for send_request_enhanced\n",
    "from fastapi import FastAPI, HTTPException, BackgroundTasks\n",
    "from pydantic import BaseModel\n",
    "from typing import Optional\n",
    "import uvicorn\n",
    "import threading\n",
    "import json\n",
    "\n",
    "# Define request model\n",
    "class ReceiptRequest(BaseModel):\n",
    "    prompt: Optional[str] = None\n",
    "    image_path: Optional[str] = None\n",
    "    image_url: Optional[str] = None\n",
    "\n",
    "# Create FastAPI app\n",
    "app = FastAPI(title=\"Receipt OCR API\", version=\"1.0.0\")\n",
    "\n",
    "@app.post(\"/process\")\n",
    "async def process_receipt(request: ReceiptRequest):\n",
    "    \"\"\"\n",
    "    Process a receipt image and extract information.\n",
    "    \n",
    "    Args:\n",
    "        prompt: Custom prompt (optional, uses default if not provided)\n",
    "        image_path: Local path to image file (optional)\n",
    "        image_url: URL to image (optional)\n",
    "    \n",
    "    Returns:\n",
    "        The extracted receipt content\n",
    "    \"\"\"\n",
    "    try:\n",
    "        # Validate that at least one image source is provided\n",
    "        if not request.image_path and not request.image_url:\n",
    "            raise HTTPException(\n",
    "                status_code=400, \n",
    "                detail=\"Either image_path or image_url must be provided\"\n",
    "            )\n",
    "        \n",
    "        # Modified send_request_enhanced to use local endpoint\n",
    "        def send_to_local_llm(prompt=None, image_path=None, image_url=None):\n",
    "            \"\"\"Send request to local LLM endpoint at localhost:8180\"\"\"\n",
    "            try:\n",
    "                if prompt is None:\n",
    "                    prompt = assemble_prompt()\n",
    "                \n",
    "                conversation = []\n",
    "                message = {\n",
    "                    \"role\": \"user\",\n",
    "                    \"content\": [\n",
    "                        {\"type\": \"text\", \"text\": prompt},\n",
    "                    ],\n",
    "                }\n",
    "                \n",
    "                if image_path:\n",
    "                    image_base64 = encode_image(image_path)\n",
    "                    message[\"content\"].append({\n",
    "                        \"type\": \"image_url\", \n",
    "                        \"image_url\": {\"url\": f\"data:image/jpeg;base64,{image_base64}\"}\n",
    "                    })\n",
    "                elif image_url:\n",
    "                    message[\"content\"].append({\n",
    "                        \"type\": \"image_url\", \n",
    "                        \"image_url\": {\"url\": image_url}\n",
    "                    })\n",
    "                \n",
    "                conversation.append(message)\n",
    "                \n",
    "                data = {\n",
    "                    \"messages\": conversation,\n",
    "                    \"metadata\": {\"enable_thinking\": False}\n",
    "                }\n",
    "                \n",
    "                # Use local endpoint instead of ngrok\n",
    "                url = \"http://localhost:8180/v1/chat/completions\"\n",
    "                headers = {\"Content-Type\": \"application/json\"}\n",
    "                \n",
    "                response = requests.post(url, headers=headers, json=data, timeout=60)\n",
    "                response.raise_for_status()\n",
    "                \n",
    "                content = response.json()['choices'][0]['message']['content']\n",
    "                return {\"success\": True, \"content\": content}\n",
    "            \n",
    "            except Exception as e:\n",
    "                return {\"success\": False, \"error\": str(e), \"error_type\": type(e).__name__}\n",
    "        \n",
    "        # Call the function\n",
    "        result = send_to_local_llm(\n",
    "            prompt=request.prompt,\n",
    "            image_path=request.image_path,\n",
    "            image_url=request.image_url\n",
    "        )\n",
    "        \n",
    "        if result[\"success\"]:\n",
    "            return {\"success\": True, \"content\": result[\"content\"]}\n",
    "        else:\n",
    "            raise HTTPException(status_code=500, detail=result[\"error\"])\n",
    "    \n",
    "    except HTTPException:\n",
    "        raise\n",
    "    except Exception as e:\n",
    "        raise HTTPException(status_code=500, detail=str(e))\n",
    "\n",
    "@app.get(\"/health\")\n",
    "async def health_check():\n",
    "    \"\"\"Health check endpoint\"\"\"\n",
    "    return {\"status\": \"healthy\", \"service\": \"Receipt OCR API\"}\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-11-20T21:23:09.749201Z",
     "iopub.status.busy": "2025-11-20T21:23:09.748922Z",
     "iopub.status.idle": "2025-11-20T21:23:09.753498Z",
     "shell.execute_reply": "2025-11-20T21:23:09.752697Z",
     "shell.execute_reply.started": "2025-11-20T21:23:09.749181Z"
    },
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Run FastAPI in background\n",
    "def run_fastapi():\n",
    "    uvicorn.run(app, host=\"0.0.0.0\", port=8000, log_level=\"info\")\n",
    "\n",
    "print(\"🚀 Starting FastAPI wrapper on port 8000...\")\n",
    "api_thread = threading.Thread(target=run_fastapi, daemon=True)\n",
    "api_thread.start()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-11-20T21:23:20.877043Z",
     "iopub.status.busy": "2025-11-20T21:23:20.876779Z",
     "iopub.status.idle": "2025-11-20T21:23:33.932556Z",
     "shell.execute_reply": "2025-11-20T21:23:33.932089Z",
     "shell.execute_reply.started": "2025-11-20T21:23:20.877022Z"
    },
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Ngrok setup to expose FastAPI\n",
    "from pyngrok import ngrok\n",
    "\n",
    "# Set your ngrok auth token here\n",
    "ngrok.set_auth_token(\"\")\n",
    "\n",
    "public_url = ngrok.connect(8000, \"http\")\n",
    "public_url"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "请点击[此处](https://ai.baidu.com/docs#/AIStudio_Project_Notebook/a38e5576)查看本环境基本用法.  <br>\n",
     "Please click [here](https://ai.baidu.com/docs#/AIStudio_Project_Notebook/a38e5576) for more detailed instructions."
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "py35-paddle1.2.0"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
