# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# --- Standard Library Imports ---
import asyncio
import base64
import io
import logging
import os
import threading
import tempfile
import traceback
import uuid
from contextlib import ExitStack
from enum import Enum
from math import ceil, floor
from typing import Any, Callable, Optional, TypeVar

# --- Third-Party Imports ---
import aiohttp      # ASYNC CHANGE: Replaces 'requests' for non-blocking HTTP calls
import cv2
import numpy as np
import ray
import ray.actor
import torch
from openai import AsyncOpenAI # ASYNC CHANGE: Use the async version of the OpenAI client
from PIL import Image

# --- Local Application Imports ---
from qwen_vl_utils import fetch_image
from .base_tool import BaseTool
from .schemas import OpenAIFunctionToolSchema, ToolResponse

# --- Logger Configuration ---
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "INFO"))

# --- Type Hinting ---
T = TypeVar("T")

# =====================================================================================
# SECTION 1: CONCURRENCY AND RATE LIMITING INFRASTRUCTURE
# =====================================================================================

class PoolMode(Enum):
    """Execution pool flavor (thread- vs process-backed).

    NOTE(review): not referenced elsewhere in this file — presumably kept for
    configuration compatibility; confirm before removing.
    """
    ThreadMode = 1
    ProcessMode = 2

@ray.remote(concurrency_groups={"acquire": 1, "release": 10})
class TokenBucketWorker:
    """Ray actor implementing a global token bucket for rate limiting.

    ``acquire`` runs in its own single-slot concurrency group while ``release``
    runs in a separate group with 10 slots, so a blocked ``acquire`` (waiting on
    the semaphore) cannot starve incoming ``release`` calls.
    """

    def __init__(self, rate_limit: int):
        # Maximum number of tokens that may be held concurrently.
        self.rate_limit = rate_limit
        # Number of tokens currently handed out (monitoring/debugging only).
        self.current_count = 0
        self._semaphore = threading.Semaphore(rate_limit)

    @ray.method(concurrency_group="acquire")
    def acquire(self):
        """Block until a token is available, then take it."""
        self._semaphore.acquire()
        self.current_count += 1

    @ray.method(concurrency_group="release")
    def release(self):
        """Return a previously acquired token to the bucket."""
        self._semaphore.release()
        self.current_count -= 1

    def get_current_count(self):
        """Return how many tokens are currently held."""
        return self.current_count

class VisualExecutionWorker:
    """Executes async callables, optionally throttled by a shared token-bucket limiter.

    Instances are wrapped with ``ray.remote`` by ``init_visual_execution_pool``,
    so ``execute`` runs inside an async Ray actor with ``max_concurrency`` slots.
    """

    def __init__(self, enable_global_rate_limit=True, rate_limit=10):
        # None when global rate limiting is disabled; otherwise a handle to the
        # shared TokenBucketWorker actor.
        self.rate_limit_worker = self._init_rate_limit(rate_limit) if enable_global_rate_limit else None

    def _init_rate_limit(self, rate_limit):
        # get_if_exists=True makes every pool share one named rate-limiter actor.
        return TokenBucketWorker.options(name="visual-tool-rate-limiter", get_if_exists=True).remote(rate_limit)

    # ASYNC CHANGE: The execute method is now async to handle non-blocking operations.
    async def execute(self, fn: Callable[..., T], *fn_args, **fn_kwargs) -> T:
        """Await ``fn(*fn_args, **fn_kwargs)``, taking a rate-limit token first if enabled.

        ``fn`` must be a coroutine function (all helpers in this module are).
        Exceptions are logged with a full traceback and re-raised to the caller.
        """
        if not self.rate_limit_worker:
            # No rate limiting configured: run the coroutine directly.
            return await fn(*fn_args, **fn_kwargs)
        # FIX: acquire the token BEFORE registering the release callback. The
        # original registered the callback first, so a failed acquire would
        # still trigger release and corrupt the bucket's token count.
        await self.rate_limit_worker.acquire.remote()
        with ExitStack() as stack:
            # Guarantee the token is returned on both success and failure.
            stack.callback(self.rate_limit_worker.release.remote)
            try:
                return await fn(*fn_args, **fn_kwargs)
            except Exception as e:
                logger.error(f"Error executing function in visualexe '{fn.__name__}': {e}")
                logger.error(f"Full traceback: {traceback.format_exc()}")
                raise

def init_visual_execution_pool(num_workers: int, **kwargs):
    """Create a VisualExecutionWorker Ray actor with ``num_workers`` concurrent slots.

    Extra keyword arguments are forwarded to the worker's constructor.
    """
    worker_cls = ray.remote(VisualExecutionWorker)
    return worker_cls.options(max_concurrency=num_workers).remote(**kwargs)

# =====================================================================================
# SECTION 2: BASE CLASS FOR VISUAL TOOLS
# =====================================================================================

class BaseVisualTool(BaseTool):
    """Common plumbing for visual tools: config parsing, per-instance image storage,
    and a shared rate-limited execution pool."""

    def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema):
        super().__init__(config, tool_schema)
        # Maps instance_id -> {"image": [PIL images], "response": str, "reward": float}.
        self._instance_dict = {}
        self.num_workers = config.get("num_workers", 20)
        self.rate_limit = config.get("rate_limit", 50)
        self.timeout = config.get("timeout", 30)
        self.enable_global_rate_limit = config.get("enable_global_rate_limit", True)
        self.execution_pool = init_visual_execution_pool(
            num_workers=self.num_workers,
            enable_global_rate_limit=self.enable_global_rate_limit,
            rate_limit=self.rate_limit,
        )

    async def create(self, instance_id: Optional[str] = None, **kwargs) -> tuple[str, ToolResponse]:
        """Register a new tool instance holding one or more fetched images.

        Raises ValueError when no ``image`` is supplied (directly or via
        ``create_kwargs``).
        """
        if instance_id is None:
            instance_id = str(uuid.uuid4())
        # Flatten nested create_kwargs into the top-level kwargs, if present.
        create_kwargs = kwargs.get("create_kwargs", {})
        if create_kwargs:
            kwargs.update(create_kwargs)
        image = kwargs.get("image")
        if image is None:
            raise ValueError("Missing required 'image' parameter in kwargs")
        logger.info(f"Creating new instance {instance_id} with image")
        # Normalize to a list of image sources, then fetch each one.
        sources = image if isinstance(image, list) else [image]
        fetched = [fetch_image({"image": src}) for src in sources]
        self._instance_dict[instance_id] = {"image": fetched, "response": "", "reward": 0.0}
        return instance_id, ToolResponse()

    async def release(self, instance_id: str, **kwargs) -> None:
        """Drop all state for ``instance_id`` (no-op if the id is unknown)."""
        self._instance_dict.pop(instance_id, None)

# =====================================================================================
# SECTION 3: CONCRETE TOOL IMPLEMENTATIONS
# =====================================================================================

class ImageZoomInTool(BaseVisualTool):
    """Tool that crops ("zooms into") a region of one of an instance's images.

    The requested bbox is clamped to the image and, when too small, expanded
    around its center so the crop is at least MIN_DIMENSION pixels per side.
    """

    # Minimum width/height (pixels) of a returned crop.
    MIN_DIMENSION = 28

    def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema):
        super().__init__(config, tool_schema)

    @staticmethod
    def ensure_pil_image(image_data: Any) -> Image.Image:
        """Convert a PIL image, torch tensor, or numpy array into a PIL image.

        Raises TypeError for any other input type.
        """
        if isinstance(image_data, Image.Image):
            return image_data
        if isinstance(image_data, torch.Tensor):
            image_data = image_data.cpu().numpy()
        if isinstance(image_data, np.ndarray):
            if image_data.dtype in [np.float32, np.float64]:
                # Float arrays are rescaled to uint8 — presumably values are
                # in [0, 1]; TODO confirm against callers.
                image_data = (image_data * 255).astype(np.uint8)
            if image_data.ndim == 3 and image_data.shape[0] in [3, 4]:
                # CHW -> HWC layout for PIL.
                image_data = image_data.transpose(1, 2, 0)
            return Image.fromarray(image_data)
        raise TypeError(f"Unsupported image data type: {type(image_data)}")

    # ASYNC CHANGE: The helper is now async to match the worker's execute signature.
    # Even though it's CPU-bound, this makes the interface consistent.
    @staticmethod
    async def _perform_crop(image: Image.Image, bbox: list[float]) -> Image.Image:
        """Return ``image`` cropped to ``bbox`` ([left, top, right, bottom])."""
        return image.crop(bbox)

    def _maybe_resize_bbox(self, bbox_2d, image_width, image_height) -> Optional[list[float]]:
        """Clamp ``bbox_2d`` to the image and grow it to at least MIN_DIMENSION per side.

        Returns the adjusted [left, top, right, bottom] box, or None when the
        box is degenerate or cannot be made valid.
        """
        left, top, right, bottom = bbox_2d
        # Clamp the box to the image bounds.
        left, top = max(0.0, float(left)), max(0.0, float(top))
        right, bottom = min(float(image_width), float(right)), min(float(image_height), float(bottom))
        if not self._validate_bbox(left, top, right, bottom):
            return None
        current_bbox = [left, top, right, bottom]
        height, width = bottom - top, right - left
        if height < self.MIN_DIMENSION or width < self.MIN_DIMENSION:
            # Grow the box around its center so the smaller side reaches
            # MIN_DIMENSION, preserving the aspect ratio.
            center_x, center_y = (left + right) / 2.0, (top + bottom) / 2.0
            min_dim = min(height, width)
            if min_dim == 0:
                return None
            ratio = self.MIN_DIMENSION / min_dim
            target_width, target_height = width * ratio, height * ratio
            # Scale back down if the grown box exceeds the image on either axis.
            if target_width > image_width:
                scale_down = image_width / target_width
                target_width, target_height = image_width, target_height * scale_down
            if target_height > image_height:
                scale_down = image_height / target_height
                target_height, target_width = image_height, target_width * scale_down
            new_left, new_top = center_x - target_width / 2.0, center_y - target_height / 2.0
            # Shift the box back inside the image if it spills over an edge.
            if new_left < 0:
                new_left = 0
            if new_top < 0:
                new_top = 0
            if new_left + target_width > image_width:
                new_left = image_width - target_width
            if new_top + target_height > image_height:
                new_top = image_height - target_height
            new_right, new_bottom = new_left + target_width, new_top + target_height
            current_bbox = [floor(new_left), floor(new_top), ceil(new_right), ceil(new_bottom)]
        final_left, final_top, final_right, final_bottom = current_bbox
        if not self._validate_bbox(final_left, final_top, final_right, final_bottom):
            return None
        # Reject boxes that still end up below the minimum crop size after rounding.
        if (floor(final_bottom) - floor(final_top) < self.MIN_DIMENSION
                or floor(final_right) - floor(final_left) < self.MIN_DIMENSION):
            return None
        return current_bbox

    def _validate_bbox(self, left: float, top: float, right: float, bottom: float) -> bool:
        """Reject empty, tiny (min side <= 1px), or extreme-aspect (>100:1) boxes."""
        if not (left < right and top < bottom):
            return False
        height, width = bottom - top, right - left
        if min(height, width) <= 1:
            return False
        if max(height, width) / min(height, width) > 100:
            return False
        return True

    async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]:
        """Crop the selected instance image to ``bbox_2d`` and return the zoomed view.

        Returns (response, reward, metrics); a -0.05 reward marks invalid input
        or a failed crop.
        """
        bbox_2d = parameters.get("bbox_2d")
        if not bbox_2d or len(bbox_2d) != 4:
            return ToolResponse(text="Error: bbox_2d must be a list of 4 numbers."), -0.05, {"success": False}
        image_id = parameters.get("image_id", 0)
        # FIX: an explicit ``"image_id": None`` previously bypassed the int check
        # and raised an uncaught TypeError in the range comparison; treat None
        # as the default index 0.
        if image_id is None:
            image_id = 0
        if not isinstance(image_id, int):
            return ToolResponse(text=f"Error during image crop: image_id {image_id} is not int."), -0.05, {"success": False}
        if image_id != 0:
            if image_id >= len(self._instance_dict[instance_id]["image"]) or image_id < 0:
                return ToolResponse(text=f"Error during image crop: image_id {image_id} is out of range."), -0.05, {"success": False}

        try:
            original_image_data = self._instance_dict[instance_id]["image"][image_id]
            image = self.ensure_pil_image(original_image_data)
        except (TypeError, ValueError) as e:
            return ToolResponse(text=f"Error: Could not handle the input image format. {e}"), -0.05, {"success": False}

        resized_bbox = self._maybe_resize_bbox(bbox_2d, image_width=image.width, image_height=image.height)
        if resized_bbox is None:
            return ToolResponse(text=f"Error: Bbox {bbox_2d} is invalid."), -0.05, {"success": False}

        try:
            # The remote call is awaited on the pool's async execute method.
            cropped_image = await self.execution_pool.execute.remote(self._perform_crop, image, resized_bbox)
        except Exception as e:
            return ToolResponse(text=f"Error during image crop: {e}"), -0.05, {"success": False}
        if cropped_image is None:
            return ToolResponse(text="Error: Cropping operation failed."), -0.05, {"success": False}
        image_width, image_height = cropped_image.size
        if image_width / image_height > 100 or image_height / image_width > 100:
            return ToolResponse(text=f"Error: The aspect ratio of the original image is too extreme (width: {image_width}, height: {image_height})."), -0.05, {"success": False}
        label = parameters.get("label", "")
        response_text = f"Successfully use the tool! Zoomed in on the image to the region {bbox_2d}." + (f" with label {label}." if label else "") + f"with image_id: {image_id}"
        return ToolResponse(image=[cropped_image], text=response_text), 0.0, {"success": True}

# ASYNC CHANGE: This helper function is now async and uses aiohttp
# (FIX: removed duplicate mid-file imports of tempfile/os/traceback — all are
# already imported at the top of the file, and tempfile was unused here.)

# Default scratch directory for images handed to the depth API.
# NOTE(review): this is a user-specific absolute path, and the API receives the
# *path* rather than the image bytes — presumably server and client share a
# filesystem; confirm for new deployments.
_DEPTH_API_TEMP_DIR = "/data/home/zdhs0094/data-from-5h100/verl/recipe/expert/tem_pic/"


async def _call_depth_api(
    image: Image.Image,
    api_url: str,
    timeout: int,
    temp_dir: str = _DEPTH_API_TEMP_DIR,
    **params,
) -> Image.Image:
    """Send ``image`` to the depth-estimation service and return the depth map.

    The image is saved as PNG under ``temp_dir`` and the file path is POSTed to
    ``api_url``; the response's ``depth_image`` may be a base64 PNG string or a
    nested pixel list. The temp file is always removed.

    Raises ValueError/TypeError on malformed responses and re-raises any
    HTTP/transport error after logging.
    """
    os.makedirs(temp_dir, exist_ok=True)
    unique_name = f"depth_input_{uuid.uuid4().hex}.png"
    temp_path = os.path.join(temp_dir, unique_name)

    try:
        image.save(temp_path, format="PNG")
        if not api_url:
            raise ValueError("API URL for the depth model is not configured properly.")

        async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=timeout)) as session:
            async with session.post(api_url, json={"image": temp_path}) as response:
                response.raise_for_status()
                response_data = (await response.json()).get("depth_image")

        if response_data is None:
            raise ValueError("API response did not contain the 'depth_image' key.")

        depth_image = None
        if isinstance(response_data, str):
            # Base64-encoded image payload.
            image_bytes = base64.b64decode(response_data)
            depth_image = Image.open(io.BytesIO(image_bytes))
        elif isinstance(response_data, list):
            # Raw nested-list pixel data; must decode to an HxWx3/4 array.
            depth_array = np.array(response_data, dtype=np.uint8)
            if depth_array.ndim != 3 or depth_array.shape[2] not in [3, 4]:
                raise ValueError(f"Reconstructed array has an invalid shape for an image: {depth_array.shape}")
            depth_image = Image.fromarray(depth_array)
        else:
            raise TypeError(f"Received unexpected data type for 'depth_image': {type(response_data)}")
        width, height = depth_image.size
        if width / height > 100 or height / width > 100:
            raise ValueError(f"The aspect ratio of the depth image is too extreme (width: {width}, height: {height}).")
        # FIX: this success message was logged at WARNING level; INFO is correct.
        logger.info(f"Successfully created depth image with final size (width, height): {depth_image.size}")
        return depth_image
    except Exception as e:
        logger.error(f"Error in _call_depth_api: {e}")
        raise
    finally:
        # Always clean up the scratch file, even on failure.
        if os.path.exists(temp_path):
            os.remove(temp_path)

class DepthEstimationTool(BaseVisualTool):
    """Tool that produces a depth map for one of an instance's images via a remote API."""

    def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema):
        super().__init__(config, tool_schema)
        self.api_url = config.get("depth_api_url")
        if not self.api_url:
            raise ValueError("Configuration for DepthEstimationTool must include 'depth_api_url'")

    async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]:
        """Run depth estimation on the selected image; returns (response, reward, metrics)."""
        image_id = parameters.get("image_id", 0)
        # FIX: an explicit ``"image_id": None`` previously bypassed the int check
        # and raised an uncaught TypeError in the range comparison; treat None
        # as the default index 0.
        if image_id is None:
            image_id = 0
        if not isinstance(image_id, int):
            return ToolResponse(text=f"Error during depth estimation: image_id {image_id} is not int."), -0.05, {"success": False}
        if image_id != 0:
            if image_id >= len(self._instance_dict[instance_id]["image"]) or image_id < 0:
                return ToolResponse(text=f"Error during depth estimation: image_id {image_id} is out of range."), -0.05, {"success": False}

        image = self._instance_dict[instance_id]["image"][image_id]
        try:
            # Run the API call through the rate-limited pool.
            depth_image = await self.execution_pool.execute.remote(_call_depth_api, image, self.api_url, self.timeout)
            if depth_image is None:
                return ToolResponse(text="Error: Depth estimation API returned no results."), -0.05, {"success": False}
            response_text = f"Successfully use the tool! Generated the depth map." + f"with image_id: {image_id}"
            return ToolResponse(image=[depth_image], text=response_text), 0.0, {"success": True}
        except Exception as e:
            return ToolResponse(text=f"Error during depth estimation: {e}"), -0.05, {"success": False}

# ASYNC CHANGE: This helper function is now async and uses aiohttp
async def _call_sam2_api(image: Image.Image, api_url: str, timeout: int, **params) -> list[Image.Image]:
    """POST ``image`` to the SAM2 segmentation service and return the masked images.

    Optional ``params``: task_type, point_coords, point_labels, input_boxes,
    multimask_output. Raises ValueError when the response has no 'results'.
    """

    def _coords_or_none(key):
        # Normalize optional coordinate params to plain lists; None when absent/empty.
        value = params.get(key)
        return np.array(value).tolist() if value else None

    def _decode_mask(b64_payload):
        # base64 JPEG -> RGB PIL image.
        raw = np.frombuffer(base64.b64decode(b64_payload), np.uint8)
        bgr = cv2.imdecode(raw, cv2.IMREAD_COLOR)
        return Image.fromarray(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))

    bgr_input = cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2BGR)
    _, jpeg_bytes = cv2.imencode(".jpg", bgr_input)
    request_json = {
        "image": base64.b64encode(jpeg_bytes).decode("utf-8"),
        "task_type": params.get("task_type", "automatic"),
        "point_coords": _coords_or_none("point_coords"),
        "point_labels": _coords_or_none("point_labels"),
        "input_boxes": _coords_or_none("input_boxes"),
        "multimask_output": params.get("multimask_output", False),
    }

    async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=timeout)) as session:
        async with session.post(api_url, json=request_json) as response:
            response.raise_for_status()
            payload = await response.json()

    result = payload.get("results")
    if not result:
        raise ValueError("API response did not contain segmentation 'result'")

    if isinstance(result, list):
        return [_decode_mask(item) for item in result]
    return [_decode_mask(result)]

class ImageSegmentationTool(BaseVisualTool):
    """Tool that segments one of an instance's images via a remote SAM2 service."""

    def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema):
        super().__init__(config, tool_schema)
        self.api_url = config.get("sam2_api_url")
        if not self.api_url:
            raise ValueError("Configuration for ImageSegmentationTool must include 'sam2_api_url'")

    async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]:
        """Segment the selected image; returns (response, reward, metrics)."""
        image_id = parameters.get("image_id", 0)
        # FIX: an explicit ``"image_id": None`` previously bypassed the int check
        # and raised an uncaught TypeError in the range comparison; treat None
        # as the default index 0.
        if image_id is None:
            image_id = 0
        if not isinstance(image_id, int):
            return ToolResponse(text=f"Error during image segmentation: image_id {image_id} is not int."), -0.05, {"success": False}
        if image_id != 0:
            if image_id >= len(self._instance_dict[instance_id]["image"]) or image_id < 0:
                return ToolResponse(text=f"Error during image segmentation: image_id {image_id} is out of range."), -0.05, {"success": False}

        image = self._instance_dict[instance_id]["image"][image_id]
        try:
            # Forward all tool parameters (task_type, points, boxes, ...) to the API helper.
            segmented_image = await self.execution_pool.execute.remote(
                _call_sam2_api, image, self.api_url, self.timeout, **parameters
            )
            if segmented_image is None or len(segmented_image) == 0:
                return ToolResponse(text="Error: Segmentation API returned no results."), -0.05, {"success": False}
            response_text = "Successfully use the tool! Segmented the image." + f"with image_id: {image_id}"
            return ToolResponse(image=segmented_image, text=response_text), 0.0, {"success": True}
        except Exception as e:
            return ToolResponse(text=f"Error during image segmentation: {e}"), -0.05, {"success": False}

# ASYNC CHANGE: This helper function is now async and uses aiohttp
async def _call_llmdet_api(image: Image.Image, api_url: str, timeout: int, **params) -> Optional[tuple[Image.Image, Any]]:
    """Run open-vocabulary detection on ``image`` via the LLMDet service.

    Returns (annotated image, detection metadata). Any failure is logged and
    re-raised for the caller to turn into a tool error response.
    """
    try:
        rgb_array = np.array(image.convert("RGB"))
        _, jpeg_bytes = cv2.imencode(".jpg", cv2.cvtColor(rgb_array, cv2.COLOR_RGB2BGR))
        encoded_image = base64.b64encode(jpeg_bytes).decode("utf-8")

        # Accept a single string or a list of strings as detection prompts.
        text = params.get("text")
        if isinstance(text, str):
            text = [text]
        if not text or not isinstance(text, list) or not all(isinstance(p, str) for p in text):
            raise ValueError("Error: 'text' parameter must be a non-empty list of strings.")

        request_json = {"image": encoded_image, "text": text, "threshold": params.get("threshold", 0.4)}

        logger.info(f"Sending request to LLMDet API at {api_url} with prompts: {request_json['text']}, threshold: {request_json['threshold']},image size:{image.size}")
        async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=timeout)) as session:
            async with session.post(api_url, json=request_json) as response:
                response.raise_for_status()
                payload = await response.json()

        boxed_image_b64 = payload.get("boxed_image")
        box_info = payload.get("detections")
        if not boxed_image_b64:
            raise ValueError("LLMDet API response did not contain the 'boxed_image' key.")

        result_image = Image.open(io.BytesIO(base64.b64decode(boxed_image_b64)))
        logger.info(f"Successfully received and decoded boxed image from LLMDet API. Size: {result_image.size}")
        return result_image, box_info
    except Exception as e:
        logger.error(f"Error in _call_llmdet_api: {e}")
        raise

class LLMDetTool(BaseVisualTool):
    """Tool that runs open-vocabulary object detection via a remote LLMDet service."""

    def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema):
        super().__init__(config, tool_schema)
        self.api_url = config.get("api_url")
        if not self.api_url:
            # FIX: the message previously named a nonexistent 'llmdet_api_url'
            # config key while the code reads 'api_url'.
            raise ValueError("Configuration for LLMDetTool must include 'api_url'")

    async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]:
        """Detect objects matching the 'text' prompts; returns (response, reward, metrics)."""
        image_id = parameters.get("image_id", 0)
        # FIX: an explicit ``"image_id": None`` previously bypassed the int check
        # and raised an uncaught TypeError in the range comparison; treat None
        # as the default index 0.
        if image_id is None:
            image_id = 0
        if not isinstance(image_id, int):
            return ToolResponse(text=f"Error during object detection: image_id {image_id} is not int."), -0.05, {"success": False}
        if image_id != 0:
            if image_id >= len(self._instance_dict[instance_id]["image"]) or image_id < 0:
                return ToolResponse(text=f"Error during object detection: image_id {image_id} is out of range."), -0.05, {"success": False}

        image = self._instance_dict[instance_id]["image"][image_id]
        prompts = parameters.get("text")
        if not prompts or not isinstance(prompts, list) or not all(isinstance(p, str) for p in prompts):
            error_msg = "Error: 'text' parameter must be a non-empty list of strings."
            return ToolResponse(text=error_msg), -0.05, {"success": False}

        try:
            api_result = await self.execution_pool.execute.remote(_call_llmdet_api, image, self.api_url, self.timeout, **parameters)
            if api_result is None:
                return ToolResponse(text="Error: LLMDet API returned no results."), -0.05, {"success": False}

            boxed_image, box_info = api_result
            if boxed_image is None:
                return ToolResponse(text="Error: LLMDet API returned no image."), -0.05, {"success": False}

            str_of_box_info = str(box_info)
            response_text = f"Successfully use the tool! Detected objects for prompts: {prompts}. Detection details: {str_of_box_info}" + f"with image_id: {image_id}"
            return ToolResponse(image=[boxed_image], text=response_text), 0.0, {"success": True}
        except Exception as e:
            return ToolResponse(text=f"Error during object detection: {e}"), -0.05, {"success": False}

# ASYNC CHANGE: The helper is now async to match the worker's execute signature.
async def _perform_edge_detection(image: Image.Image, low_threshold: int, high_threshold: int) -> Image.Image:
    """Run Canny edge detection on ``image`` and return the edges as an RGB PIL image.

    Errors are logged and re-raised for the caller to convert into a tool error.
    """
    try:
        grayscale = cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2GRAY)
        edge_map = cv2.Canny(grayscale, low_threshold, high_threshold)
        return Image.fromarray(cv2.cvtColor(edge_map, cv2.COLOR_GRAY2RGB))
    except Exception as e:
        logger.error(f"Error during Canny edge detection: {e}")
        raise

class EdgeDetectionTool(BaseVisualTool):
    """Tool that applies Canny edge detection to one of an instance's images."""

    def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema):
        super().__init__(config, tool_schema)

    async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]:
        """Detect edges on the selected image; returns (response, reward, metrics)."""
        image_id = parameters.get("image_id", 0)
        # FIX: an explicit ``"image_id": None`` previously bypassed the int check
        # and raised an uncaught TypeError in the range comparison; treat None
        # as the default index 0.
        if image_id is None:
            image_id = 0
        if not isinstance(image_id, int):
            return ToolResponse(text=f"Error during edge detection: image_id {image_id} is not int."), -0.05, {"success": False}
        if image_id != 0:
            if image_id >= len(self._instance_dict[instance_id]["image"]) or image_id < 0:
                return ToolResponse(text=f"Error during edge detection: image_id {image_id} is out of range."), -0.05, {"success": False}

        image = self._instance_dict[instance_id]["image"][image_id]
        # Canny hysteresis thresholds; defaults follow the common 100/200 choice.
        low_threshold = parameters.get("low_threshold", 100)
        high_threshold = parameters.get("high_threshold", 200)

        try:
            edge_image = await self.execution_pool.execute.remote(_perform_edge_detection, image, low_threshold, high_threshold)
            if edge_image is None:
                return ToolResponse(text="Error: Edge detection operation failed."), -0.05, {"success": False}
            response_text = f"Successfully use the tool! Detected edges using thresholds ({low_threshold}, {high_threshold})." + f"with image_id: {image_id}"
            return ToolResponse(image=[edge_image], text=response_text), 0.0, {"success": True}
        except Exception as e:
            return ToolResponse(text=f"Error during edge detection: {e}"), -0.05, {"success": False}
from openai import OpenAI
from io import BytesIO

# =====================================================================================
# SECTION 5: OCR DETECTION TOOL (Corrected vLLM Service Implementation)
# =====================================================================================

# Prompt templates for the vLLM-served OCR model, keyed by prompt mode name.
dict_promptmode_to_prompt = {
    # layout_all_en: parse all layout info (bbox + category + text) in json format.
    "layout_all_en": """Please output the layout information from the PDF image, including each layout element's bbox, its category, and the corresponding text content within the bbox.

1. Bbox format: [x1, y1, x2, y2]

2. Layout Categories: The possible categories are ['Caption', 'Footnote', 'Formula', 'List-item', 'Page-footer', 'Page-header', 'Picture', 'Section-header', 'Table', 'Text', 'Title'].

3. Text Extraction & Formatting Rules:
    - Picture: For the 'Picture' category, the text field should be omitted.
    - Formula: Format its text as LaTeX.
    - Table: Format its text as HTML.
    - All Others (Text, Title, etc.): Format their text as Markdown.

4. Constraints:
    - The output text must be the original text from the image, with no translation.
    - All layout elements must be sorted according to human reading order.

5. Final Output: The entire output must be a single JSON object.
""",

    # layout_only_en: layout detection only (bboxes + categories, no text content).
    "layout_only_en": """Please output the layout information from this PDF image, including each layout's bbox and its category. The bbox should be in the format [x1, y1, x2, y2]. The layout categories for the PDF document include ['Caption', 'Footnote', 'Formula', 'List-item', 'Page-footer', 'Page-header', 'Picture', 'Section-header', 'Table', 'Text', 'Title']. Do not output the corresponding text. The layout result should be in JSON format.""",

    # ocr: plain text extraction from the whole image.
    "ocr": """Extract the text content from this image.""",
    # grounding_ocr: text extraction restricted to a caller-supplied bounding box
    # (the box string is appended to this prompt by _call_ocr_api_vllm).
    "grounding_ocr": """Extract text from the given bounding box on the image (format: [x1, y1, x2, y2]).\nBounding Box:\n""",
}

def PILimage_to_base64(image, format='PNG'):
    """Encode a PIL image as a base64 data URL (default PNG)."""
    with BytesIO() as buffer:
        image.save(buffer, format=format)
        encoded = base64.b64encode(buffer.getvalue()).decode('utf-8')
    return f"data:image/{format.lower()};base64,{encoded}"

# ASYNC CHANGE: This helper function is now async and uses AsyncOpenAI client
async def _call_ocr_api_vllm(image: Image.Image, api_url: str, timeout: int, **params) -> str:
    """Query the vLLM-served OCR model and return its raw text/layout output.

    ``params`` may select a prompt mode ('prompt'), a bbox for grounded OCR
    ('bbox_2d'), and generation settings ('max_tokens', 'temperature').
    Failures are logged and re-raised.
    """
    try:
        # The OpenAI client needs the server root, not the full chat endpoint path.
        base_addr = api_url.rsplit('/', 2)[0]
        client = AsyncOpenAI(api_key="EMPTY", base_url=base_addr)
        base64_image_url = PILimage_to_base64(image)

        prompt_type = params.get("prompt", "layout_all_en")
        prompt = dict_promptmode_to_prompt.get(prompt_type, dict_promptmode_to_prompt["layout_all_en"])
        if prompt_type == "grounding_ocr":
            bbox_2d = params.get("bbox_2d")
            if not bbox_2d or len(bbox_2d) != 4:
                raise ValueError("For 'grounding_ocr' prompt_type, 'bbox_2d' parameter is required.")
            prompt += str(bbox_2d)

        # Image placeholder tokens expected by the served model, then the prompt.
        formatted_prompt = f"<|img|><|imgpad|><|endofimg|>{prompt}"
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image_url", "image_url": {"url": base64_image_url}},
                    {"type": "text", "text": formatted_prompt},
                ],
            }
        ]

        completion = await client.chat.completions.create(
            model="model",
            messages=messages,
            max_tokens=params.get("max_tokens", 8192),
            temperature=params.get("temperature", 0.1),
            timeout=timeout,
        )

        ocr_text = completion.choices[0].message.content
        if not ocr_text:
            raise ValueError("OCR API response did not contain valid content.")
        return ocr_text
    except Exception as e:
        logger.error(f"Error in _call_ocr_api_vllm: {e}")
        raise

class OCRDetectionTool(BaseVisualTool):
    """Tool that extracts text/layout from one of an instance's images via a vLLM OCR service."""

    def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema):
        super().__init__(config, tool_schema)
        self.api_url = config.get("api_url")
        if not self.api_url:
            # FIX: the message previously named a nonexistent 'ocr_api_url'
            # config key while the code reads 'api_url'.
            raise ValueError("Configuration for OCRDetectionTool must include 'api_url'")

    async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]:
        """Run OCR on the selected image; returns (response, reward, metrics)."""
        image_id = parameters.get("image_id", 0)
        # FIX: an explicit ``"image_id": None`` previously bypassed the int check
        # and raised an uncaught TypeError in the range comparison; treat None
        # as the default index 0.
        if image_id is None:
            image_id = 0
        if not isinstance(image_id, int):
            return ToolResponse(text=f"Error during OCR processing: image_id {image_id} is not int."), -0.05, {"success": False}
        if image_id != 0:
            if image_id >= len(self._instance_dict[instance_id]["image"]) or image_id < 0:
                return ToolResponse(text=f"Error during OCR processing: image_id {image_id} is out of range."), -0.05, {"success": False}

        image = self._instance_dict[instance_id]["image"][image_id]
        try:
            # Forward prompt-mode and generation params to the OCR helper.
            ocr_text_output = await self.execution_pool.execute.remote(
                _call_ocr_api_vllm, image, self.api_url, self.timeout, **parameters
            )
            if ocr_text_output is None:
                return ToolResponse(text="Error: OCR API returned no results."), -0.05, {"success": False}

            response_text = f"Successfully use the tool! Extracted text and layout information from the image:\n\n{ocr_text_output}" + f"with image_id: {image_id}"
            return ToolResponse(text=response_text), 0.0, {"success": True}
        except Exception as e:
            return ToolResponse(text=f"Error during OCR processing: {e}"), -0.05, {"success": False}