"""WMS核心API服务模块。
该模块实现了WMS3D系统的核心功能，包括：
1. 物品创建、更新和查询
2. 库位管理和库存统计
3. 位置验证和事件记录
遵循DDD（领域驱动设计）和微服务架构原则，提供原子级操作接口供外部编排平台调用。
"""

import logging
from abc import ABC, abstractmethod
from collections import defaultdict
from collections.abc import AsyncGenerator
from datetime import UTC, datetime, timezone
from typing import Dict, List, Optional, Set, Tuple
from uuid import UUID, uuid4

from fastapi import Depends, FastAPI, HTTPException, Query
from fastapi import status as fastapi_status
from geoalchemy2 import functions as geofunc
from geoalchemy2.elements import WKBElement
from geoalchemy2.shape import to_shape
from pydantic import BaseModel, Field
from shapely import wkb
from shapely.geometry import Point
from sqlalchemy import func
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select

from ..core.exceptions import (
    BusinessRuleError,
    LocationNotFoundError,
)
from ..domain.models import (
    Constants,
    DocumentDataRequest,
    Event,
    Location,
    Object,
    ObjectLifeCycle,
    ObjectsOperationRequest,
    ObjectStatus,
    OperationContext,
    Position,
    SensorSource,
)
from ..infrastructure.database import AsyncSessionLocal

logger = logging.getLogger(__name__)


async def get_async_db() -> AsyncGenerator[AsyncSession, None]:
    """Yield an async database session scoped to a single request.

    Opens a session from ``AsyncSessionLocal``, hands it to the caller, and
    guarantees the connection is released when the caller is finished.

    Yields:
        AsyncSession: the active database session.
    """
    async with AsyncSessionLocal() as db_session:
        try:
            yield db_session
        finally:
            # Defensive explicit close on top of the context manager's cleanup.
            await db_session.close()


async def wkb_to_xyz(wkb_hex: str | bytes) -> list[float]:
    """Decode a WKB hex string or raw WKB bytes into ``[x, y, z]``."""
    # Hex strings must be converted to raw bytes first; bytes pass through.
    raw = bytes.fromhex(wkb_hex) if isinstance(wkb_hex, str) else wkb_hex
    point: Point = wkb.loads(raw)
    return [point.x, point.y, point.z]


async def wkb_element_to_xyz(wkb_elem: WKBElement) -> list[float]:
    """Convert a GeoAlchemy2 ``WKBElement`` into ``[x, y, z]`` coordinates."""
    # WKBElement.desc exposes the geometry as a WKB hex string; reuse the
    # plain-Python decoder above.
    return await wkb_to_xyz(wkb_elem.desc)


class ObjectCreateRequest(BaseModel):
    """Request model for creating a new warehouse object via the API.

    Carries the object's initial status and 3D position. ``object_id`` is
    auto-generated when omitted; ``status`` defaults to PENDING.
    """

    object_id: UUID | None = Field(None, description="物品ID（可选，如果未提供将自动生成）")
    sku_id: str = Field(..., description="SKU ID")
    batch_number: str | None = Field(None, description="批次号")
    initial_position: Position = Field(..., description="初始3D位置")
    status: ObjectStatus = Field(ObjectStatus.PENDING, description="初始状态")
    operator_id: str | None = Field(None, description="操作员ID")
    documents: dict | None = Field(None, description="关联单据信息")
    openid: UUID | None = Field(None, description="Open ID for warehouse item identification")


# API响应码定义
class ResponseCode:
    """API response code constants.

    NOTE(review): these reuse HTTP-looking numbers (403, 404, ...) but carry
    WMS-specific meanings — they are business codes, not HTTP statuses.
    """

    SUCCESS = 200
    # Business-rule errors (400-499)
    INVALID_POSITION = 400
    INVALID_STATUS_TRANSITION = 401
    SAME_STATUS_TRANSITION = 402
    OBJECT_NOT_FOUND = 403
    SKU_NOT_FOUND = 404
    INVALID_QUANTITY = 405
    DOCUMENT_ALREADY_PROCESSED = 406
    WAREHOUSE_FULL = 407
    # System errors (500-599)
    INTERNAL_ERROR = 500
    DATABASE_ERROR = 501
    NETWORK_ERROR = 502


class APIResponse(BaseModel):
    """Unified API response envelope shared by all endpoints."""

    # code 200 means success; any other value is a failure (see ResponseCode).
    code: int = Field(..., description="响应码，200表示成功，其他表示失败")
    message: str = Field(..., description="响应消息")
    data: dict | None = Field(None, description="响应数据，成功时包含业务数据")
    timestamp: str = Field(..., description="响应时间戳")


class SuccessResponse(APIResponse):
    """Success envelope: code 200 with a UTC ISO-8601 timestamp."""

    def __init__(self, data: dict | None = None, message: str = "操作成功"):
        """Build a success response.

        Args:
            data: Optional business payload returned to the caller.
            message: Human-readable message; defaults to the Chinese "OK".
        """
        # Annotation fixed: the default is None, so the type is dict | None.
        super().__init__(
            code=ResponseCode.SUCCESS,
            message=message,
            data=data,
            timestamp=datetime.now(UTC).isoformat(),
        )


class ErrorResponse(APIResponse):
    """Error envelope: caller-supplied code/message with a UTC timestamp."""

    def __init__(self, code: int, message: str, data: dict | None = None):
        """Build an error response.

        Args:
            code: Business error code (see ResponseCode).
            message: Human-readable error description.
            data: Optional extra detail payload.
        """
        # Annotation fixed: the default is None, so the type is dict | None.
        super().__init__(
            code=code, message=message, data=data, timestamp=datetime.now(UTC).isoformat()
        )


class PositionValidator:
    """Validates that 3D positions fall inside known storage locations."""

    @staticmethod
    async def validate_all_positions(positions: list[list[float]], db: AsyncSession) -> None:
        """Validate every position; raises on the first invalid one."""
        for candidate in positions:
            await PositionValidator._validate_single_position(candidate, db)

    @staticmethod
    async def _validate_single_position(pos: list[float], db: AsyncSession) -> None:
        """Raise BusinessRuleError if ``pos`` is outside all location boundaries."""
        x, y, z = pos[0], pos[1], pos[2]
        # Build a PostGIS point in the configured SRID and test containment
        # against every location boundary.
        point = geofunc.ST_SetSRID(geofunc.ST_MakePoint(x, y, z), Constants.COORDINATE_SRID)
        stmt = select(Location).where(geofunc.ST_Contains(Location.boundary_geometry, point))
        match = (await db.execute(stmt)).scalar_one_or_none()
        if match is None:
            raise BusinessRuleError(
                f"位置 {pos} 不在任何有效库位内",
                rule_name="POSITION_VALIDATION",
            )


class GeometryHelper:
    """Helpers for converting between spatial columns and plain coordinates."""

    @staticmethod
    def build_point(pos: list[float]):
        """Build a PostGIS point expression from ``[x, y, z]``."""
        return geofunc.ST_SetSRID(geofunc.ST_MakePoint(*pos), Constants.COORDINATE_SRID)

    @staticmethod
    async def extract_coordinates_batch(
        objects: list[Object], db: AsyncSession
    ) -> dict[UUID, list[float]]:
        """Fetch ``[x, y, z]`` for each object's current_position in one query."""
        if not objects:
            return {}
        ids = [o.object_id for o in objects]
        stmt = select(
            Object.object_id,
            geofunc.ST_X(Object.current_position).label("x"),
            geofunc.ST_Y(Object.current_position).label("y"),
            geofunc.ST_Z(Object.current_position).label("z"),
        ).where(Object.object_id.in_(ids))
        rows = (await db.execute(stmt)).fetchall()
        coords: dict[UUID, list[float]] = {}
        for row in rows:
            coords[row.object_id] = [float(row.x), float(row.y), float(row.z)]
        return coords

    @staticmethod
    async def get_position_text(obj: Object, db: AsyncSession) -> str:
        """Return the object's position rendered as WKT text by the database."""
        stmt = select(geofunc.ST_AsText(obj.current_position)).where(
            Object.object_id == obj.object_id
        )
        result = await db.execute(stmt)
        return result.scalar_one()


class EventFactory:
    """Factory for domain Event records."""

    @staticmethod
    def create_object_updated_event(
        object_id: UUID,
        old_position: str,
        new_position: list[float],
        new_status: str,
        document_number: str,
        sensorsource: SensorSource,
        sku_id: str,
        openid: UUID | None = None,
        object_result: str | None = None,
    ) -> Event:
        """Create an Event describing a position/status update of an object."""
        x, y, z = new_position[0], new_position[1], new_position[2]
        # Payload keeps both the previous WKT position and the new one.
        payload = {
            "old_position": old_position,
            "new_position": f"POINT Z ({x} {y} {z})",
            "document_number": document_number,
        }
        return Event(
            event_id=uuid4(),
            object_id=object_id,
            event_type=new_status,
            timestamp=datetime.now(UTC),
            openid=openid,
            payload=payload,
            object_result=object_result,
            document_number=document_number,
            sensor_source=sensorsource.model_dump(mode="json"),
            sku_id=sku_id,
        )


class ResponseBuilder:
    """Builds the DocumentDataRequest payload echoed back to the orchestrator."""

    @staticmethod
    async def build_response(
        documentdata: DocumentDataRequest, objects: list[Object], db: AsyncSession
    ) -> DocumentDataRequest:
        """Attach per-SKU object details to the document items.

        The previous implementation duplicated the item-copy loop for the
        empty and non-empty cases; this version builds the items once. The
        original behavior is preserved exactly, including the status echo:
        with objects present, ``next_status`` is passed through as
        ``current_status``; with no objects, ``current_status`` is kept.

        Args:
            documentdata: The incoming document payload.
            objects: The objects processed for this document (may be empty).
            db: Async session used to resolve object coordinates.

        Returns:
            A new DocumentDataRequest with an ``objects`` list on every item.
        """
        # Batch-load coordinates (empty dict when there are no objects).
        coordinates_map = await GeometryHelper.extract_coordinates_batch(objects, db)

        # Group the processed objects by SKU for item-level attachment.
        sku_objects_map = defaultdict(list)
        for obj in objects:
            sku_objects_map[obj.sku_id].append(obj)

        resp_items = []
        for item in documentdata.items:
            # Shallow copy of the original item; its "objects" key is replaced.
            resp_item = dict(item)
            sku_id = item.get("sku_id")
            resp_item["objects"] = [
                {
                    "object_id": str(obj.object_id),
                    "sku_id": obj.sku_id,
                    "batch_number": obj.batch_number,
                    # Fall back to the origin when no coordinates were resolved.
                    "current_position": coordinates_map.get(obj.object_id, [0.0, 0.0, 0.0]),
                    "status": obj.status,
                }
                for obj in sku_objects_map.get(sku_id, [])
            ]
            resp_items.append(resp_item)

        # Echo next_status as current_status only when objects were processed.
        current_status = documentdata.next_status if objects else documentdata.current_status
        return DocumentDataRequest(
            document_number=documentdata.document_number,
            headers=documentdata.headers,
            items=resp_items,
            current_status=current_status,
            next_status=documentdata.next_status,
            document_type=documentdata.document_type,
            allow_multiple_operations=documentdata.allow_multiple_operations,
            target_position=documentdata.target_position,
        )


class OperationStrategy(ABC):
    """Abstract base class for document operation strategies."""

    @abstractmethod
    async def execute(self, context: OperationContext) -> DocumentDataRequest:
        """Execute the operation and return the updated document payload."""
        pass


class SKUOperationStrategy(OperationStrategy):
    """统一的SKU操作策略，支持数量检查和非检查两种模式"""

    async def execute(self, context: OperationContext) -> DocumentDataRequest:
        """Execute the SKU-level operation for one document.

        Chooses the processing mode from ``allow_multiple_operations`` and
        handles the two "no existing objects" special flows: initial creation
        (current status ``object_created``) and outbound-order allocation
        (next status ``ordered``). Rolls back the DB session and re-raises
        on any failure.

        Args:
            context: Operation context with the document payload, SKU ids,
                DB session and logger.

        Returns:
            The updated document payload to echo back to the orchestrator.

        Raises:
            BusinessRuleError: When no objects exist for a status that
                requires them, or downstream validation fails.
        """
        try:
            logger.info("🎯 SKU策略开始执行")
            # allow_multiple_operations defaults to False (quantity-check mode).
            allow_multiple_operations = getattr(
                context.documentdata, "allow_multiple_operations", False
            )

            # Validate the target position parameter before touching the DB.
            self._validate_target_position(context.documentdata.target_position)
            logger.info(f"✅ 目标位置验证通过: {context.documentdata.target_position}")

            logger.info(
                f"🔍 开始查询现有对象 - 单据: {context.documentdata.document_number}, 当前状态: {context.documentdata.current_status}"
            )
            # Query existing objects for the requested SKUs in the current status.
            existing_objs_map = await self._get_objects_by_skuids_status_from_document(
                context.documentdata.document_number,
                context.db,
                context.documentdata.document_type,
                context.sku_ids,
                [context.documentdata.current_status],
            )
            logger.info(f"🔍 查询结果: 找到 {len(existing_objs_map)} 个现有对象")

            # Only create brand-new objects when none exist AND the current
            # status is the initial "object_created" state.
            if not existing_objs_map and context.documentdata.current_status == "object_created":
                logger.info("❌ 未找到现有对象且当前状态为object_created，将创建新对象")
                # Generate objects for the requested SKUs and insert them.
                return await self._handle_new_objects(context)
            # Outbound-order creation: no objects found and the next status is
            # "ordered" — allocate IN_STOCK objects by SKU/quantity and record
            # the DN number in each object's documents JSON field, e.g.
            # {"arrived": "ASN202511011", "dn": "DN202511011"}.
            elif not existing_objs_map and context.documentdata.next_status == "ordered":
                logger.info("🚪 出库单创建：当前状态为in_stock且下个状态为ordered且查询不到对象，尝试从instock状态分配")
                return await self._handle_outbound_order_creation(context)
            elif not existing_objs_map:
                # Any other status requires pre-existing objects.
                logger.error(
                    f"❌ 未找到现有对象，但当前状态为 {context.documentdata.current_status}，不允许创建新对象"
                )
                raise BusinessRuleError(
                    f"当前状态为 {context.documentdata.current_status} 时，必须存在对应的对象才能进行状态转换",
                    rule_name="OBJECTS_NOT_FOUND_FOR_STATUS_TRANSITION",
                )

            logger.info(f"✅ 查询到 {len(existing_objs_map)} 个现有对象")

            # Log each existing object for traceability.
            for sku_id, obj in existing_objs_map.items():
                logger.info(
                    f"   现有对象: SKU={obj.sku_id}, 位置={obj.current_position}, 状态={obj.status}"
                )

            # Pick the processing mode.
            if allow_multiple_operations:
                logger.info("🔓 使用非检查模式：直接处理所有现有对象，不进行数量检查")
                # Unchecked mode: process existing objects without quantity checks.
                return await self._handle_without_quantity_check(context, existing_objs_map)
            else:
                logger.info("🔒 使用数量检查模式：进行数量检查，处理>、<、=三种场景")
                # Checked mode: handle the >, < and = quantity scenarios.
                return await self._handle_with_quantity_check(context, existing_objs_map)

        except Exception:
            # Unused "as exc" binding removed; roll back partial changes and
            # propagate the original error to the caller.
            await context.db.rollback()
            raise

    async def _handle_without_quantity_check(
        self, context: OperationContext, existing_objs_map: dict[UUID, Object]
    ) -> DocumentDataRequest:
        """Unchecked mode: process objects per requested quantity, allowing
        repeated operations without enforcing an exact quantity match.

        For each SKU: when requested <= available, only the requested number
        of objects is updated; when requested > available, all existing
        objects are updated and — only for statuses allowed by
        ``_should_create_missing_objects`` — new objects are created to
        cover the shortfall.
        """
        context.logger.info("使用非检查模式：根据请求数量处理对象")

        # Map sku_id -> requested quantity from the document items.
        sku_quantity_map = {}
        for item in context.documentdata.items:
            if "sku_id" in item and "quantity" in item:
                sku_quantity_map[item["sku_id"]] = item["quantity"]

        # Group the existing objects by SKU.
        sku_existing_objects = defaultdict(list)
        for obj in existing_objs_map.values():
            sku_existing_objects[obj.sku_id].append(obj)

        final_objects = []

        for sku_id in context.sku_ids:
            if sku_id not in sku_quantity_map:
                # Requested SKU has no quantity entry; log and skip it.
                error_msg = f"SKU {sku_id} 不在请求数量中"
                context.logger.error(error_msg)
                continue

            requested_quantity = sku_quantity_map[sku_id]
            existing_sku_objects = sku_existing_objects.get(sku_id, [])
            available_quantity = len(existing_sku_objects)

            context.logger.info(
                f"SKU {sku_id}: 请求数量={requested_quantity}, 现有数量={available_quantity}"
            )

            if requested_quantity <= available_quantity:
                # Requested <= available: process only the requested number.
                selected_objects = existing_sku_objects[:requested_quantity]
                context.logger.info(f"SKU {sku_id}: 选择处理 {len(selected_objects)} 个对象")

                # Update the selected objects in one batch.
                batch_objects = self._build_batch_objects_from_existing(selected_objects, context)
                batch_map = {obj["object_id"]: obj for obj in batch_objects}
                updated_objects = await self._update_objects(batch_map, context)
                final_objects.extend(updated_objects)
            else:
                # Requested > available: decide whether to create the shortfall.
                shortage = requested_quantity - available_quantity
                context.logger.info(
                    f"SKU {sku_id}: 请求数量({requested_quantity}) > 现有数量({available_quantity}), 缺口 {shortage} 个"
                )

                # Process every existing object.
                batch_objects = self._build_batch_objects_from_existing(
                    existing_sku_objects, context
                )

                # Only some statuses allow creating new objects to fill the gap.
                if self._should_create_missing_objects(context, sku_id, shortage):
                    # Create new objects to cover the shortage.
                    context.logger.info(f"SKU {sku_id}: 将创建 {shortage} 个新对象补足数量")
                    for i in range(shortage):
                        new_object_id = str(uuid4())
                        batch_objects.append(
                            self._build_new_object_data(new_object_id, sku_id, context)
                        )
                else:
                    # No new objects; only the existing ones are processed.
                    context.logger.info(
                        f"SKU {sku_id}: 不创建新对象，只处理现有的 {available_quantity} 个对象"
                    )

                # Apply all updates/creations in a single batch.
                batch_map = {obj["object_id"]: obj for obj in batch_objects}
                updated_objects = await self._update_objects(batch_map, context)
                final_objects.extend(updated_objects)

        # Build the response payload.
        return await ResponseBuilder.build_response(context.documentdata, final_objects, context.db)

    def _should_create_missing_objects(
        self, context: OperationContext, sku_id: str, shortage: int
    ) -> bool:
        """判断是否需要创建新对象补足数量缺口"""
        current_status = context.documentdata.current_status
        CAN_CREATE_OBJECTS_STATUS = ["sorting_task_generated"]

        if current_status in CAN_CREATE_OBJECTS_STATUS:
            # 处理所有现有对象，并创建新对象补足数量
            create_missing = True
            context.logger.info(f"SKU {sku_id}: 状态 {current_status} 允许创建新对象补足数量")
        else:
            # 处理所有现有对象，不需创建新对象补足数量
            create_missing = False
            context.logger.info(f"SKU {sku_id}: 状态 {current_status} 不创建新对象，只处理现有对象")

        return create_missing

    async def _handle_with_quantity_check(
        self, context: OperationContext, existing_objs_map: dict[UUID, Object]
    ) -> DocumentDataRequest:
        """Quantity-check mode: compare requested vs. available counts per SKU
        and dispatch to the >, < or = handler accordingly.
        """
        context.logger.info("使用数量检查模式：进行数量检查和对象创建")

        # Map sku_id -> requested quantity from the document items.
        sku_quantity_map = {}
        for item in context.documentdata.items:
            if "sku_id" in item and "quantity" in item:
                sku_quantity_map[item["sku_id"]] = item["quantity"]

        # Group the existing objects by SKU.
        sku_existing_objects = defaultdict(list)
        for obj in existing_objs_map.values():
            sku_existing_objects[obj.sku_id].append(obj)

        final_objects = []

        for sku_id in context.sku_ids:
            if sku_id not in sku_quantity_map:
                # Requested SKU has no quantity entry; log and skip it.
                error_msg = f"SKU {sku_id} 不在请求数量中"
                context.logger.error(error_msg)
                continue

            requested_quantity = sku_quantity_map[sku_id]
            existing_sku_objects = sku_existing_objects.get(sku_id, [])
            available_quantity = len(existing_sku_objects)

            context.logger.info(
                f"SKU {sku_id}: 请求数量={requested_quantity}, 现有数量={available_quantity}"
            )

            # Three cases: requested >, < or = available.
            if requested_quantity > available_quantity:
                # More requested than available: create the missing objects.
                processed_objs = await self._handle_quantity_greater(
                    context, sku_id, requested_quantity, available_quantity, existing_sku_objects
                )
                final_objects.extend(processed_objs)
            elif requested_quantity < available_quantity:
                # Fewer requested than available: process a subset only.
                processed_objs = await self._handle_quantity_less(
                    context, sku_id, requested_quantity, existing_sku_objects
                )
                final_objects.extend(processed_objs)
            else:
                # Exact match: process every existing object.
                processed_objs = await self._handle_quantity_equal(
                    context, sku_id, existing_sku_objects
                )
                final_objects.extend(processed_objs)

        # Build the response payload.
        return await ResponseBuilder.build_response(context.documentdata, final_objects, context.db)

    async def _handle_quantity_greater(
        self,
        context: OperationContext,
        sku_id: UUID,
        requested_quantity: int,
        available_quantity: int,
        existing_objects: list[Object],
    ) -> list[Object]:
        """Handle requested quantity exceeding the available count: update all
        existing objects and create the missing ones in a single batch."""
        shortage = requested_quantity - available_quantity
        context.logger.info(
            f"SKU {sku_id}: 需要创建 {requested_quantity - available_quantity} 个新对象"
        )

        # Start from update payloads for the existing objects...
        batch_objects = self._build_batch_objects_from_existing(existing_objects, context)

        # ...then append creation payloads for the shortfall.
        batch_objects.extend(
            self._build_new_object_data(str(uuid4()), sku_id, context) for _ in range(shortage)
        )

        # Apply updates and creations in one batch keyed by object_id.
        batch_map = {entry["object_id"]: entry for entry in batch_objects}
        return await self._update_objects(batch_map, context)

    async def _handle_quantity_less(
        self,
        context: OperationContext,
        sku_id: UUID,
        requested_quantity: int,
        existing_objects: list[Object],
    ) -> list[Object]:
        """Handle requested quantity below the available count: update the
        first ``requested_quantity`` objects and mark the rest as DELETED.
        """
        context.logger.info(
            f"SKU {sku_id}: 需要从 {len(existing_objects)} 个对象中选择 {requested_quantity} 个，剩余对象标记为DELETED"
        )

        # The first requested_quantity objects move to next_status...
        selected_objects = existing_objects[:requested_quantity]
        # ...the remainder are soft-deleted.
        remaining_objects = existing_objects[requested_quantity:]

        # Build update payloads for the selected objects (next_status).
        selected_batch_objects = self._build_batch_objects_from_existing(selected_objects, context)

        # Build DELETED payloads for the remaining objects.
        deleted_batch_objects = []
        for obj in remaining_objects:
            deleted_obj_data = self._build_new_object_data(str(obj.object_id), obj.sku_id, context)
            deleted_obj_data["status"] = ObjectStatus.DELETED.value  # override status to DELETED
            deleted_batch_objects.append(deleted_obj_data)

        # Merge both payload sets, keyed by object_id.
        all_batch_objects = selected_batch_objects + deleted_batch_objects
        batch_map = {obj["object_id"]: obj for obj in all_batch_objects}

        # Apply everything in a single batch.
        processed_objects = await self._update_objects(batch_map, context)

        context.logger.info(
            f"SKU {sku_id}: 已处理 {len(selected_objects)} 个对象，标记 {len(remaining_objects)} 个对象为DELETED"
        )

        return processed_objects

    async def _handle_quantity_equal(
        self, context: OperationContext, sku_id: UUID, existing_objects: list[Object]
    ) -> list[Object]:
        """Handle requested quantity equal to the available count: update all
        existing objects in one batch."""
        context.logger.info(f"SKU {sku_id}: 处理所有 {len(existing_objects)} 个对象")

        # Build update payloads and apply them in a single batch.
        payloads = self._build_batch_objects_from_existing(existing_objects, context)
        payloads_by_id = {entry["object_id"]: entry for entry in payloads}
        return await self._update_objects(payloads_by_id, context)

    def _build_batch_objects_from_existing(
        self, objects: list[Object], context: OperationContext
    ) -> list[dict]:
        """从现有对象列表构建batch_objects - 消除重复代码"""
        return [
            {
                "object_id": str(obj.object_id),
                "sku_id": obj.sku_id,
                "status": context.documentdata.next_status,
                "current_position": context.documentdata.target_position,
            }
            for obj in objects
        ]

    def _build_new_object_data(
        self, object_id: str, sku_id: UUID, context: OperationContext
    ) -> dict:
        """构建新对象数据 - 统一对象创建逻辑"""
        return {
            "object_id": object_id,
            "sku_id": sku_id,
            "status": context.documentdata.next_status,
            "current_position": context.documentdata.target_position,
        }

    def _validate_target_position(self, position: list[float]) -> None:
        """验证目标位置参数 - 确保是有效的3D坐标"""
        if not isinstance(position, list) or len(position) != 3:
            raise ValueError(f"target_position必须是包含3个浮点数的列表，当前值: {position}")
        if not all(isinstance(coord, (int, float)) for coord in position):
            raise ValueError(f"target_position的所有元素必须是数字，当前值: {position}")

    async def _handle_new_objects(self, context: OperationContext) -> DocumentDataRequest:
        """Create brand-new objects when none exist for the document's SKUs.

        Validates the current->next status transition via ObjectLifeCycle,
        then creates ``quantity`` objects per SKU at the document's target
        position and persists them through ``_update_objects``.

        Raises:
            BusinessRuleError: If the status transition is invalid or no
                objects could be built from the request data.
        """
        logger.info("🆕 开始处理新对象创建")
        # Check that the current status can legally transition to the target
        # status instead of hard-coding an allow-list of statuses.
        current_status = context.documentdata.current_status
        target_status = context.documentdata.next_status

        # Reject illegal lifecycle transitions up front.
        if not ObjectLifeCycle.is_valid_transition(current_status, target_status):
            possible_states = ObjectLifeCycle.get_valid_next_states(current_status)
            logger.error(
                f"❌ 状态转换验证失败: 当前状态={current_status}, 目标状态={target_status}, 可能的状态={possible_states}"
            )
            raise BusinessRuleError(
                f"当前单据状态为 {current_status}，无法转换到 {target_status} 状态。可能的状态转换: {possible_states}",
                rule_name="INVALID_STATUS_FOR_CREATION",
            )

        logger.info(
            f"✅ 状态验证通过: {context.documentdata.current_status}，开始为SKU {context.sku_ids} 创建新对象"
        )

        # Map sku_id -> requested quantity from the document items.
        sku_quantity_map = {}
        for item in context.documentdata.items:
            if "sku_id" in item and "quantity" in item:
                sku_quantity_map[item["sku_id"]] = item["quantity"]
                logger.info(f"📊 SKU数量映射: {item['sku_id']} -> {item['quantity']}")

        batch_objects = []
        total_to_create = 0

        for sku_id in context.sku_ids:
            if sku_id not in sku_quantity_map:
                # Requested SKU has no quantity entry; log and skip it.
                error_msg = f"SKU {sku_id} 不在请求数量中"
                logger.error(error_msg)
                continue

            requested_quantity = sku_quantity_map[sku_id]
            logger.info(f"🆕 为SKU {sku_id} 创建 {requested_quantity} 个新对象")
            total_to_create += requested_quantity

            # Create the requested number of new objects for this SKU.
            for i in range(requested_quantity):
                new_object_id = str(uuid4())
                logger.debug(f"   生成新对象ID: {new_object_id}")

                # Build the creation payload for the new object.
                obj_data = {
                    "object_id": new_object_id,
                    "sku_id": sku_id,
                    "status": context.documentdata.next_status,
                    "current_position": context.documentdata.target_position,  # parameterized target position
                }

                batch_objects.append(obj_data)

        logger.info(f"📦 准备创建总计 {len(batch_objects)} 个新对象")

        if not batch_objects:
            logger.error("❌ 没有可创建的对象，请检查请求数据")
            raise BusinessRuleError(
                f"无法为SKU {context.sku_ids} 创建新对象，请检查请求数据",
                rule_name="UNABLE_TO_CREATE_OBJECTS",
            )

        # Persist all new objects in one batch via _update_objects.
        logger.info("💾 开始批量创建新对象")
        batch_map = {obj["object_id"]: obj for obj in batch_objects}
        created_objects = await self._update_objects(batch_map, context)

        logger.info(f"✅ 成功创建 {len(created_objects)} 个新对象")

        # Build the response payload.
        return await ResponseBuilder.build_response(
            context.documentdata, created_objects, context.db
        )

    async def _handle_outbound_order_creation(
        self, context: OperationContext
    ) -> DocumentDataRequest:
        """Create an outbound order by allocating IN_STOCK objects.

        For each requested SKU, pulls up to ``quantity`` objects currently in
        IN_STOCK status, moves them to the document's next status and target
        position, and records the DN document number on each object.

        Raises:
            BusinessRuleError: If available stock is insufficient for any SKU.
        """
        logger.info("🚪 开始处理出库单创建 - 从instock状态分配对象")

        # Map sku_id -> requested quantity from the document items.
        sku_quantity_map = {}
        for item in context.documentdata.items:
            if "sku_id" in item and "quantity" in item:
                sku_quantity_map[item["sku_id"]] = item["quantity"]
                logger.info(f"📊 出库单SKU数量映射: {item['sku_id']} -> {item['quantity']}")

        final_objects = []

        for sku_id in context.sku_ids:
            if sku_id not in sku_quantity_map:
                # Requested SKU has no quantity entry; log and skip it.
                error_msg = f"SKU {sku_id} 不在请求数量中"
                logger.error(error_msg)
                continue

            requested_quantity = sku_quantity_map[sku_id]
            logger.info(f"🚪 SKU {sku_id}: 需要从instock状态分配 {requested_quantity} 个对象")

            # Fetch available IN_STOCK objects (FIFO by creation time).
            instock_objects = await self._get_instock_objects_for_sku(
                sku_id, context.db, requested_quantity
            )
            available_quantity = len(instock_objects)

            logger.info(f"📦 SKU {sku_id}: instock可用对象数量 = {available_quantity}")

            if available_quantity < requested_quantity:
                # Not enough stock to satisfy the order.
                logger.error(
                    f"❌ SKU {sku_id}: instock可用对象({available_quantity}) < 需求数量({requested_quantity})"
                )
                raise BusinessRuleError(
                    f"SKU {sku_id} 的可用库存不足。需要: {requested_quantity}, 可用: {available_quantity}",
                    rule_name="INSUFFICIENT_STOCK",
                )

            # Take exactly the requested number of objects.
            selected_objects = instock_objects[:requested_quantity]
            logger.info(f"✅ SKU {sku_id}: 选择 {len(selected_objects)} 个instock对象")

            # Build the batch-update payloads.
            batch_objects = []
            for obj in selected_objects:
                obj_data = {
                    "object_id": str(obj.object_id),
                    "sku_id": obj.sku_id,
                    "status": context.documentdata.next_status,  # move to the target status
                    "current_position": context.documentdata.target_position,
                    # Record the outbound (DN) document on the object.
                    "add_dn_document": {
                        "document_number": context.documentdata.document_number,
                        "document_type": context.documentdata.document_type,
                    },
                }
                batch_objects.append(obj_data)

            # Apply the batch update for this SKU.
            batch_map = {obj["object_id"]: obj for obj in batch_objects}
            updated_objects = await self._update_objects_for_outbound_order(batch_map, context)
            final_objects.extend(updated_objects)

        logger.info(f"✅ 出库单创建完成: 处理了 {len(final_objects)} 个对象")

        # Build the response payload.
        return await ResponseBuilder.build_response(context.documentdata, final_objects, context.db)

    async def _get_instock_objects_for_sku(
        self, sku_id: str, db: AsyncSession, limit: int
    ) -> list[Object]:
        """Query up to ``limit`` objects for ``sku_id`` in IN_STOCK status.

        Oldest objects (by created_at) come first so stock is allocated FIFO.
        Each returned object gets a transient ``_coords`` attribute holding
        its [x, y, z] position ([0, 0, 0] when no position is stored).
        """
        logger.info(f"🔍 查询SKU {sku_id} 的instock对象，限制数量: {limit}")

        stmt = (
            select(Object)
            .where(
                Object.sku_id == sku_id,
                Object.status == ObjectStatus.IN_STOCK.value,  # IN_STOCK only
                # NOTE(review): this exclusion looks redundant — status already
                # equals IN_STOCK above, so it can never be DELETED/ARCHIVED.
                ~Object.status.in_([ObjectStatus.DELETED.value, ObjectStatus.ARCHIVED.value]),
            )
            .order_by(Object.created_at.asc())  # allocate earliest-created first (FIFO)
            .limit(limit)
        )

        result = await db.execute(stmt)
        objects = result.scalars().all()

        logger.info(f"📦 SKU {sku_id}: 查询到 {len(objects)} 个instock对象")

        # Attach plain [x, y, z] coordinates extracted from the geometry column.
        for obj in objects:
            if obj.current_position is not None:
                pt = to_shape(obj.current_position)
                # 2D points carry no z; default it to 0.0.
                obj._coords = [pt.x, pt.y, pt.z if hasattr(pt, "z") else 0.0]
            else:
                obj._coords = [0.0, 0.0, 0.0]

        return objects

    async def _update_objects_for_outbound_order(
        self,
        batch_map: dict[str, dict],
        context: OperationContext,
    ) -> list[Object]:
        """Batch-update objects for an outbound (DN) order.

        For every entry in ``batch_map`` this validates the status transition,
        moves the object to its new position/status, optionally records the DN
        document in the ``documents`` JSONB field, and queues an update event.

        Args:
            batch_map: Mapping of object_id (str) to an update payload with
                ``status``, ``current_position`` and optional ``add_dn_document``.
            context: Operation context carrying the db session, document data,
                sensor source and openid.

        Returns:
            list[Object]: The updated ORM objects.

        Raises:
            BusinessRuleError: If any object is missing or a status transition
                is invalid.
        """
        logger.info("🔄 开始为出库单批量更新对象")
        logger.info(f"📊 批量操作数量: {len(batch_map)} 个对象")

        if not batch_map:
            return []

        object_ids = [UUID(str(obj_id)) for obj_id in batch_map]

        # Load all referenced objects in a single query.
        result = await context.db.execute(select(Object).where(Object.object_id.in_(object_ids)))
        existing_objects = {obj.object_id: obj for obj in result.scalars().all()}

        # All referenced objects must already exist for an outbound order.
        missing_ids = set(object_ids) - set(existing_objects.keys())
        if missing_ids:
            raise BusinessRuleError(
                f"找不到指定的对象ID: {missing_ids}", rule_name="OBJECTS_NOT_FOUND"
            )

        events_to_create = []
        updated_objects = []

        for obj_id_str, obj_data in batch_map.items():
            obj_id = UUID(str(obj_id_str))
            obj = existing_objects[obj_id]
            new_status = obj_data["status"]
            new_position = obj_data["current_position"]

            # Reject no-op transitions explicitly.
            if obj.status == new_status:
                raise BusinessRuleError(
                    f"Object {obj_id} 状态转换无效：不允许从 {obj.status} 转换到相同状态",
                    rule_name="SAME_STATUS_TRANSITION",
                )

            if not ObjectLifeCycle.is_valid_transition(obj.status, new_status):
                raise BusinessRuleError(
                    f"Object {obj_id} 无法从 {obj.status} 变更为 {new_status}",
                    rule_name="INVALID_STATUS_TRANSITION",
                )

            # BUGFIX: capture the *old* position BEFORE overwriting
            # current_position; previously this was read after the update, so
            # the event recorded the new position as the old one.
            # NOTE(review): assumes GeometryHelper.get_position_text renders
            # obj.current_position — confirm against its implementation.
            old_position = await GeometryHelper.get_position_text(obj, context.db)

            # Apply the new position (reuse a ready WKBElement, otherwise
            # convert a raw [x, y, z] list into a geometry).
            if isinstance(new_position, WKBElement):
                obj.current_position = new_position
                new_position_tmp = await wkb_element_to_xyz(new_position)
            else:
                obj.current_position = GeometryHelper.build_point(new_position)
                new_position_tmp = new_position

            obj.status = new_status
            obj.last_updated = datetime.now(UTC)

            # Record the DN document on the object when requested.
            dn_info = obj_data.get("add_dn_document")
            if dn_info:
                await self._add_dn_to_documents_field(
                    obj, dn_info["document_number"], dn_info["document_type"], context.db
                )

            event = EventFactory.create_object_updated_event(
                obj_id,
                old_position,
                new_position_tmp,
                new_status,
                context.documentdata.document_number,
                context.sensorsource,
                obj.sku_id,
                context.openid,
            )
            events_to_create.append(event)
            updated_objects.append(obj)

        # Queue all update events in one go.
        if events_to_create:
            context.db.add_all(events_to_create)

        logger.info(f"✅ 出库单批量更新完成: 更新了 {len(updated_objects)} 个对象")

        return updated_objects

    async def _add_dn_to_documents_field(
        self, obj: Object, dn_number: str, dn_type: str, db: AsyncSession
    ) -> None:
        """Record a DN (delivery note) document in the object's ``documents`` JSONB field.

        Args:
            obj: Target ORM object (its in-memory ``documents`` is also synced).
            dn_number: Document number stored as the value.
            dn_type: Document type used as the JSONB key.
            db: Async database session.
        """
        logger.info(f"📝 为对象 {obj.object_id} 添加DN单据: {dn_number}")

        import json

        from sqlalchemy import text

        # jsonb_set upserts the dn_type key; COALESCE handles a NULL column.
        update_sql = text("""
            UPDATE objects
            SET documents = jsonb_set(
                COALESCE(documents, '{}'::jsonb),
                ARRAY[:dn_key],
                CAST(:dn_number AS jsonb),
                true
            )
            WHERE object_id = :object_id
            RETURNING documents
        """)

        # BUGFIX: use json.dumps instead of manual f'"{dn_number}"' quoting —
        # a dn_number containing quotes or backslashes would previously
        # produce invalid JSON and make the CAST fail.
        result = await db.execute(
            update_sql,
            {
                "dn_key": dn_type,
                "dn_number": json.dumps(dn_number),
                "object_id": str(obj.object_id),
            },
        )

        # scalar_one() also asserts exactly one row was updated.
        updated_documents = result.scalar_one()
        logger.info(f"✅ 更新后的documents字段: {updated_documents}")

        # Keep the in-memory object in sync with the database.
        if not obj.documents:
            obj.documents = {}
        obj.documents[dn_type] = dn_number

    async def _update_objects(
        self,
        batch_map: dict[str, dict],
        context: OperationContext,
    ) -> list[Object]:
        """
        Batch-update objects from ``batch_map``, creating missing ones first.

        Objects that do not exist yet are inserted with a temporary PENDING
        status and a (0, 0, 0) position (no creation event), then ALL objects
        — new and pre-existing — are updated uniformly, and exactly one update
        event per object is recorded.

        Args:
            batch_map: Mapping of object_id (str) to an update payload with
                ``status``, ``current_position``, ``sku_id`` and optionally
                ``batch_number``.
            context: Operation context (db session, document data, sensor
                source, openid).

        Returns:
            list[Object]: The updated objects.

        Raises:
            BusinessRuleError: On a no-op or invalid status transition for a
                pre-existing, non-deleted object.
        """
        logger.info("🔄 开始批量更新对象")
        logger.info(f"📊 批量操作数量: {len(batch_map)} 个对象")

        if not batch_map:
            return []

        object_ids = [UUID(str(obj_id)) for obj_id in batch_map]

        # Load all referenced objects in one query.
        result = await context.db.execute(select(Object).where(Object.object_id.in_(object_ids)))
        existing_objects = {obj.object_id: obj for obj in result.scalars().all()}

        # Create any missing objects first (no creation events — a single
        # update event is emitted for them below).
        missing_ids = set(object_ids) - set(existing_objects.keys())
        if missing_ids:
            logger.info(f"发现 {len(missing_ids)} 个缺失的对象，将先创建它们: {missing_ids}")

            objects_to_create = []
            for missing_id in missing_ids:
                obj_data = batch_map[str(missing_id)]

                # Temporary position/status; the unified update pass below
                # sets the real values.
                new_obj = Object(
                    object_id=missing_id,
                    sku_id=obj_data["sku_id"],
                    batch_number=obj_data.get("batch_number"),
                    current_position=GeometryHelper.build_point([0.0, 0.0, 0.0]),
                    status=ObjectStatus.PENDING.value,
                    documents={
                        context.documentdata.document_type: context.documentdata.document_number
                    },
                    openid=context.openid,
                    created_at=datetime.now(UTC),
                    last_updated=datetime.now(UTC),
                )
                objects_to_create.append(new_obj)
                # Register so the unified pass below finds it.
                existing_objects[missing_id] = new_obj

            if objects_to_create:
                context.db.add_all(objects_to_create)
                # Flush immediately so the raw-SQL UPDATE in
                # _update_documents_field can see the new rows.
                await context.db.flush()

            logger.info(f"已创建 {len(objects_to_create)} 个新对象（临时状态）")

        # Unified update pass over all objects (new and pre-existing).
        events_to_create = []
        updated_objects = []
        seen_event_ids: set[UUID] = set()  # one event per object (set is O(1) per check)

        for obj_id_str, obj_data in batch_map.items():
            obj_id = UUID(str(obj_id_str))
            obj = existing_objects[obj_id]
            new_status = obj_data["status"]
            new_position = obj_data["current_position"]

            old_status = obj.status
            # Transition validation applies only to pre-existing, non-deleted
            # objects; freshly created ones move out of the temporary state.
            if obj_id not in missing_ids and old_status != ObjectStatus.DELETED.value:
                if old_status == new_status:
                    raise BusinessRuleError(
                        f"Object {obj_id} 状态转换无效：不允许从 {obj.status} 转换到相同状态",
                        rule_name="SAME_STATUS_TRANSITION",
                    )
                if not ObjectLifeCycle.is_valid_transition(old_status, new_status):
                    raise BusinessRuleError(
                        f"Object {obj_id} 无法从 {obj.status} 变更为 {new_status}",
                        rule_name="INVALID_STATUS_TRANSITION",
                    )

            # BUGFIX: capture the *old* position BEFORE overwriting
            # current_position; previously it was read after the update, so
            # the event recorded the new position as the old one.
            # NOTE(review): assumes GeometryHelper.get_position_text renders
            # obj.current_position — confirm against its implementation.
            if obj_id in missing_ids:
                old_position = "POINT Z (0 0 0)"
            else:
                old_position = await GeometryHelper.get_position_text(obj, context.db)

            # Apply the new position (reuse a ready WKBElement, otherwise
            # convert a raw [x, y, z] list into a geometry).
            if isinstance(new_position, WKBElement):
                obj.current_position = new_position
                new_position_tmp = await wkb_element_to_xyz(new_position)
            else:
                obj.current_position = GeometryHelper.build_point(new_position)
                new_position_tmp = new_position

            obj.status = new_status
            obj.last_updated = datetime.now(UTC)

            # Record the current document on the object's documents field.
            await self._update_documents_field(
                obj,
                new_status,
                context.documentdata.document_number,
                context.documentdata.document_type,
                context.db,
            )

            event = EventFactory.create_object_updated_event(
                obj_id,
                old_position,
                new_position_tmp,
                new_status,
                context.documentdata.document_number,
                context.sensorsource,
                obj_data["sku_id"],
                context.openid,
            )
            if obj_id not in seen_event_ids:
                seen_event_ids.add(obj_id)
                events_to_create.append(event)
            updated_objects.append(obj)

        if events_to_create:
            context.db.add_all(events_to_create)

        logger.info(
            f"批量处理完成: 创建了 {len(missing_ids)} 个新对象，"
            f"更新了 {len(updated_objects) - len(missing_ids)} 个已存在对象，"
            f"总计处理 {len(updated_objects)} 个对象"
        )

        return updated_objects

    async def _update_documents_field(
        self,
        obj: Object,
        new_status: str,
        document_number: str,
        document_type: str,
        db: AsyncSession,
    ) -> None:
        """Upsert ``documents[document_type] = document_number`` on the object.

        Uses a raw JSONB update so the change is applied immediately in the
        database (bypassing SQLAlchemy's attribute tracking), then mirrors it
        on the in-memory object. No-op when ``document_type`` is empty.

        Args:
            obj: Target ORM object.
            new_status: New object status (currently unused here; kept for
                interface compatibility with callers).
            document_number: Document number to store.
            document_type: JSONB key to write under.
            db: Async database session.
        """
        # Guard clause replaces the old `else: pass` branch.
        if not document_type:
            return

        import json

        from sqlalchemy import text

        # jsonb_set upserts the key; COALESCE handles a NULL documents column.
        update_sql = text("""
            UPDATE objects
            SET documents = jsonb_set(
                COALESCE(documents, '{}'::jsonb),
                ARRAY[:document_type_path],
                CAST(:document_number AS jsonb),
                true
            )
            WHERE object_id = :object_id
            RETURNING documents
        """)

        # BUGFIX: use json.dumps instead of manual f'"{document_number}"'
        # quoting — a number containing quotes or backslashes previously
        # produced invalid JSON for the CAST.
        result = await db.execute(
            update_sql,
            {
                "document_type_path": document_type,
                "document_number": json.dumps(document_number),
                "object_id": str(obj.object_id),
            },
        )

        # scalar_one() asserts exactly one row was updated (value unused).
        result.scalar_one()

        # Keep the in-memory object in sync with the database.
        if not obj.documents:
            obj.documents = {}
        obj.documents[document_type] = document_number

    async def _get_objects_by_skuids_from_document(
        self, document_number: str, db: AsyncSession, document_type: str, sku_ids: list[str]
    ) -> dict[UUID, Object]:
        """Fetch active objects attached to a document, filtered by SKU.

        Matches objects whose ``documents[document_type]`` equals the document
        number, excluding DELETED / ARCHIVED / STORED statuses.

        Returns:
            dict[UUID, Object]: Objects keyed by object_id; each carries a
            transient ``_coords`` [x, y, z] attribute.
        """
        stmt = select(Object).where(
            Object.documents.op("->>")(document_type) == document_number,
            ~Object.status.in_(
                [ObjectStatus.DELETED.value, ObjectStatus.ARCHIVED.value, ObjectStatus.STORED.value]
            ),
            Object.sku_id.in_(sku_ids),
        )
        res = await db.execute(stmt)
        objs = res.scalars().all()

        # Decode the stored geometry into a plain [x, y, z] Python attribute.
        for obj in objs:
            if obj.current_position is not None:
                pt = to_shape(obj.current_position)  # shapely.geometry.Point
                # BUGFIX: use has_z — accessing .z on a 2D point raises
                # shapely DimensionError, which hasattr() does NOT swallow.
                obj._coords = [pt.x, pt.y, pt.z if pt.has_z else 0.0]
            else:
                obj._coords = [0.0, 0.0, 0.0]

        return {obj.object_id: obj for obj in objs}

    async def _get_objects_by_skuids_status_from_document(
        self,
        document_number: str,
        db: AsyncSession,
        document_type: str,
        sku_ids: list[str],
        object_status: list[str],
    ) -> dict[UUID, Object]:
        """Fetch a document's objects filtered by SKU and an explicit status list.

        Matches objects whose ``documents[document_type]`` equals the document
        number, whose status is in ``object_status``, and whose SKU is in
        ``sku_ids``.

        Returns:
            dict[UUID, Object]: Objects keyed by object_id; each carries a
            transient ``_coords`` [x, y, z] attribute.
        """
        logger.info(
            f"   查询参数: 单据号={document_number}, SKU={sku_ids}, 状态={object_status}, 文档类型={document_type}"
        )
        stmt = select(Object).where(
            Object.documents.op("->>")(document_type) == document_number,
            Object.status.in_(object_status),
            Object.sku_id.in_(sku_ids),
        )
        logger.info(f"   SQL查询: {stmt}")
        res = await db.execute(stmt)
        objs = res.scalars().all()
        logger.info(f"   数据库查询结果: {len(objs)} 个对象")

        # Decode the stored geometry into a plain [x, y, z] Python attribute.
        for obj in objs:
            if obj.current_position is not None:
                pt = to_shape(obj.current_position)  # shapely.geometry.Point
                # BUGFIX: use has_z — accessing .z on a 2D point raises
                # shapely DimensionError, which hasattr() does NOT swallow.
                obj._coords = [pt.x, pt.y, pt.z if pt.has_z else 0.0]
            else:
                obj._coords = [0.0, 0.0, 0.0]

        return {obj.object_id: obj for obj in objs}

    async def _to_dict(self, obj):
        """把 Object 实例或 dict 统一转成 dict"""
        if hasattr(obj, "model_dump"):  # Pydantic v2
            return obj.model_dump()
        if hasattr(obj, "__dict__"):  # 普通对象
            return obj.__dict__
        return obj

    async def _extract_all_objects(
        self, documentdata: DocumentDataRequest, existing_objs_map: dict[UUID, Object]
    ) -> list[dict]:
        """提取所有对象数据 - 按SKU分组处理"""
        # 根据 DocumentDataRequest 中item.qty,及 existing_objs_map 生成objects，objectid 取对应的 existing_objs_map中的objectid,
        # 生成objects 的位置及状态取 DocumentDataRequest 中对应的Object 的位置和状态。若item.qty 大于len(existing_objs_map),则多出的objects
        # 的objectid 自动生成。若item.qty 小于len(existing_objs_map),则超出的objects的状态置为 ObjectStatus.DELETED.value,最后返回list[dict]

        result_objects = []
        # 按SKU分组existing对象
        existing_by_sku = {}
        for obj_id, obj in existing_objs_map.items():
            sku_id = obj.sku_id
            if sku_id not in existing_by_sku:
                existing_by_sku[sku_id] = []
            existing_by_sku[sku_id].append((obj_id, obj))
        # 按SKU分组处理每个item
        for item in documentdata.items:
            if isinstance(item, dict):
                sku_id = item["sku_id"]
                qty = item["qty"]
                item_objects = item.get("objects", [])
            else:
                # 如果item是Pydantic模型对象
                sku_id = item.sku_id
                qty = item.qty
                item_objects = getattr(item, "objects", [])

            # 获取该SKU对应的existing对象
            existing_sku_objects = existing_by_sku.get(sku_id, [])
            existing_count = len(existing_sku_objects)
            # 三种情况的处理：
            if qty == existing_count:
                # 情况1：数量相等，直接更新现有对象
                for i, (obj_id, existing_obj) in enumerate(existing_sku_objects):
                    # 从item的objects中获取对应的数据（数量应该匹配）
                    if isinstance(item_objects[i], dict):
                        obj_data = item_objects[i].copy()
                    else:
                        # 如果是Pydantic模型对象，转换为字典
                        obj_data = (
                            item_objects[i].model_dump()
                            if hasattr(item_objects[i], "model_dump")
                            else item_objects[i].__dict__.copy()
                        )

                    # 使用现有对象的ID
                    obj_data["object_id"] = str(obj_id)
                    result_objects.append(obj_data)

            elif qty > existing_count:
                # 情况2：需求量大于现有数量

                raise

            else:  # qty < existing_count
                # 情况3：需求量小于现有数量，更新部分对象，其余不处理

                # 处理需要保留的对象（数量 = qty）
                for i in range(qty):
                    obj_id, _existing_obj = existing_sku_objects[i]
                    if isinstance(item_objects[i], dict):
                        obj_data = item_objects[i].copy()
                    else:
                        obj_data = (
                            item_objects[i].model_dump()
                            if hasattr(item_objects[i], "model_dump")
                            else item_objects[i].__dict__.copy()
                        )

                    obj_data["object_id"] = str(obj_id)
                    result_objects.append(obj_data)
        return result_objects


class ObjectsOperationService:
    """Main object-operation service using the unified SKU strategy."""

    def __init__(self) -> None:
        # A single strategy instance handles every SKU operation.
        self.sku_strategy = SKUOperationStrategy()

    async def objects_operation(
        self,
        documentdata: DocumentDataRequest,
        sensorsource: SensorSource,
        sku_ids: list[str] | None,
        db: AsyncSession,
        openid: UUID | None = None,
    ) -> DocumentDataRequest:
        """Unified Object & Event operation entry point with full-transaction
        protection.

        Validates every supplied position, delegates all work to the SKU
        strategy, then commits once; any error rolls the transaction back.
        """
        try:
            logger.info("🔄 开始创建操作上下文")
            ctx = OperationContext(
                documentdata=documentdata,
                sensorsource=sensorsource,
                sku_ids=sku_ids,
                db=db,
                openid=openid,
                logger=logger,
            )
            logger.info(f"✅ 操作上下文创建完成，数据库会话: {id(db)}")

            # Gather every coordinate to validate: object positions inside
            # dict-shaped items plus the optional document target position.
            positions = [
                entry["current_position"]
                for item in documentdata.items
                if "objects" in item
                for entry in item["objects"]
            ]
            if documentdata.target_position:
                positions.append(documentdata.target_position)

            if positions:
                logger.info(f"📍 开始验证 {len(positions)} 个位置坐标")
                await PositionValidator.validate_all_positions(positions, db)
                logger.info("✅ 位置坐标验证通过")
            else:
                logger.info("📍 无需验证位置坐标（无位置数据）")

            logger.info(
                f"🎯 开始执行SKU策略处理单据: {documentdata.document_number}, SKU: {sku_ids}"
            )
            outcome = await self.sku_strategy.execute(ctx)
            logger.info("✅ SKU策略执行完成")

            # Single commit keeps the whole operation atomic.
            logger.info("💾 开始提交数据库事务")
            await db.commit()
            logger.info("✅ 数据库事务提交成功")

            # Summary logging (only when the result exposes non-empty items).
            if getattr(outcome, "items", None):
                object_total = sum(len(item.get("objects", [])) for item in outcome.items)
                logger.info(
                    f"📊 处理结果: {len(outcome.items)} 个SKU, 总计 {object_total} 个对象"
                )

            return outcome
        except BusinessRuleError as e:
            await db.rollback()
            logger.error(f"objects_operation业务规则错误: {e.error_code} - {e.message}")
            # Re-raise unchanged so the caller keeps the original error code.
            raise
        except Exception as e:
            await db.rollback()
            logger.error(f"objects_operation系统错误: {type(e).__name__}: {e}")
            raise BusinessRuleError(
                f"系统内部错误: {type(e).__name__}: {e}", rule_name="SYSTEM_ERROR"
            ) from e


# Module-level singletons: the FastAPI application for the WMS core API and
# the shared ObjectsOperationService instance used by the route handlers below.
wms_app = FastAPI(title="WMS3D Core API", description="Core API for WMS3D system", version="1.0.0")
objects_operation_service = ObjectsOperationService()


@wms_app.post("/api/v1/objects_operation/")
async def objects_operation(
    request: ObjectsOperationRequest,
    db: AsyncSession = Depends(get_async_db),
):
    """Objects operation with unified payload containing sku_ids and openid."""
    # 请求入口日志
    logger.info("=" * 80)
    logger.info("🚀 开始处理 objects_operation 请求")
    logger.info(f"📋 单据号: {request.documentdata.document_number}")
    logger.info(f"📦 单据类型: {request.documentdata.document_type}")
    logger.info(
        f"📊 状态转换: {request.documentdata.current_status} -> {request.documentdata.next_status}"
    )
    logger.info(f"🎯 SKU列表: {request.sku_ids}")
    logger.info(f"👤 用户ID: {request.openid}")
    logger.info(f"🔢 允许多次操作: {request.documentdata.allow_multiple_operations}")
    logger.info(f"📍 目标位置: {request.documentdata.target_position}")
    logger.info(f"📝 请求项数量: {len(request.documentdata.items)}")

    # 打印items详情
    for i, item in enumerate(request.documentdata.items):
        logger.info(f"   Item {i + 1}: SKU={item.get('sku_id')}, 数量={item.get('quantity')}")

    try:
        result = await objects_operation_service.objects_operation(
            request.documentdata, request.sensorsource, request.sku_ids, db, request.openid
        )
        logger.info("✅ objects_operation 请求处理成功")
        logger.info("=" * 80)
        # 返回统一格式的成功响应
        return SuccessResponse(
            data=result.model_dump(),
            message=f"单据 {request.documentdata.document_number} 处理成功",
        )
    except BusinessRuleError as e:
        # 根据业务规则错误类型返回不同的错误码
        error_code_map = {
            "POSITION_VALIDATION": ResponseCode.INVALID_POSITION,
            "INVALID_STATUS_TRANSITION": ResponseCode.INVALID_STATUS_TRANSITION,
            "SAME_STATUS_TRANSITION": ResponseCode.SAME_STATUS_TRANSITION,
            "INVALID_STATUS_FOR_CREATION": ResponseCode.INVALID_STATUS_TRANSITION,
            "UNABLE_TO_CREATE_OBJECTS": ResponseCode.INVALID_QUANTITY,
        }

        # 优先使用details中的rule_name，其次使用error_code
        rule_name = None
        if e.details and isinstance(e.details, dict):
            rule_name = e.details.get("rule")

        if rule_name and rule_name in error_code_map:
            code = error_code_map[rule_name]
        else:
            code = error_code_map.get(e.error_code, ResponseCode.INTERNAL_ERROR)
        logger.error(f"❌ 业务规则错误: {e.error_code} - {e.message}")
        logger.info("=" * 80)

        return ErrorResponse(
            code=code, message=e.message, data={"error_code": e.error_code, "details": e.details}
        )
    except IntegrityError as e:
        logger.error(f"❌ 数据库完整性错误: {str(e)}")
        logger.info("=" * 80)
        return ErrorResponse(
            code=ResponseCode.DATABASE_ERROR,
            message="数据库操作失败，请检查数据完整性",
            data={"original_error": str(e)},
        )
    except Exception as e:
        logger.error(f"❌ 系统内部错误: {type(e).__name__}: {e}")
        logger.info("=" * 80)
        return ErrorResponse(
            code=ResponseCode.INTERNAL_ERROR,
            message="系统内部错误，请稍后重试",
            data={"error_type": type(e).__name__, "original_error": str(e)},
        )


class InventoryQueryRequest(BaseModel):
    """Inventory query request model."""

    # Use the PEP 585 builtin generic (list[str]) for consistency with the
    # rest of this module, which already uses lowercase generics.
    sku_ids: list[str] = Field(..., description="SKU ID列表，支持单个或多个SKU查询")


@wms_app.post("/api/v1/inventory/query/")
async def query_inventory(
    request: InventoryQueryRequest,
    db: AsyncSession = Depends(get_async_db),
):
    """查询SKU可用库存

    根据SKU列表查询状态为in_stock的可用库存数量
    """
    logger.info(f"🔍 查询库存: SKU列表 {request.sku_ids}")

    try:
        # 构建查询：按SKU分组统计in_stock状态的数量
        query = select(
            Object.sku_id,
            func.count(Object.object_id).label("available_quantity")
        ).where(
            Object.sku_id.in_(request.sku_ids),
            Object.status == ObjectStatus.IN_STOCK.value
        ).group_by(Object.sku_id)

        result = await db.execute(query)
        rows = result.fetchall()

        # 构建响应：确保所有请求的SKU都有返回
        inventory_data = {}
        for sku_id in request.sku_ids:
            inventory_data[sku_id] = 0  # 默认0库存

        for row in rows:
            inventory_data[row.sku_id] = row.available_quantity

        logger.info(f"✅ 库存查询完成: {inventory_data}")

        return SuccessResponse(
            data={
                "inventory": inventory_data,
                "total_skus": len(request.sku_ids),
                "available_skus": len([qty for qty in inventory_data.values() if qty > 0])
            },
            message="库存查询成功"
        )

    except Exception as e:
        logger.error(f"❌ 库存查询失败: {type(e).__name__}: {e}")
        return ErrorResponse(
            code=ResponseCode.INTERNAL_ERROR,
            message="库存查询失败，请稍后重试",
            data={"error_type": type(e).__name__, "original_error": str(e)}
        )


@wms_app.get("/health")
async def health_check():
    """Health check endpoint."""
    return {"status": "healthy", "service": "wms-core"}
