from fastapi import APIRouter, HTTPException, Query
# from fastapi import UploadFile, File  # cloud-side scene recognition disabled; file upload no longer needed
from services.scene_memory import SceneMemoryService
# Cloud-side scene recognition is disabled; the related imports are commented out.
# from services.prompt.scene_vlm_templates import SceneTemplates
# from services.llm import LLMService
from config.weaviate_config import get_weaviate_client_config
from utils.logger import (
    emobot_logger,
    log_function_call,
    log_function_result,
    log_function_error,
)
from utils.parse_output import extract_json_from_text
# from utils.upload import upload_to_obs  # OBS service commented out

from typing import Optional, List, Dict, Any
import time
import json
import base64
import io

logger = emobot_logger.get_logger()

router = APIRouter(prefix="/api/scene", tags=["scene"])


# Scene-recognition endpoint commented out: the cloud side no longer performs scene recognition.
# @router.post("/get")
# async def get_scene_from_image(
#     image: UploadFile = File(..., description="scene image"),
# ):
#     """Recognize scene information from an image - disabled; the cloud side no longer performs scene recognition"""
#     pass


@router.post("/vlm-analysis-resp")
async def receive_vlm_analysis_result(vlm_data: Dict[str, Any]):
    """接收端侧VLM分析结果"""
    start_time = time.time()
    log_function_call("receive_vlm_analysis_result", {"task_id": vlm_data.get("header", {}).get("task_id")})
    emobot_logger.log_api_request(
        "POST", "/api/scene/vlm-analysis-resp", {"task_id": vlm_data.get("header", {}).get("task_id")}
    )
    
    try:
        # 验证VLM分析结果格式
        header = vlm_data.get("header", {})
        payload = vlm_data.get("payload", {})
        
        if not header.get("action") == "vlm-analysis-resp":
            raise HTTPException(status_code=400, detail="Invalid action type")
        
        if not header.get("status") == "success":
            logger.error(f"VLM analysis failed: {payload.get('message', 'Unknown error')}")
            raise HTTPException(status_code=400, detail=f"VLM analysis failed: {payload.get('message')}")
        
        # 提取VLM分析结果
        results = payload.get("results", {})
        downstream = payload.get("downstream", {})
        
        # 处理环境分类结果
        env_classification = results.get("environment_classification", {})
        scene_category = env_classification.get("category", "unknown")
        scene_confidence = env_classification.get("confidence", 0.0)
        
        # 处理平面识别结果
        plane_recognition = results.get("plane_recognition", {})
        planes = plane_recognition.get("planes", [])
        
        # 处理手势/姿势识别结果
        gesture_pose = results.get("gesture_pose_recognition", {})
        gestures = gesture_pose.get("gestures", [])
        pose = gesture_pose.get("pose", "unknown")
        
        # 构建场景数据
        scene_data = {
            "scene": scene_category,
            "confidence": scene_confidence,
            "planes": planes,
            "gestures": gestures,
            "pose": pose,
            "supporting_details": env_classification.get("supporting_details", [])
        }
        
        # 使用Weaviate场景记忆服务进行智能场景识别和记忆管理
        weaviate_config = get_weaviate_client_config()
        scene_memory_service = SceneMemoryService(
            weaviate_url=weaviate_config["weaviate_url"],
            openai_api_key=weaviate_config.get("openai_api_key"),
            openai_base_url=weaviate_config.get("openai_base_url"),
        )
        
        # 智能场景记忆管理
        memory_result = scene_memory_service.create_or_update_scene_memory(scene_data)
        
        # 构建返回结果
        result = {
            "task_id": header.get("task_id"),
            "scene_id": memory_result["scene_id"],
            "scene": memory_result["scene"],
            "items": memory_result["items"],
            "action": memory_result["action"],
            "similarity": memory_result.get("similarity", 1.0),
            "weaviate_id": memory_result.get("weaviate_id"),
            "message": memory_result.get("message", ""),
            "vlm_confidence": scene_confidence,
            "created_at": memory_result.get("created_at", ""),
            "updated_at": memory_result.get("updated_at", ""),
            "vlm_analysis": {
                "environment_classification": env_classification,
                "plane_recognition": plane_recognition,
                "gesture_pose_recognition": gesture_pose,
                "downstream": downstream
            }
        }
        
        logger.info(f"VLM分析结果处理完成: {memory_result['action']} - {memory_result['scene_id']}")
        
        duration = (time.time() - start_time) * 1000
        emobot_logger.log_api_response("POST", "/api/scene/vlm-analysis-resp", 200, result, duration)
        log_function_result("receive_vlm_analysis_result", result, duration)
        
        return result
        
    except HTTPException:
        raise
    except Exception as e:
        duration = (time.time() - start_time) * 1000
        log_function_error("receive_vlm_analysis_result", e, {"task_id": vlm_data.get("header", {}).get("task_id")})
        emobot_logger.log_api_response(
            "POST", "/api/scene/vlm-analysis-resp", 500, {"error": str(e)}, duration
        )
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/memory/summary")
async def get_scene_memory_summary():
    """获取场景记忆摘要信息"""
    start_time = time.time()
    log_function_call("get_scene_memory_summary", {})

    try:
        # 获取Weaviate配置
        weaviate_config = get_weaviate_client_config()
        scene_memory_service = SceneMemoryService(
            weaviate_url=weaviate_config["weaviate_url"],
            openai_api_key=weaviate_config.get("openai_api_key"),
            openai_base_url=weaviate_config.get("openai_base_url"),
        )

        summary = scene_memory_service.get_scene_memory_summary()

        duration = (time.time() - start_time) * 1000
        log_function_result("get_scene_memory_summary", summary, duration)

        return summary

    except Exception as e:
        duration = (time.time() - start_time) * 1000
        log_function_error("get_scene_memory_summary", e, {})
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/memory/search")
async def search_scene_memory(
    query: str = Query(..., description="搜索查询"),
    search_type: str = Query("all", description="搜索类型: scene, items, all"),
    limit: int = Query(10, description="返回结果数量限制"),
):
    """搜索场景记忆"""
    start_time = time.time()
    log_function_call(
        "search_scene_memory",
        {"query": query, "search_type": search_type, "limit": limit},
    )

    try:
        # 获取Weaviate配置
        weaviate_config = get_weaviate_client_config()
        scene_memory_service = SceneMemoryService(
            weaviate_url=weaviate_config["weaviate_url"],
            openai_api_key=weaviate_config.get("openai_api_key"),
            openai_base_url=weaviate_config.get("openai_base_url"),
        )

        results = scene_memory_service.search_scene_memory(query, search_type, limit)

        duration = (time.time() - start_time) * 1000
        log_function_result("search_scene_memory", {"count": len(results)}, duration)

        return {
            "query": query,
            "search_type": search_type,
            "results": results,
            "total": len(results),
        }

    except Exception as e:
        duration = (time.time() - start_time) * 1000
        log_function_error("search_scene_memory", e, {"query": query})
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/memory/check")
async def check_scene_memory(
    scene: str = Query(..., description="场景名称"),
    items: str = Query(..., description="物品列表，用逗号分隔"),
):
    """检查场景是否已被记忆"""
    start_time = time.time()
    log_function_call("check_scene_memory", {"scene": scene, "items": items})

    try:
        # 解析物品列表
        items_list = [item.strip() for item in items.split(",") if item.strip()]

        scene_data = {"scene": scene, "items": items_list}

        # 获取Weaviate配置
        weaviate_config = get_weaviate_client_config()
        scene_memory_service = SceneMemoryService(
            weaviate_url=weaviate_config["weaviate_url"],
            openai_api_key=weaviate_config.get("openai_api_key"),
            openai_base_url=weaviate_config.get("openai_base_url"),
        )

        is_remembered, similar_scene = scene_memory_service.is_scene_remembered(
            scene_data
        )

        result = {
            "is_remembered": is_remembered,
            "similar_scene": similar_scene,
            "query_scene": scene_data,
        }

        duration = (time.time() - start_time) * 1000
        log_function_result("check_scene_memory", result, duration)

        return result

    except Exception as e:
        duration = (time.time() - start_time) * 1000
        log_function_error("check_scene_memory", e, {"scene": scene})
        raise HTTPException(status_code=500, detail=str(e))


@router.delete("/memory/{scene_id}")
async def delete_scene_memory(scene_id: str):
    """删除场景记忆"""
    start_time = time.time()
    log_function_call("delete_scene_memory", {"scene_id": scene_id})

    try:
        # 获取Weaviate配置
        weaviate_config = get_weaviate_client_config()
        scene_memory_service = SceneMemoryService(
            weaviate_url=weaviate_config["weaviate_url"],
            openai_api_key=weaviate_config.get("openai_api_key"),
            openai_base_url=weaviate_config.get("openai_base_url"),
        )

        success = scene_memory_service.delete_scene_memory(scene_id)

        if success:
            result = {"message": f"场景记忆 {scene_id} 删除成功", "success": True}
        else:
            result = {"message": f"场景记忆 {scene_id} 删除失败或不存在", "success": False}

        duration = (time.time() - start_time) * 1000
        log_function_result("delete_scene_memory", result, duration)

        return result

    except Exception as e:
        duration = (time.time() - start_time) * 1000
        log_function_error("delete_scene_memory", e, {"scene_id": scene_id})
        raise HTTPException(status_code=500, detail=str(e))
