"""
格式转换模块 - 处理 API 格式转换和数据处理

此模块包含在 Anthropic API 格式和 LiteLLM/OpenAI 格式之间转换数据的函数，
以及处理工具调用和结果的辅助函数。
"""

import json
import logging
import uuid
from typing import Dict, Any, Union, List

from models import MessagesRequest

logger = logging.getLogger(__name__)


# Helper for sanitizing JSON schemas before sending tools to Gemini.
def clean_gemini_schema(schema: Any) -> Any:
    """Recursively strip JSON-schema fields that Gemini tool parameters reject.

    Dicts are cleaned in place (and also returned); lists yield a new list of
    cleaned items; every other value is returned untouched.
    """
    if isinstance(schema, list):
        # Lists produce a fresh list of cleaned elements.
        return [clean_gemini_schema(entry) for entry in schema]

    if not isinstance(schema, dict):
        # Scalars (str, int, bool, None, ...) need no cleaning.
        return schema

    # Keys Gemini does not accept in tool parameter schemas.
    for unsupported_key in ("additionalProperties", "default"):
        schema.pop(unsupported_key, None)

    # Gemini only tolerates a narrow set of `format` values on string types.
    if schema.get("type") == "string" and "format" in schema:
        fmt = schema["format"]
        if fmt not in ("enum", "date-time"):
            logger.debug(f"从 Gemini 模式中删除字符串类型不支持的格式 '{fmt}'。")
            del schema["format"]

    # Recurse into nested schemas (properties, items, ...); list() snapshots
    # the keys so values can be reassigned while iterating.
    for key in list(schema):
        schema[key] = clean_gemini_schema(schema[key])
    return schema


def parse_tool_result_content(content):
    """Normalize a tool-result payload into a plain string.

    Accepts the various shapes Anthropic clients send for tool results and
    flattens them into text.

    Args:
        content: ``None``, a string, a list of blocks (dicts or strings), or a
            single dict block.

    Returns:
        A string representation of the content; placeholder text when the
        content is missing or cannot be serialized.
    """
    if content is None:
        return "No content provided"

    if isinstance(content, str):
        return content

    if isinstance(content, list):
        parts = []
        for item in content:
            if isinstance(item, dict):
                # Covers both {"type": "text", ...} blocks and bare {"text": ...}.
                if item.get("type") == "text" or "text" in item:
                    parts.append(item.get("text", ""))
                else:
                    # json.dumps raises TypeError/ValueError on unserializable
                    # or circular data — fall back to str() then.
                    try:
                        parts.append(json.dumps(item))
                    except (TypeError, ValueError):
                        parts.append(str(item))
            elif isinstance(item, str):
                parts.append(item)
            else:
                # str() can still fail on exotic objects with broken __str__.
                try:
                    parts.append(str(item))
                except Exception:
                    parts.append("Unparseable content")
        return "\n".join(parts).strip()

    if isinstance(content, dict):
        if content.get("type") == "text":
            return content.get("text", "")
        try:
            return json.dumps(content)
        except (TypeError, ValueError):
            return str(content)

    # Fallback for any other type.
    try:
        return str(content)
    except Exception:
        return "Unparseable content"


def _system_to_messages(system) -> List[Dict[str, Any]]:
    """Convert an Anthropic `system` field (str or list of text blocks) into a
    list containing at most one OpenAI-style system message."""
    if isinstance(system, str):
        # Simple string format.
        return [{"role": "system", "content": system}]
    if isinstance(system, list):
        # List of content blocks — concatenate the text ones.
        system_text = ""
        for block in system:
            if hasattr(block, 'type') and block.type == "text":
                system_text += block.text + "\n\n"
            elif isinstance(block, dict) and block.get("type") == "text":
                system_text += block.get("text", "") + "\n\n"
        if system_text:
            return [{"role": "system", "content": system_text.strip()}]
    return []


def _tool_result_block_text(block) -> str:
    """Flatten a tool_result block's content (str, list, dict, or other) into
    plain text for inlining into a user message."""
    if not hasattr(block, "content"):
        return ""
    inner = block.content
    if isinstance(inner, str):
        return inner
    if isinstance(inner, list):
        # Extract text from each nested block; keep one trailing newline each.
        result_content = ""
        for content_block in inner:
            if hasattr(content_block, "type") and content_block.type == "text":
                result_content += content_block.text + "\n"
            elif isinstance(content_block, dict) and content_block.get("type") == "text":
                result_content += content_block.get("text", "") + "\n"
            elif isinstance(content_block, dict):
                if "text" in content_block:
                    result_content += content_block.get("text", "") + "\n"
                else:
                    try:
                        result_content += json.dumps(content_block) + "\n"
                    except (TypeError, ValueError):
                        result_content += str(content_block) + "\n"
        return result_content
    if isinstance(inner, dict):
        if inner.get("type") == "text":
            return inner.get("text", "")
        try:
            return json.dumps(inner)
        except (TypeError, ValueError):
            return str(inner)
    # Any other type: best-effort stringification.
    try:
        return str(inner)
    except Exception:
        return "Unparseable content"


def _flatten_user_tool_results(content) -> str:
    """Collapse a user message containing tool_result blocks into plain text,
    since OpenAI/LiteLLM expects tool results as ordinary user text."""
    text_content = ""
    for block in content:
        if hasattr(block, "type"):
            if block.type == "text":
                text_content += block.text + "\n"
            elif block.type == "tool_result":
                tool_id = block.tool_use_id if hasattr(block, "tool_use_id") else ""
                result_content = _tool_result_block_text(block)
                text_content += f"Tool result for {tool_id}:\n{result_content}\n"
    return text_content.strip()


def _convert_content_blocks(content) -> List[Dict[str, Any]]:
    """Convert a list of Anthropic content blocks into plain dicts."""
    processed_content = []
    for block in content:
        if not hasattr(block, "type"):
            continue
        if block.type == "text":
            processed_content.append({"type": "text", "text": block.text})
        elif block.type == "image":
            processed_content.append({"type": "image", "source": block.source})
        elif block.type == "tool_use":
            processed_content.append({
                "type": "tool_use",
                "id": block.id,
                "name": block.name,
                "input": block.input
            })
        elif block.type == "tool_result":
            processed_content_block = {
                "type": "tool_result",
                "tool_use_id": block.tool_use_id if hasattr(block, "tool_use_id") else ""
            }
            # Normalize the content field to a list of blocks.
            if hasattr(block, "content"):
                if isinstance(block.content, str):
                    processed_content_block["content"] = [{"type": "text", "text": block.content}]
                elif isinstance(block.content, list):
                    processed_content_block["content"] = block.content
                else:
                    processed_content_block["content"] = [{"type": "text", "text": str(block.content)}]
            else:
                processed_content_block["content"] = [{"type": "text", "text": ""}]
            processed_content.append(processed_content_block)
    return processed_content


def _convert_tools(tools, is_gemini_model: bool) -> List[Dict[str, Any]]:
    """Convert Anthropic tool definitions to OpenAI function-tool format,
    cleaning schemas for Gemini targets."""
    openai_tools = []
    for tool in tools:
        # Pydantic models: prefer v2's model_dump(), fall back to v1's dict().
        if hasattr(tool, 'model_dump'):
            tool_dict = tool.model_dump()
        elif hasattr(tool, 'dict'):
            tool_dict = tool.dict()
        else:
            try:
                tool_dict = dict(tool) if not isinstance(tool, dict) else tool
            except (TypeError, ValueError):
                logger.error(f"无法将工具转换为字典：{tool}")
                continue  # Skip tools that cannot be converted.

        input_schema = tool_dict.get("input_schema", {})
        if is_gemini_model:
            logger.debug(f"清理 Gemini 工具的模式：{tool_dict.get('name')}")
            input_schema = clean_gemini_schema(input_schema)

        openai_tools.append({
            "type": "function",
            "function": {
                "name": tool_dict["name"],
                "description": tool_dict.get("description", ""),
                "parameters": input_schema  # possibly cleaned schema
            }
        })
    return openai_tools


def _convert_tool_choice(tool_choice) -> Union[str, Dict[str, Any]]:
    """Map Anthropic tool_choice values onto the OpenAI equivalents."""
    if hasattr(tool_choice, 'model_dump'):
        tool_choice_dict = tool_choice.model_dump()
    elif hasattr(tool_choice, 'dict'):
        tool_choice_dict = tool_choice.dict()
    else:
        tool_choice_dict = tool_choice

    choice_type = tool_choice_dict.get("type")
    if choice_type in ("auto", "any"):
        return choice_type
    if choice_type == "tool" and "name" in tool_choice_dict:
        return {"type": "function", "function": {"name": tool_choice_dict["name"]}}
    # Default to auto when the choice cannot be determined.
    return "auto"


def convert_anthropic_to_litellm(anthropic_request: MessagesRequest) -> Dict[str, Any]:
    """Convert an Anthropic API request into the LiteLLM (OpenAI-style) dict.

    LiteLLM already routes "anthropic/claude-..." model strings natively, so
    this only reshapes the Pydantic request into the expected dict format.

    Args:
        anthropic_request: The incoming Anthropic Messages API request.

    Returns:
        A dict suitable for passing to litellm.completion / acompletion.
    """
    messages = _system_to_messages(anthropic_request.system) if anthropic_request.system else []

    # Conversation messages.
    for msg in anthropic_request.messages:
        content = msg.content
        if isinstance(content, str):
            messages.append({"role": msg.role, "content": content})
        elif msg.role == "user" and any(
            block.type == "tool_result" for block in content if hasattr(block, "type")
        ):
            # OpenAI expects tool results as plain user text, not content blocks.
            messages.append({"role": "user", "content": _flatten_user_tool_results(content)})
        else:
            messages.append({"role": msg.role, "content": _convert_content_blocks(content)})

    # Cap max_tokens at 16384 for OpenAI/Gemini models.
    max_tokens = anthropic_request.max_tokens
    if anthropic_request.model.startswith(("openai/", "gemini/")):
        max_tokens = min(max_tokens, 16384)
        logger.debug(f"将 OpenAI/Gemini 模型的 max_tokens 限制为 16384（原始值：{anthropic_request.max_tokens}）")

    litellm_request = {
        "model": anthropic_request.model,  # LiteLLM understands "anthropic/claude-x"
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": anthropic_request.temperature,
        "stream": anthropic_request.stream,
    }

    # Optional parameters, only when present.
    if anthropic_request.stop_sequences:
        litellm_request["stop"] = anthropic_request.stop_sequences
    if anthropic_request.top_p:
        litellm_request["top_p"] = anthropic_request.top_p
    if anthropic_request.top_k:
        litellm_request["top_k"] = anthropic_request.top_k

    if anthropic_request.tools:
        litellm_request["tools"] = _convert_tools(
            anthropic_request.tools,
            anthropic_request.model.startswith("gemini/"),
        )

    if anthropic_request.tool_choice:
        litellm_request["tool_choice"] = _convert_tool_choice(anthropic_request.tool_choice)

    return litellm_request


def _extract_tool_call_fields(tool_call):
    """Return (tool_id, name, arguments) from a dict- or object-style tool call.

    Missing ids get a fresh "tool_<uuid>"; missing names/arguments default to
    "" and "{}" respectively.
    """
    if isinstance(tool_call, dict):
        function = tool_call.get("function", {})
        tool_id = tool_call.get("id", f"tool_{uuid.uuid4()}")
        name = function.get("name", "")
        arguments = function.get("arguments", "{}")
    else:
        function = getattr(tool_call, "function", None)
        tool_id = getattr(tool_call, "id", f"tool_{uuid.uuid4()}")
        name = getattr(function, "name", "") if function else ""
        arguments = getattr(function, "arguments", "{}") if function else "{}"
    return tool_id, name, arguments


# OpenAI finish_reason -> Anthropic stop_reason (unknown values fall back to
# "end_turn" at the lookup site).
_FINISH_TO_STOP_REASON = {
    "stop": "end_turn",
    "length": "max_tokens",
    "tool_calls": "tool_use",
}


def convert_litellm_to_anthropic(litellm_response: Union[Dict[str, Any], Any], 
                                 original_request: MessagesRequest):
    """Convert a LiteLLM (OpenAI-format) response to an Anthropic response.

    Args:
        litellm_response: Either a LiteLLM ModelResponse object or a plain
            dict in OpenAI chat-completion shape.
        original_request: The original Anthropic request; supplies the model
            name and determines whether structured tool_use blocks are used.

    Returns:
        A MessagesResponse. On any conversion error a fallback response
        carrying the error text is returned instead of raising.
    """
    try:
        # Strip the provider prefix to inspect the bare model name.
        clean_model = original_request.model
        if clean_model.startswith("anthropic/"):
            clean_model = clean_model[len("anthropic/"):]
        elif clean_model.startswith("openai/"):
            clean_model = clean_model[len("openai/"):]

        # Only Claude models get structured tool_use content blocks.
        is_claude_model = clean_model.startswith("claude-")

        # Extract fields either from a LiteLLM ModelResponse object...
        if hasattr(litellm_response, 'choices') and hasattr(litellm_response, 'usage'):
            choices = litellm_response.choices
            message = choices[0].message if choices and len(choices) > 0 else None
            content_text = message.content if message and hasattr(message, 'content') else ""
            tool_calls = message.tool_calls if message and hasattr(message, 'tool_calls') else None
            finish_reason = choices[0].finish_reason if choices and len(choices) > 0 else "stop"
            usage_info = litellm_response.usage
            response_id = getattr(litellm_response, 'id', f"msg_{uuid.uuid4()}")
        else:
            # ...or from a dict, with progressively defensive fallbacks
            # (.dict() -> model_dump()/__dict__ -> manual attribute pulls).
            try:
                response_dict = litellm_response if isinstance(litellm_response, dict) else litellm_response.dict()
            except AttributeError:
                try:
                    response_dict = litellm_response.model_dump() if hasattr(litellm_response, 'model_dump') else litellm_response.__dict__
                except AttributeError:
                    response_dict = {
                        "id": getattr(litellm_response, 'id', f"msg_{uuid.uuid4()}"),
                        "choices": getattr(litellm_response, 'choices', [{}]),
                        "usage": getattr(litellm_response, 'usage', {})
                    }

            choices = response_dict.get("choices", [{}])
            message = choices[0].get("message", {}) if choices and len(choices) > 0 else {}
            content_text = message.get("content", "")
            tool_calls = message.get("tool_calls", None)
            finish_reason = choices[0].get("finish_reason", "stop") if choices and len(choices) > 0 else "stop"
            usage_info = response_dict.get("usage", {})
            response_id = response_dict.get("id", f"msg_{uuid.uuid4()}")

        # Build the Anthropic-format content list.
        content = []

        # Text block first (may be None/empty for pure tool-call responses).
        if content_text is not None and content_text != "":
            content.append({"type": "text", "text": content_text})

        if tool_calls and is_claude_model:
            # Claude models: emit structured tool_use blocks.
            logger.debug(f"处理工具调用：{tool_calls}")
            if not isinstance(tool_calls, list):
                tool_calls = [tool_calls]
            for idx, tool_call in enumerate(tool_calls):
                logger.debug(f"处理工具调用 {idx}：{tool_call}")
                tool_id, name, arguments = _extract_tool_call_fields(tool_call)
                # Tool arguments arrive as a JSON string; decode to a dict.
                if isinstance(arguments, str):
                    try:
                        arguments = json.loads(arguments)
                    except json.JSONDecodeError:
                        logger.warning(f"解析工具参数为 JSON 失败：{arguments}")
                        arguments = {"raw": arguments}
                logger.debug(f"添加 tool_use 块：id={tool_id}, name={name}, input={arguments}")
                content.append({
                    "type": "tool_use",
                    "id": tool_id,
                    "name": name,
                    "input": arguments
                })
        elif tool_calls and not is_claude_model:
            # Non-Claude models: render tool calls as appended text instead.
            logger.debug(f"为非 Claude 模型将工具调用转换为文本：{clean_model}")
            tool_text = "\n\n工具使用：\n"
            if not isinstance(tool_calls, list):
                tool_calls = [tool_calls]
            for tool_call in tool_calls:
                _tool_id, name, arguments = _extract_tool_call_fields(tool_call)
                # Pretty-print the arguments when they parse as JSON.
                if isinstance(arguments, str):
                    try:
                        arguments_str = json.dumps(json.loads(arguments), indent=2)
                    except json.JSONDecodeError:
                        arguments_str = arguments
                else:
                    arguments_str = json.dumps(arguments, indent=2)
                tool_text += f"工具：{name}\n参数：{arguments_str}\n\n"
            # Append to the existing text block or start a new one.
            if content and content[0]["type"] == "text":
                content[0]["text"] += tool_text
            else:
                content.append({"type": "text", "text": tool_text})

        # Usage counters — safely handle both dict and object shapes.
        if isinstance(usage_info, dict):
            prompt_tokens = usage_info.get("prompt_tokens", 0)
            completion_tokens = usage_info.get("completion_tokens", 0)
        else:
            prompt_tokens = getattr(usage_info, "prompt_tokens", 0)
            completion_tokens = getattr(usage_info, "completion_tokens", 0)

        stop_reason = _FINISH_TO_STOP_REASON.get(finish_reason, "end_turn")

        # Anthropic responses must never have empty content.
        if not content:
            content.append({"type": "text", "text": ""})

        # Assemble the Anthropic-style response.
        from models import MessagesResponse, Usage
        anthropic_response = MessagesResponse(
            id=response_id,
            model=original_request.model,
            role="assistant",
            content=content,
            stop_reason=stop_reason,
            stop_sequence=None,
            usage=Usage(
                input_tokens=prompt_tokens,
                output_tokens=completion_tokens
            )
        )

        return anthropic_response

    except Exception as e:
        import traceback
        error_traceback = traceback.format_exc()
        error_message = f"转换响应时出错：{str(e)}\n\n完整回溯：\n{error_traceback}"
        logger.error(error_message)

        # Fallback response so the caller still gets something well-formed.
        from models import MessagesResponse, Usage
        return MessagesResponse(
            id=f"msg_{uuid.uuid4()}",
            model=original_request.model,
            role="assistant",
            content=[{"type": "text", "text": f"转换响应时出错：{str(e)}。请检查服务器日志。"}],
            stop_reason="end_turn",
            usage=Usage(input_tokens=0, output_tokens=0)
        )