# Standard library
import base64
import hmac
import json
import os
import time
import uuid
from typing import List, Optional, Dict, Any, Union, Tuple

# Third-party
from dotenv import load_dotenv
from fastapi import APIRouter, Depends, HTTPException, Request, status, File, UploadFile, Form
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from pydantic import BaseModel, Field

# Local package modules
from . import config
from . import get_file_type_prompt

# Load environment variables from a local .env file
load_dotenv()

# API key used to authorize incoming requests (falls back to a dummy value)
API_KEY = os.getenv("CUSTOM_COMPAT_API_KEY", "dummy-key")

# Bearer-token security scheme used by all endpoints below
security = HTTPBearer()

# Router exposing the OpenAI-compatible endpoints under the /v1 prefix
router = APIRouter(prefix="/v1", tags=["OpenAI Compatible API"])

# ----- Request/response schema definitions -----
class Message(BaseModel):
    """A single chat message.

    ``content`` is either plain text or a list of structured parts
    (e.g. ``{"type": "text", ...}`` / ``{"type": "image_url", ...}``).
    """
    role: str
    content: Union[str, List[Dict[str, Any]]]

class ChatCompletionRequest(BaseModel):
    """Request body for /v1/chat/completions (OpenAI-compatible subset)."""
    # Model name; defaults to config.select_model (captured at import time)
    model: Optional[str] = config.select_model
    messages: List[Message] = []  # conversation history
    temperature: Optional[float] = 0.7  # sampling temperature
    top_p: Optional[float] = 1.0  # nucleus-sampling parameter
    max_tokens: Optional[int] = 1024  # maximum tokens to generate
    stream: Optional[bool] = False  # whether to stream the response
    
class ChatCompletionChoice(BaseModel):
    """One generated completion choice within a response."""
    index: int
    message: Message
    finish_reason: str

class Usage(BaseModel):
    """Token accounting for a completion, as reported upstream."""
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int

class ChatCompletionResponse(BaseModel):
    """OpenAI-compatible chat completion response body."""
    id: str  # e.g. "chatcmpl-<uuid>"
    object: str  # always "chat.completion" here
    created: int  # unix timestamp
    model: str
    choices: List[ChatCompletionChoice]
    usage: Usage

class ModelData(BaseModel):
    """A single entry in the /v1/models listing (OpenAI "model" object)."""
    id: str
    object: str = "model"
    owned_by: str = "organization"
    # Present for OpenAI schema compatibility; no permission model is implemented
    permission: List = []

class ModelsResponse(BaseModel):
    """Response body for /v1/models."""
    object: str = "list"
    data: List[ModelData]

# Validate the bearer token carried in the Authorization header
async def verify_api_key(credentials: HTTPAuthorizationCredentials = Depends(security)) -> str:
    """Validate the Bearer token against the configured API key.

    Args:
        credentials: Parsed Authorization header provided by the HTTPBearer
            security dependency.

    Returns:
        str: The validated API key.

    Raises:
        HTTPException: 401 when the presented key does not match API_KEY.
    """
    # Constant-time comparison so the check does not leak key content
    # through response-timing differences (compare as bytes to be safe
    # with non-ASCII keys).
    if not hmac.compare_digest(credentials.credentials.encode("utf-8"), API_KEY.encode("utf-8")):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid API key",
        )
    return credentials.credentials

# List the models this service can serve
@router.get("/models", response_model=ModelsResponse)
async def get_models(api_key: str = Depends(verify_api_key)):
    """Return the model list in OpenAI /v1/models format.

    Prefers models that have already passed the availability test; falls
    back to every configured model when testing is incomplete or the
    lookup fails, so the endpoint always returns something usable.
    """
    try:
        # Models that have passed the availability test
        available_models = config.get_available_models()
        # Testing not finished / nothing available: expose all configured models
        if not available_models:
            available_models = list(config.model_configs.keys())
    except Exception as e:
        # Lookup failed: log the reason (previously swallowed silently) and
        # fall back to every configured model.
        print(f"获取可用模型失败: {str(e)}，返回所有配置的模型")
        available_models = list(config.model_configs.keys())

    return ModelsResponse(data=[ModelData(id=model_id) for model_id in available_models])

# ----- Helpers: model selection and validation -----
def select_appropriate_model(requested_model: str) -> Tuple[str, str, bool]:
    """
    Pick a usable model and validate its availability.

    Falls back to the configured default model, then to the first available
    model, when the requested one is not configured.

    Args:
        requested_model: The model name from the request (may be empty/None).

    Returns:
        Tuple[str, str, bool]: (effective model name, internal model name,
        whether it is a vision-language model)

    Raises:
        ValueError: If no usable model can be resolved, or the client lookup
            for the chosen model fails.
    """
    # Use the model from the request body, or fall back to the default
    effective_model = requested_model or config.select_model
    
    # Models that have passed the availability test
    available_models = config.get_available_models()
    
    # Make sure the chosen model actually exists in the configuration
    if effective_model not in config.model_configs:
        # Readable list of available models for the log message
        available_models_str = ", ".join(available_models)
        print(f"请求的模型 '{effective_model}' 不在配置中。可用模型: {available_models_str}")
        
        # First fallback: the configured default model
        if config.select_model in config.model_configs:
            print(f"使用默认模型 '{config.select_model}' 替代。")
            effective_model = config.select_model
        else:
            # Default model is not configured either; try the first available one
            if available_models:
                effective_model = available_models[0]
                print(f"使用第一个可用模型 '{effective_model}' 替代。")
            else:
                # Nothing usable at all — give up
                raise ValueError(f"模型 '{effective_model}' 不存在于配置中，且没有可用的替代模型。")
    
    print(f"使用模型: {effective_model}")
    
    # Detect vision-language models ("vl"/"vision" in the internal name)
    is_vl_model = False
    model_name_internal = ""
    
    try:
        _, model_name_internal = config.get_client(effective_model)
        is_vl_model = "vl" in model_name_internal.lower() or "vision" in model_name_internal.lower()
        print(f"内部模型名称: {model_name_internal}, 是否为视觉语言模型: {is_vl_model}")
    except Exception as e:
        print(f"获取模型内部名称时出错: {str(e)}")
        raise ValueError(f"获取模型内部名称时出错: {str(e)}")
    
    return effective_model, model_name_internal, is_vl_model

# Check whether every message carries plain-text content only
def check_text_only_messages(messages: List[Message]) -> bool:
    """Return True when every message's content is a plain string.

    Args:
        messages: The messages to inspect.

    Returns:
        bool: True if no message carries structured (non-string) content;
        vacuously True for an empty list.
    """
    return all(isinstance(item.content, str) for item in messages)

# If a vision model received plain text, find a text-only fallback model
def find_text_model_alternative(vl_model: str) -> Optional[str]:
    """Find a text-only model to stand in for a vision-language model.

    Args:
        vl_model: Name of the vision-language model being replaced.

    Returns:
        Optional[str]: The first configured text-only model, or None when
        no candidate can be resolved.
    """
    # Collect configured models whose internal name does not look vision-capable
    text_model_candidates = []
    for model_key in config.model_configs:
        try:
            _, internal_name = config.get_client(model_key)
            if "vl" not in internal_name.lower() and "vision" not in internal_name.lower():
                text_model_candidates.append(model_key)
        except Exception:
            # A model whose client cannot be built is simply not a candidate.
            # (Was a bare `except:`, which would also swallow SystemExit and
            # KeyboardInterrupt.)
            continue
    
    # Use the first pure-text model found, if any
    if text_model_candidates:
        new_model = text_model_candidates[0]
        print(f"找到纯文本模型: {new_model}，使用它替代视觉语言模型")
        return new_model
    
    return None

# Convert request messages into the provider API wire format
def format_messages_for_api(messages: List[Message], is_vl_model: bool) -> List[Dict[str, Any]]:
    """Convert request messages into the payload format the API expects.

    Args:
        messages: Incoming chat messages.
        is_vl_model: Whether the target model is a vision-language model.

    Returns:
        List[Dict[str, Any]]: Messages ready to send to the provider. For VL
        models, scalar content is wrapped in a one-element list of text parts;
        for text models it stays a plain string.
    """
    formatted: List[Dict[str, Any]] = []
    for message in messages:
        content = message.content
        if isinstance(content, list):
            # Structured content (may include images): keep dict parts as-is,
            # coerce anything else into a text part.
            parts = [
                part if isinstance(part, dict) else {"type": "text", "text": str(part)}
                for part in content
            ]
            formatted.append({"role": message.role, "content": parts})
        else:
            # Scalar content: non-strings are stringified first.
            text = content if isinstance(content, str) else str(content)
            if is_vl_model:
                # VL models expect a list of typed parts even for plain text
                formatted.append({
                    "role": message.role,
                    "content": [{"type": "text", "text": text}]
                })
            else:
                # Plain text models take the raw string
                formatted.append({"role": message.role, "content": text})
    return formatted

# Flatten structured message content down to plain text
def simplify_messages_to_text(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Reduce structured message content to a plain-text string.

    Args:
        messages: Messages in API wire format.

    Returns:
        List[Dict[str, Any]]: Messages whose list-valued content has been
        replaced by the space-joined text of its "text"-type parts; messages
        with scalar content are passed through unchanged.
    """
    flattened = []
    for message in messages:
        content = message["content"]
        if not isinstance(content, list):
            flattened.append(message)
            continue
        # Keep only the text fragments; images and other part types are dropped
        fragments = [
            part.get("text", "")
            for part in content
            if isinstance(part, dict) and part.get("type") == "text"
        ]
        flattened.append({"role": message["role"], "content": " ".join(fragments)})
    return flattened

# Build an OpenAI-style chat completion response
def build_api_response(content: str, model: str, response) -> ChatCompletionResponse:
    """Build an OpenAI-compatible chat completion response.

    Args:
        content: The generated assistant message text.
        model: The model name actually used for generation.
        response: The upstream API response object; token counts are read
            from its ``usage`` attribute when present.

    Returns:
        ChatCompletionResponse: The formatted response.
    """
    # Upstream responses do not always carry usage data; fall back to zeros.
    # (Replaces three repeated hasattr(...) and hasattr(...) chains.)
    usage = getattr(response, 'usage', None)
    return ChatCompletionResponse(
        id=f"chatcmpl-{uuid.uuid4()}",
        object="chat.completion",
        created=int(time.time()),
        model=model,  # the model actually used, not necessarily the requested one
        choices=[
            ChatCompletionChoice(
                index=0,
                message=Message(role="assistant", content=content),
                finish_reason="stop"
            )
        ],
        usage=Usage(
            prompt_tokens=getattr(usage, 'prompt_tokens', 0),
            completion_tokens=getattr(usage, 'completion_tokens', 0),
            total_tokens=getattr(usage, 'total_tokens', 0)
        )
    )

# Chat completions endpoint
@router.post("/chat/completions", response_model=ChatCompletionResponse)
async def chat_completions(
    request: ChatCompletionRequest,
    api_key: str = Depends(verify_api_key)
):
    """OpenAI-compatible chat completions endpoint.

    Resolves the requested model (falling back to configured defaults),
    converts the messages to the upstream wire format, calls the upstream
    API, and on failure retries with simplified (text-only) messages.

    Raises:
        HTTPException: 400 for configuration/parameter problems (ValueError),
            500 for any other failure.
    """
    try:
        # 1. Pick a usable model (may substitute a configured fallback)
        effective_model, model_name_internal, is_vl_model = select_appropriate_model(request.model)
        
        # 2. Check whether the messages are plain text only
        has_only_text_messages = check_text_only_messages(request.messages)
        
        # 3. A vision model called with pure text: try to swap in a text model
        text_model_candidates = []
        if is_vl_model and has_only_text_messages:
            print("检测到使用纯文本调用视觉语言模型，尝试调整模型或消息格式")
            text_model = find_text_model_alternative(effective_model)
            if text_model:
                effective_model = text_model
                _, model_name_internal = config.get_client(effective_model)
                is_vl_model = False
                # Remember the usable text model for the retry path below
                text_model_candidates = [text_model]
        
        # 4. Convert messages into the upstream API wire format
        messages_dict = format_messages_for_api(request.messages, is_vl_model)
        
        # 5. Obtain the client for the chosen model
        client, model_name_internal = config.get_client(effective_model)
        print(f"内部模型名称: {model_name_internal}")
        
        # 6. Assemble the request parameters
        params = {
            "model": model_name_internal,
            "messages": messages_dict,
            "temperature": request.temperature,
            "top_p": request.top_p,
            "stream": request.stream
        }
        
        # Optional parameters
        if request.max_tokens is not None:
            params["max_tokens"] = request.max_tokens
        
        # 7. Call the API and handle possible failures
        print(f"调用API，参数: {params}")
        try:
            response = client.chat.completions.create(**params)
            content = response.choices[0].message.content
            return build_api_response(content, effective_model, response)
            
        except Exception as api_error:
            print(f"API调用错误: {str(api_error)}")
            
            # The failure may be a message-format problem; try fallbacks
            try:
                # a. For VL-model errors, retry with a non-VL model.
                # NOTE(review): text_model_candidates is only populated above
                # when is_vl_model has just been set False, so this branch
                # appears unreachable as written — confirm the intent.
                if is_vl_model and text_model_candidates:
                    another_model = text_model_candidates[0]
                    if another_model != effective_model:  # avoid retrying the same model
                        print(f"尝试切换到非VL模型: {another_model}")
                        client, model_name_internal = config.get_client(another_model)
                        
                        # Re-flatten the messages into plain-text form
                        simple_messages = simplify_messages_to_text(messages_dict)
                        
                        print(f"尝试使用非VL模型和简化消息格式: {simple_messages}")
                        # Retry with the replacement model and simplified messages
                        response = client.chat.completions.create(
                            model=model_name_internal,
                            messages=simple_messages,
                            temperature=request.temperature,
                            top_p=request.top_p,
                            stream=request.stream,
                            max_tokens=request.max_tokens
                        )
                        
                        content = response.choices[0].message.content
                        return build_api_response(content, another_model, response)
                
                # b. Otherwise retry the same model with simplified messages
                simple_messages = simplify_messages_to_text(messages_dict)
                
                print(f"尝试使用简化消息格式重试: {simple_messages}")
                # Reuse the original params with the flattened messages
                params["messages"] = simple_messages
                response = client.chat.completions.create(**params)
                
                content = response.choices[0].message.content
                return build_api_response(content, effective_model, response)
                
            except Exception as retry_error:
                # If the retry fails too, surface the original error
                print(f"重试也失败: {str(retry_error)}")
                raise api_error
                
    except HTTPException:
        # Re-raise HTTPException untouched
        raise
    except ValueError as e:
        # Value errors signal configuration/parameter problems -> 400
        available_models_str = ", ".join(config.get_available_models())
        print(f"值错误: {str(e)}。可用模型: {available_models_str}")
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"参数错误: {str(e)}。可用模型: {available_models_str}"
        )
    except Exception as e:
        # Anything else -> 500, including the list of available models
        available_models_str = ", ".join(config.get_available_models())
        print(f"未知错误: {str(e)}。可用模型: {available_models_str}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"处理请求时发生错误: {str(e)}。请尝试使用其他可用模型: {available_models_str}"
        )

# Convert a local image file path into a data URI
async def process_local_image_path(image_path: str) -> str:
    """Convert a local image file into a ``data:`` URI.

    Args:
        image_path: Path to the image file on the local filesystem.

    Returns:
        str: The image content as a base64-encoded data URI.

    Raises:
        ValueError: If the file does not exist or cannot be processed.
    """
    try:
        # Make sure the file exists before reading
        if not os.path.exists(image_path):
            raise ValueError(f"文件不存在: {image_path}")
        
        # NOTE(review): blocking file read inside an async function — fine for
        # small images; consider a thread pool for very large files.
        with open(image_path, "rb") as img_file:
            img_data = img_file.read()
        
        # Log extension and size (never the content itself)
        file_extension = os.path.splitext(image_path)[1][1:].lower()
        file_size_kb = len(img_data) / 1024
        print(f"处理本地图片: {image_path}, 大小: {file_size_kb:.2f}KB, 类型: {file_extension}")
        
        # Single source of truth for extension -> media type. Unknown
        # extensions fall back to JPEG. (The old separate webp/webm elif was
        # unreachable for webm: it never survived the valid-extension check.)
        media_types = {
            'jpg': 'image/jpeg',
            'jpeg': 'image/jpeg',
            'png': 'image/png',
            'gif': 'image/gif',
            'webp': 'image/webp',
        }
        if file_extension not in media_types:
            print(f"警告: 未知的文件扩展名 {file_extension}，将使用默认JPEG格式")
        media_type = media_types.get(file_extension, 'image/jpeg')
        
        # Encode as base64 and assemble the data URI
        base64_image = base64.b64encode(img_data).decode('utf-8')
        data_uri = f"data:{media_type};base64,{base64_image}"
        print(f"本地图片已转换为data URI格式，类型: {media_type}, 共 {len(base64_image) // 1000}K 字符")
        
        return data_uri
    except Exception as e:
        print(f"处理本地图片文件失败: {str(e)}")
        # Chain the original exception for easier debugging
        raise ValueError(f"处理本地图片文件失败: {str(e)}") from e

# Helper for image uploads: convert the image into base64-encoded content
async def process_uploaded_image(file: UploadFile) -> Dict[str, Any]:
    """Convert an uploaded image into an OpenAI-format ``image_url`` part.

    Args:
        file: The uploaded image file.

    Returns:
        Dict: An ``{"type": "image_url", "image_url": {"url": ...}}`` part
        whose URL is a base64 data URI, as accepted by the OpenAI API.

    Raises:
        ValueError: If the file cannot be read or encoded.
    """
    try:
        # Read the uploaded bytes and note the extension
        file_contents = await file.read()
        file_extension = os.path.splitext(file.filename)[1][1:].lower()
        
        # Log size only — never the image content itself
        file_size_kb = len(file_contents) / 1024
        print(f"处理图片: {file.filename}, 大小: {file_size_kb:.2f}KB, 类型: {file_extension}")
        
        # Single extension -> media type mapping; unknown extensions fall
        # back to JPEG. (The old separate webp/webm elif was unreachable for
        # webm: it was rewritten to jpeg by the validity check first.)
        media_types = {
            'jpg': 'image/jpeg',
            'jpeg': 'image/jpeg',
            'png': 'image/png',
            'gif': 'image/gif',
            'webp': 'image/webp',
        }
        if file_extension not in media_types:
            print(f"警告: 未知的文件扩展名 {file_extension}，将使用默认JPEG格式")
        media_type = media_types.get(file_extension, 'image/jpeg')
        
        # Encode as a base64 data URI, the format the OpenAI API accepts
        base64_image = base64.b64encode(file_contents).decode('utf-8')
        image_url = f"data:{media_type};base64,{base64_image}"
        print(f"图片已转换为image_url格式: {media_type}, 共 {len(base64_image) // 1000}K 字符")
        
        # Return the OpenAI-format image content part
        return {
            "type": "image_url",
            "image_url": {
                "url": image_url
            }
        }
    except Exception as e:
        print(f"处理图片时出错: {str(e)}")
        # Chain the original exception for easier debugging
        raise ValueError(f"处理图片文件失败: {str(e)}") from e
        
# Rewrite local image file paths inside request messages as data URIs
async def process_messages_with_local_images(messages: List[Message]) -> List[Message]:
    """Replace local image paths in messages with data URIs.

    Args:
        messages: Incoming chat messages.

    Returns:
        List[Message]: Messages whose local ``image_url`` paths have been
        converted to data URIs; remote URLs, existing data URIs, non-image
        parts, and scalar-content messages pass through untouched.
    """
    result: List[Message] = []
    
    for message in messages:
        # Scalar content carries no image parts — keep the message as-is
        if not isinstance(message.content, list):
            result.append(message)
            continue
        
        new_parts = []
        for part in message.content:
            url = ""
            if isinstance(part, dict) and part.get("type") == "image_url":
                url = part.get("image_url", {}).get("url", "")
            # Remote (http/https) and inline (data:) URLs stay untouched;
            # anything else that looks like a path is read from disk.
            if url and not url.startswith(("http://", "https://", "data:")):
                try:
                    data_uri = await process_local_image_path(url)
                    new_parts.append({
                        "type": "image_url",
                        "image_url": {"url": data_uri}
                    })
                except Exception as e:
                    print(f"处理本地图片路径失败: {str(e)}，保留原始路径")
                    new_parts.append(part)
            else:
                new_parts.append(part)
        result.append(Message(role=message.role, content=new_parts))
    
    return result
