import httpx
from fastapi import HTTPException, Request
from .config import settings
from .models import ModelMapper
import json
import logging

class ProxyService:
    """Proxy service that forwards Ollama-style requests to a cloud LLM
    provider (OpenAI or Aliyun DashScope) and converts the responses back
    into Ollama-compatible format.
    """

    def __init__(self):
        # Maps local Ollama model names to cloud provider model names.
        self.model_mapper = ModelMapper()
        # Per-request timeout (seconds) for the outbound HTTP call.
        self.timeout = settings.REQUEST_TIMEOUT
        self.logger = logging.getLogger(__name__)

    def _provider_target(self, cloud_model_name: str):
        """Return the ``(url, headers)`` pair for the configured provider.

        Shared by the streaming and non-streaming paths so the provider
        dispatch logic lives in exactly one place.

        Args:
            cloud_model_name (str): Cloud model name (used in Aliyun headers).

        Returns:
            tuple[str, dict]: Target URL and request headers.

        Raises:
            HTTPException: 500 if the configured provider is unsupported.
        """
        provider = settings.CLOUD_MODEL_PROVIDER
        if provider == "openai":
            return (
                f"{settings.OPENAI_API_BASE}/chat/completions",
                {
                    "Authorization": f"Bearer {settings.OPENAI_API_KEY}",
                    "Content-Type": "application/json",
                },
            )
        if provider == "aliyun":
            return (
                settings.ALIYUN_API_BASE,
                {
                    "Authorization": f"Bearer {settings.ALIYUN_API_KEY}",
                    "Content-Type": "application/json",
                    "X-DashScope-Model": cloud_model_name,
                },
            )
        raise HTTPException(
            status_code=500,
            detail=f"Unsupported cloud model provider: {settings.CLOUD_MODEL_PROVIDER}",
        )

    async def _forward_request(self, request: Request, cloud_model_name: str):
        """Forward a non-streaming request to the cloud model.

        Args:
            request (Request): Incoming FastAPI request.
            cloud_model_name (str): Cloud model name to substitute in the body.

        Returns:
            dict: The cloud model's raw JSON response.

        Raises:
            HTTPException: upstream status on error responses, 504 on timeout,
                500 on unexpected failures.
        """
        body = await request.json()
        url, headers = self._provider_target(cloud_model_name)
        # Rewrite the model name to the cloud provider's model id.
        body["model"] = cloud_model_name

        try:
            async with httpx.AsyncClient(timeout=self.timeout) as client:
                response = await client.post(url, headers=headers, json=body)

            if response.status_code != 200:
                self.logger.error("Error response from cloud model: %s", response.text)
                raise HTTPException(
                    status_code=response.status_code,
                    detail=f"Error from cloud model service: {response.text}",
                )

            return response.json()

        except HTTPException:
            # BUGFIX: previously the broad `except Exception` caught the
            # HTTPException above and re-raised it as a generic 500, losing
            # the upstream status code. Re-raise it untouched.
            raise
        except httpx.TimeoutException:
            self.logger.error("Request timeout")
            raise HTTPException(
                status_code=504,
                detail="Gateway Timeout: The request to the cloud model service timed out.",
            )
        except Exception as e:
            self.logger.error("Unexpected error: %s", e)
            raise HTTPException(
                status_code=500,
                detail=f"Internal Server Error: {str(e)}",
            )

    async def _convert_to_ollama_format(self, cloud_response: dict) -> str:
        """Convert a cloud model response into an Ollama-compatible JSON string.

        Handles both full responses (``choices[0].message``) and streaming
        chunks (``choices[0].delta``) for the OpenAI provider.

        Args:
            cloud_response (dict): Raw response (or stream chunk) from the
                cloud model.

        Returns:
            str: JSON string in Ollama's response format.

        Raises:
            HTTPException: 500 if the configured provider is unsupported.
        """
        if settings.CLOUD_MODEL_PROVIDER == "openai":
            choices = cloud_response.get("choices") or []
            first = choices[0] if choices else {}
            # BUGFIX: streaming chunks carry "delta" rather than "message";
            # the old unconditional ["message"]["content"] lookup raised
            # KeyError on every stream chunk. Use .get() chains instead.
            message = first.get("message") or {}
            ollama_response = {
                "model": cloud_response.get("model", "unknown"),
                "created_at": cloud_response.get("created", 0),
                "response": message.get("content", ""),
                "done": True,
            }

            # Streaming chunk: surface the delta content and mark not-done.
            delta = first.get("delta", {})
            if "content" in delta:
                ollama_response["response"] = delta["content"]
                ollama_response["done"] = False

        elif settings.CLOUD_MODEL_PROVIDER == "aliyun":
            # Aliyun DashScope response -> Ollama format.
            output = cloud_response.get("output", {})
            ollama_response = {
                "model": output.get("text_generation_model", "unknown"),
                "created_at": output.get("finish_time", 0),
                "response": output.get("text", ""),
                "done": True,
            }

        else:
            raise HTTPException(
                status_code=500,
                detail=f"Unsupported cloud model provider: {settings.CLOUD_MODEL_PROVIDER}",
            )

        return json.dumps(ollama_response)

    async def stream_proxy(self, request: Request, ollama_model_name: str):
        """Streaming proxy: yield Ollama-compatible NDJSON lines converted
        from the cloud model's streaming response.

        Args:
            request (Request): Incoming FastAPI request.
            ollama_model_name (str): Requested Ollama model name.

        Yields:
            str: Ollama-compatible JSON lines.

        Raises:
            HTTPException: upstream status on error responses, 504 on timeout,
                500 on unexpected failures (only effective before the first
                yield, i.e. before the response has started).
        """
        cloud_model_name = self.model_mapper.get_cloud_model_name(ollama_model_name)
        body = await request.json()
        url, headers = self._provider_target(cloud_model_name)

        if settings.CLOUD_MODEL_PROVIDER == "openai":
            # Ask the provider for a server-sent-events stream.
            body["stream"] = True
        # NOTE(review): the Aliyun branch never set a stream flag in the
        # request body (original code only had a comment saying one was
        # needed) — confirm against the DashScope API whether it is required.

        # Rewrite the model name to the cloud provider's model id.
        body["model"] = cloud_model_name

        try:
            async with httpx.AsyncClient(timeout=self.timeout) as client:
                async with client.stream(
                    "POST",
                    url,
                    headers=headers,
                    json=body,
                ) as response:

                    if response.status_code != 200:
                        # Read the error body exactly once (aread caches) and
                        # decode so the detail is readable text, not b'...'.
                        error_text = (await response.aread()).decode(
                            "utf-8", errors="replace"
                        )
                        self.logger.error(
                            "Error response from cloud model: %s", error_text
                        )
                        raise HTTPException(
                            status_code=response.status_code,
                            detail=f"Error from cloud model service: {error_text}",
                        )

                    # BUGFIX: iterate line-by-line. SSE events are
                    # newline-delimited, and aiter_text() chunks are not
                    # guaranteed to align with event boundaries, so the old
                    # chunk.startswith("data: ") check could split or drop
                    # events.
                    async for line in response.aiter_lines():
                        if settings.CLOUD_MODEL_PROVIDER == "openai":
                            # End-of-stream sentinel from OpenAI.
                            if line.strip() == "data: [DONE]":
                                yield await self._convert_to_ollama_format(
                                    {"done": True}
                                )
                                continue

                            if line.startswith("data: "):
                                try:
                                    content = json.loads(line[6:])
                                except json.JSONDecodeError:
                                    # Skip malformed/partial payloads.
                                    continue
                                choices = content.get("choices") or []
                                if choices and "content" in choices[0].get("delta", {}):
                                    yield await self._convert_to_ollama_format(content)

                        elif settings.CLOUD_MODEL_PROVIDER == "aliyun":
                            # Aliyun: each non-empty line is a JSON payload.
                            if line.strip():
                                try:
                                    content = json.loads(line)
                                except json.JSONDecodeError:
                                    continue
                                yield await self._convert_to_ollama_format(content)

        except HTTPException:
            # BUGFIX: don't let the broad handler below rewrap upstream
            # HTTP errors as generic 500s.
            raise
        except httpx.TimeoutException:
            self.logger.error("Request timeout")
            raise HTTPException(
                status_code=504,
                detail="Gateway Timeout: The request to the cloud model service timed out.",
            )
        except Exception as e:
            self.logger.error("Unexpected error: %s", e)
            raise HTTPException(
                status_code=500,
                detail=f"Internal Server Error: {str(e)}",
            )

    async def non_stream_proxy(self, request: Request, ollama_model_name: str) -> dict:
        """Non-streaming proxy: forward the request and return an
        Ollama-compatible response dict.

        Args:
            request (Request): Incoming FastAPI request.
            ollama_model_name (str): Requested Ollama model name.

        Returns:
            dict: Ollama-compatible response.
        """
        cloud_model_name = self.model_mapper.get_cloud_model_name(ollama_model_name)
        cloud_response = await self._forward_request(request, cloud_model_name)
        # _convert_to_ollama_format returns a JSON string; decode back to a
        # dict so FastAPI serializes it as a JSON response body.
        return json.loads(await self._convert_to_ollama_format(cloud_response))
from fastapi import FastAPI, Request, HTTPException
from fastapi.responses import StreamingResponse
from .proxy_service import ProxyService
from .config import settings
from .models import get_ollama_models

# Application wiring: one FastAPI app exposing an Ollama-compatible API,
# backed by a single shared ProxyService instance.
app = FastAPI()
proxy_service = ProxyService()

@app.post("/api/generate")
async def generate(request: Request):
    """Handle an Ollama-style generate request.

    Dispatches to the streaming proxy (NDJSON response) when the payload
    asks for ``stream``, otherwise returns a single converted response.

    Raises:
        HTTPException: 400 when ``model`` or ``prompt`` is missing/empty.
    """
    payload = await request.json()

    # Guard clauses: both parameters must be present and non-empty.
    for field in ("model", "prompt"):
        if not payload.get(field):
            raise HTTPException(
                status_code=400,
                detail=f"Missing required parameter: {field}",
            )

    model = payload["model"]

    # Streaming mode: hand the async generator to StreamingResponse.
    if payload.get("stream", False):
        chunk_generator = proxy_service.stream_proxy(request, model)
        return StreamingResponse(chunk_generator, media_type="application/x-ndjson")

    # Non-streaming mode: single converted response dict.
    return await proxy_service.non_stream_proxy(request, model)

@app.get("/api/models")
async def list_models():
    """Return the simulated local model list in Ollama's format."""
    models = get_ollama_models()
    return models