from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import logging
from contextlib import asynccontextmanager
import os
from typing import Optional
from dotenv import load_dotenv
load_dotenv()

# Configure logging for the whole module
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Backend selection, driven entirely by environment variables (.env supported)
MODEL_TYPE = os.getenv("MODEL_TYPE", "ollama")  # one of: "ollama" / "siliconflow"
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "qwen2.5:latest")
SILICONFLOW_MODEL = os.getenv("SILICONFLOW_MODEL", "deepseek-ai/DeepSeek-V3")

# Async application lifespan management
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Create the configured model backend at startup and release it at shutdown.

    The backend instance is stored on ``app.state.model_backend`` so request
    handlers can reach it.  Raises ``ValueError`` during startup when
    MODEL_TYPE names an unsupported backend, aborting app launch early.

    NOTE: OllamaBackend / SiliconFlowBackend are defined later in this module;
    that is fine because this function only runs once the module is fully loaded.
    """
    # Instantiate the backend selected via the MODEL_TYPE environment variable
    if MODEL_TYPE == "ollama":
        logger.info("正在初始化Ollama后端...")
        app.state.model_backend = OllamaBackend(OLLAMA_MODEL)
    elif MODEL_TYPE == "siliconflow":
        logger.info("正在初始化硅基流动后端...")
        app.state.model_backend = SiliconFlowBackend(SILICONFLOW_MODEL)
    else:
        raise ValueError(f"不支持的模型类型: {MODEL_TYPE}")
    
    logger.info(f"模型后端初始化完成，类型: {MODEL_TYPE}")
    yield
    # Shutdown: drop the backend reference so it can be garbage-collected
    logger.info("清理模型资源...")
    del app.state.model_backend

# Create the FastAPI application (backend wired up via the lifespan handler)
app = FastAPI(title="统一模型API", lifespan=lifespan)

# Allow cross-origin requests from any origin.
# NOTE(review): per the CORS spec, browsers reject a wildcard origin when
# allow_credentials=True — confirm whether credentials are actually needed,
# or list explicit origins instead.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Request data model
class GenerationRequest(BaseModel):
    """Parameters for a single text-generation request."""
    prompt: str
    # Maximum number of tokens to generate
    max_length: Optional[int] = 512
    # Sampling temperature; higher values give more random output
    temperature: Optional[float] = 0.7
    # Nucleus-sampling probability mass
    top_p: Optional[float] = 0.9
    # NOTE(review): top_k and repetition_penalty are accepted here but are not
    # forwarded by every backend below — confirm intended behavior.
    top_k: Optional[int] = 50
    repetition_penalty: Optional[float] = 1.1

# Common base class defining the interface every model backend implements
class ModelBackend:
    def generate(self, request: GenerationRequest) -> str:
        """Produce completion text for *request*; subclasses must override."""
        raise NotImplementedError("子类必须实现此方法")

# Ollama model backend
class OllamaBackend(ModelBackend):
    """Backend that generates text through a locally running Ollama server."""

    def __init__(self, model_name):
        """Lazily import the ollama client and normalize the model name.

        Args:
            model_name: Ollama model tag; '_' is translated to ':' so that
                env-var-friendly names like ``qwen2.5_latest`` resolve to
                ``qwen2.5:latest``.

        Raises:
            ImportError: the ollama Python package is not installed.
        """
        try:
            from ollama import chat, Message
            self.chat = chat
            self.Message = Message
            self.model_name = model_name.replace('_', ':')
            logger.info(f"Ollama后端初始化完成，模型: {self.model_name}")
        except ImportError:
            logger.error("无法导入ollama模块，请确保已安装ollama Python包")
            raise
        except Exception as e:
            logger.error(f"Ollama初始化失败: {str(e)}")
            raise

    def generate(self, request: GenerationRequest) -> str:
        """Run one non-streaming chat completion and return the reply text."""
        try:
            response = self.chat(
                model=self.model_name,
                messages=[
                    self.Message(role='system', content="You are a helpful assistant."),
                    self.Message(role='user', content=request.prompt)
                ],
                stream=False,
                options={
                    'temperature': request.temperature,
                    'top_p': request.top_p,
                    # Fix: forward the remaining sampling knobs from the request
                    # instead of silently dropping them; Ollama names them
                    # top_k / repeat_penalty.
                    'top_k': request.top_k,
                    'repeat_penalty': request.repetition_penalty,
                    'num_predict': request.max_length
                }
            )
            return response['message']['content'] if response['message'] else ""
        except Exception as e:
            logger.error(f"Ollama生成失败: {str(e)}")
            raise

# SiliconFlow model backend
class SiliconFlowBackend(ModelBackend):
    """Backend that generates text via the SiliconFlow OpenAI-compatible API."""

    def __init__(self, model_name):
        """Build an OpenAI-compatible client pointed at SiliconFlow.

        Args:
            model_name: full SiliconFlow model identifier (e.g.
                ``deepseek-ai/DeepSeek-V3``).

        Raises:
            ImportError: the openai Python package is not installed.
            ValueError: SILICONFLOW_API_KEY is not set.
        """
        try:
            from openai import OpenAI
            import os
            api_key = os.getenv("SILICONFLOW_API_KEY")
            # Fail fast with a clear message instead of passing None to the
            # client, which would otherwise fall back to OPENAI_API_KEY or
            # fail later with an opaque authentication error on first request.
            if not api_key:
                raise ValueError("未设置SILICONFLOW_API_KEY环境变量")
            self.client = OpenAI(
                api_key=api_key,
                base_url="https://api.siliconflow.cn/v1"
            )
            self.model_name = model_name
            logger.info(f"硅基流动后端初始化完成，模型: {self.model_name}")
        except ImportError:
            logger.error("无法导入openai模块，请确保已安装openai Python包")
            raise
        except Exception as e:
            logger.error(f"硅基流动初始化失败: {str(e)}")
            raise

    def generate(self, request: GenerationRequest) -> str:
        """Run one chat completion and return the first choice's text."""
        try:
            completion = self.client.chat.completions.create(
                model=self.model_name,
                temperature=request.temperature,
                max_tokens=request.max_length,
                top_p=request.top_p,
                messages=[
                    {"role": "system", "content": "You are a helpful assistant."},
                    {"role": "user", "content": request.prompt}
                ],
            )
            return completion.choices[0].message.content
        except Exception as e:
            logger.error(f"硅基流动生成失败: {str(e)}")
            raise

# API endpoint
@app.post("/generate")
async def generate(request: GenerationRequest):
    """Generate text for *request* with the configured model backend.

    Returns:
        dict: ``{"generated_text": <model output>}``

    Raises:
        HTTPException: 400 when the prompt exceeds 2000 characters,
            500 when the backend fails.
    """
    # Fix: validate size BEFORE the try block — the original raised the 400
    # inside the try, where `except Exception` caught it and re-raised it as
    # a misleading 500 "文本生成失败".
    if len(request.prompt) > 2000:
        raise HTTPException(
            status_code=400, 
            detail="输入过长，最大支持2000字符"
        )

    try:
        logger.info(f"收到生成请求: {request.prompt[:50]}...")
        generated_text = app.state.model_backend.generate(request)
        return {"generated_text": generated_text}
    except HTTPException:
        # Never convert deliberate HTTP errors into generic 500s
        raise
    except Exception as e:
        logger.error(f"API错误: {str(e)}")
        raise HTTPException(
            status_code=500, 
            detail=f"文本生成失败: {str(e)}"
        )

# Liveness probe endpoint
@app.get("/health")
def health_check():
    """Report service health and which model backend type is configured."""
    payload = {"status": "healthy"}
    payload["model_type"] = MODEL_TYPE
    return payload

if __name__ == "__main__":
    # Run a single uvicorn worker serving this app on all interfaces.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000, reload=False)