from fastapi import APIRouter, Depends, HTTPException, Header
from sqlalchemy.orm import Session
from sqlalchemy import desc
from ..database import get_db
from ..models.prompt import Prompt
from ..models.model import ModelConfig
from ..models.user import User
from pydantic import BaseModel
from typing import Optional, Dict, Any
from passlib.context import CryptContext
import datetime

# Router for credential-protected read access to prompts and their model configs.
router = APIRouter(
    prefix='/public-prompts',
    tags=['public-prompts'],
)

# Password hashing context (bcrypt) used to verify stored password hashes.
pwd_context = CryptContext(schemes=['bcrypt'], deprecated='auto')

def verify_password(plain_password, hashed_password):
    """Return True when *plain_password* matches the bcrypt *hashed_password*."""
    hasher = pwd_context
    return hasher.verify(plain_password, hashed_password)

# Response schema for a model configuration attached to a prompt.
# Field defaults mirror common OpenAI-style sampling parameters.
class ModelConfigResponse(BaseModel):
    id: int
    channel: Optional[str] = None
    channel_id: Optional[int] = None
    model: str
    temperature: Optional[float] = 1.0
    max_tokens: Optional[int] = 2048
    top_p: Optional[float] = 1.0
    presence_penalty: Optional[float] = 0.0
    frequency_penalty: Optional[float] = 0.0
    n: Optional[int] = 1
    stop: Optional[list] = None
    logit_bias: Optional[Dict[str, Any]] = None
    stream: Optional[bool] = False
    response_format: Optional[str] = None
    endpoint_url: Optional[str] = None
    file_path: Optional[str] = None
    # NOTE(review): including api_key here returns the stored secret to any
    # caller with valid user credentials — confirm this exposure is intended.
    api_key: Optional[str] = None
    # ISO-8601 timestamp string (endpoints fill this via .isoformat()).
    created_at: Optional[str] = None

    class Config:
        # Both flags kept so the schema works under Pydantic v1 (orm_mode)
        # and v2 (from_attributes).
        orm_mode = True
        from_attributes = True

# Response schema for a prompt, optionally embedding its model configuration.
class PromptWithModelResponse(BaseModel):
    id: str
    title: str
    content: str
    description: Optional[str] = None
    user_id: str
    is_public: bool
    version: str
    tags: Optional[str] = None
    model_id: Optional[int] = None
    cover_img: Optional[str] = None
    # ISO-8601 timestamp strings. Declared Optional because the endpoints pass
    # None when the ORM row has no timestamp; the previous required `str`
    # declaration made Pydantic raise a validation error (HTTP 500) for
    # such rows.
    created_at: Optional[str] = None
    updated_at: Optional[str] = None
    model: Optional[ModelConfigResponse] = None

    class Config:
        # Both flags kept so the schema works under Pydantic v1 (orm_mode)
        # and v2 (from_attributes).
        orm_mode = True
        from_attributes = True

# Error envelope schema.
# NOTE(review): not referenced in this file — the endpoints raise
# HTTPException with raw dicts of the same shape; possibly used elsewhere.
class ErrorResponse(BaseModel):
    success: bool
    message: str
    code: int

# Success envelope schema wrapping a prompt (with model) payload.
class SuccessResponse(BaseModel):
    success: bool
    message: str
    data: PromptWithModelResponse

def validate_user_credentials(username: str, password: str, db: Session) -> bool:
    """Return True when *username* exists and *password* matches its stored bcrypt hash."""
    record = db.query(User).filter(User.name == username).first()
    return record is not None and verify_password(password, record.password)

def _require_valid_credentials(username: str, password: str, db: Session) -> None:
    """Raise HTTP 401 unless the username/password pair is valid.

    NOTE(review): credentials arrive as plaintext request headers — this is
    only safe behind TLS; confirm deployment always terminates HTTPS.
    """
    if not validate_user_credentials(username, password, db):
        raise HTTPException(
            status_code=401,
            detail={
                "success": False,
                "message": "用户名或密码错误",
                "code": 401
            }
        )


def _fetch_model(prompt: Prompt, db: Session) -> Optional[ModelConfig]:
    """Return the ModelConfig row linked to *prompt*, or None when it has none."""
    if not prompt.model_id:
        return None
    return db.query(ModelConfig).filter(ModelConfig.id == prompt.model_id).first()


def _build_prompt_response(prompt: Prompt, model: Optional[ModelConfig]) -> PromptWithModelResponse:
    """Serialize a Prompt ORM row (plus its optional model config) into the API schema.

    Timestamps are rendered as ISO-8601 strings, or None when absent.
    """
    model_data = None
    if model is not None:
        model_data = ModelConfigResponse(
            id=model.id,
            channel=model.channel,
            channel_id=model.channel_id,
            model=model.model,
            temperature=model.temperature,
            max_tokens=model.max_tokens,
            top_p=model.top_p,
            presence_penalty=model.presence_penalty,
            frequency_penalty=model.frequency_penalty,
            n=model.n,
            stop=model.stop,
            logit_bias=model.logit_bias,
            stream=model.stream,
            response_format=model.response_format,
            endpoint_url=model.endpoint_url,
            file_path=model.file_path,
            # NOTE(review): this returns the stored API key to the caller —
            # confirm this exposure is intended.
            api_key=model.api_key,
            created_at=model.created_at.isoformat() if model.created_at else None
        )
    return PromptWithModelResponse(
        id=str(prompt.id),
        title=prompt.title,
        content=prompt.content,
        description=prompt.description,
        user_id=str(prompt.user_id),
        is_public=prompt.is_public,
        version=prompt.version,
        tags=prompt.tags,
        model_id=prompt.model_id,
        cover_img=prompt.cover_img,
        created_at=prompt.created_at.isoformat() if prompt.created_at else None,
        updated_at=prompt.updated_at.isoformat() if prompt.updated_at else None,
        model=model_data
    )


@router.get("/getPromptById", response_model=SuccessResponse)
async def get_prompt_by_id(
    promptId: str,
    username: str = Header(...),
    password: str = Header(...),
    db: Session = Depends(get_db)
):
    """
    Fetch a prompt by its ID, including its linked model configuration.

    Raises HTTP 401 for invalid credentials and HTTP 404 when no prompt
    with the given ID exists.
    """
    _require_valid_credentials(username, password, db)

    prompt = db.query(Prompt).filter(Prompt.id == promptId).first()
    if not prompt:
        raise HTTPException(
            status_code=404,
            detail={
                "success": False,
                "message": "提示词不存在",
                "code": 404
            }
        )

    model = _fetch_model(prompt, db)
    return SuccessResponse(
        success=True,
        message="获取提示词信息成功",
        data=_build_prompt_response(prompt, model)
    )


@router.get("/getPromptLatest", response_model=SuccessResponse)
async def get_latest_prompt_by_title(
    title: str,
    username: str = Header(...),
    password: str = Header(...),
    db: Session = Depends(get_db)
):
    """
    Fetch the highest-version prompt with the given title, including its
    linked model configuration.

    Raises HTTP 401 for invalid credentials and HTTP 404 when no prompt
    with the given title exists.
    """
    _require_valid_credentials(username, password, db)

    # NOTE(review): `version` is a string column, so desc() orders it
    # lexicographically (e.g. "9" sorts above "10") — confirm the version
    # format (zero-padded or single-digit) makes this ordering correct.
    prompt = db.query(Prompt).filter(
        Prompt.title == title
    ).order_by(desc(Prompt.version)).first()

    if not prompt:
        raise HTTPException(
            status_code=404,
            detail={
                "success": False,
                "message": "未找到该名称的提示词",
                "code": 404
            }
        )

    model = _fetch_model(prompt, db)
    return SuccessResponse(
        success=True,
        message="获取提示词信息成功",
        data=_build_prompt_response(prompt, model)
    )
