from fastapi import APIRouter, Depends, HTTPException, status,Request
from sqlalchemy.orm import Session
from typing import List, Optional
from pydantic import BaseModel
from datetime import datetime
from fastapi.responses import StreamingResponse
import json
import time

from app.db.base import get_db
from app.db.models import Message, Conversation as ConversationModel, User
from app.api.auth import get_current_user
from app.llm.index import llm
from app.core.config import settings

# Router mounted by the application; holds the message/completion endpoints below.
router = APIRouter()

class MessageCreate(BaseModel):
    """Payload for a single chat message submitted by a client."""

    content: str  # message text
    role: str = "user"  # sender role; defaults to "user"

class MessageResponse(BaseModel):
    """Serialized view of a stored Message row (response model of GET /messages)."""

    id: int
    conversation_id: int
    role: str
    content: str
    # NOTE(review): declared as str, but the ORM column is presumably a
    # datetime; pydantic v2 does not coerce datetime -> str automatically —
    # confirm this validates against real Message rows.
    created_at: str

    class Config:
        # Allow construction directly from ORM objects (pydantic v2 name for orm_mode).
        from_attributes = True

class ChatRequest(BaseModel):
    """Request body for POST /completions (OpenAI-style chat completion)."""

    messages: List[MessageCreate]  # message history forwarded to the LLM
    model: Optional[str] = None  # falls back to settings.DEFAULT_MODEL when None
    temperature: Optional[float] = None  # passed through to the LLM as-is
    max_tokens: Optional[int] = None  # passed through to the LLM as-is
    conversation_id: Optional[int] = None  # required in practice: endpoint 404s when it does not resolve

class ChatResponse(BaseModel):
    """OpenAI-compatible chat-completion envelope.

    NOTE(review): not referenced by the endpoints visible in this file (the
    streaming path builds raw dicts) — may be used elsewhere, or dead code.
    """

    id: str
    object: str
    created: int
    model: str
    choices: List[dict]
    usage: dict

@router.get("/messages", response_model=List[MessageResponse])
def get_messages(
    conversation_id: int,
    skip: int = 0,
    limit: int = 50,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """Return a page of messages for one of the current user's conversations.

    Args:
        conversation_id: Conversation to read; must belong to current_user.
        skip: Number of messages to skip (pagination offset).
        limit: Maximum number of messages to return.

    Raises:
        HTTPException 404: conversation missing OR owned by another user
            (deliberately indistinguishable, to avoid leaking existence).
    """
    # Ownership check: the user_id condition had been commented out, which let
    # any authenticated user read any conversation's messages. Restored here,
    # matching the check in create_chat_completion.
    conversation = db.query(ConversationModel)\
        .filter(
            ConversationModel.id == conversation_id,
            ConversationModel.user_id == current_user.id,
        )\
        .first()
    if not conversation:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="对话不存在"
        )

    # Newest messages first, windowed by skip/limit.
    messages = db.query(Message)\
        .filter(Message.conversation_id == conversation_id)\
        .order_by(Message.created_at.desc())\
        .offset(skip)\
        .limit(limit)\
        .all()
    return messages

@router.post("/completions")
async def create_chat_completion(
    chat_request: ChatRequest,
    request: Request,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """Stream an OpenAI-style chat completion over SSE and persist the exchange.

    The incoming user messages are committed before streaming starts; the
    assistant reply is accumulated chunk by chunk and persisted when the
    stream ends — including on early termination (client disconnect or LLM
    error), via try/finally.

    Raises:
        HTTPException 404: conversation missing or not owned by current_user.
    """
    conversation_id = chat_request.conversation_id
    # Ownership check — same 404 whether the conversation is absent or foreign.
    conversation = db.query(ConversationModel)\
        .filter(ConversationModel.id == conversation_id, ConversationModel.user_id == current_user.id)\
        .first()
    if not conversation:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="对话不存在"
        )

    # Persist the user's messages up front: previously they were only committed
    # at the end of the stream, so an LLM failure or client disconnect lost them.
    for message in chat_request.messages:
        db.add(Message(
            conversation_id=conversation_id,
            role=message.role,
            content=message.content
        ))
    db.commit()

    # Hoisted: model name is needed in every chunk.
    model_name = chat_request.model or settings.DEFAULT_MODEL
    # One id shared by every chunk of this completion (OpenAI chunk semantics);
    # previously a fresh id was minted per chunk.
    completion_id = f"chatcmpl-{int(time.time())}"

    async def generate():
        response_content = ""
        try:
            async for chunk in llm.astream_chat(
                messages=[{"role": msg.role, "content": msg.content} for msg in chat_request.messages],
                model=model_name,
                temperature=chat_request.temperature,
                max_tokens=chat_request.max_tokens
            ):
                delta = chunk.choices[0].delta.content
                if delta:
                    response_content += delta
                    response_data = {
                        "id": completion_id,
                        "object": "chat.completion.chunk",
                        "created": int(time.time()),
                        "model": model_name,
                        "choices": [{
                            "index": 0,
                            "delta": {"content": delta},
                            "finish_reason": None
                        }]
                    }
                    yield f"data: {json.dumps(response_data)}\n\n"
        finally:
            # Persist whatever the assistant produced, even if the stream was
            # cut short; skip the write when nothing was generated.
            if response_content:
                db.add(Message(
                    conversation_id=conversation_id,
                    role="assistant",
                    content=response_content
                ))
                db.commit()

        # Terminal chunk with finish_reason="stop", then the SSE sentinel.
        end_data = {
            "id": completion_id,
            "object": "chat.completion.chunk",
            "created": int(time.time()),
            "model": model_name,
            "choices": [{
                "index": 0,
                "delta": {},
                "finish_reason": "stop"
            }]
        }
        yield f"data: {json.dumps(end_data)}\n\n"
        yield "data: [DONE]\n\n"

    return StreamingResponse(
        generate(),
        media_type="text/event-stream"
    )