from fastapi import APIRouter, HTTPException, Query, Depends
from fastapi.responses import StreamingResponse
from scorpio.core.database import VectorDatabaseManager
from scorpio.shared.generation import GenerationService
from scorpio.shared import get_db_manager, get_embedding_config, get_config, ExternalEmbeddingClient
from scorpio.services.database import QueryProcessor
from scorpio.core.common import get_logger, EmbeddingConfig, ScorpioConfig
from scorpio.agenthub import RAGAgent
from scorpio.core.providers import LLMClient

from .utils import QueryResponse, QueryRequest
# Module-level logger, named after this module for log-source attribution.
logger = get_logger(__name__)

# All routes below are versioned under /api/v1 and grouped in OpenAPI docs
# under the "RAG document API" tag.
router = APIRouter(prefix="/api/v1", tags=["RAG document API"])

@router.get("/query/stream")
async def query_stream(
    question: str,
    top_k: int = Query(5, ge=1, le=20),
    max_tokens: int = Query(1000, ge=100, le=4000),
    temperature: float = Query(0.7, ge=0.0, le=1.0),
    embedding_config: EmbeddingConfig = Depends(get_embedding_config),
    db_manager: VectorDatabaseManager = Depends(get_db_manager),
    config: ScorpioConfig = Depends(get_config)
):
    """RAG streaming query API.

    Runs the RAG agent flow for *question* and streams the generated
    answer back as plain text.

    Args:
        question: The user question to answer.
        top_k: Number of retrieved chunks to feed the generator (1-20).
        max_tokens: Generation token budget (100-4000).
        temperature: Sampling temperature (0.0-1.0).

    Raises:
        HTTPException: 500 when the agent flow fails or produces no response.
    """
    query_processor = QueryProcessor(db_manager)
    embedding_client = ExternalEmbeddingClient(embedding_config)
    # Apply per-request generation overrides onto the default model config.
    model_param = config.get_default_model()
    model_param.temperature = temperature
    model_param.max_tokens = max_tokens
    llm_client = LLMClient(provider=model_param.provider, model_param=model_param)
    generation_service = GenerationService(model_param)
    logger.info("stream query question: %s", question)
    # Shared state consumed (and mutated) by the RAGAgent flow; the agent is
    # expected to write its output under shared["query"]["response"].
    shared = {
        "context": {
            "db_manager": db_manager,
            "processor": query_processor,
            "embedding_client": embedding_client,
            "llm_client": llm_client,
            "generation": generation_service,
        },
        "query": {
            "stream": True,
            "top_k": top_k,
            "question": question,
        },
    }
    try:
        agent = RAGAgent(shared)
        agent.create_flow()
        await agent.run()
    except Exception as e:
        # Preserve the traceback in the logs and chain the cause so the
        # original failure is not lost behind the HTTP 500.
        logger.exception("stream query failed for question: %s", question)
        raise HTTPException(status_code=500, detail=f"查询失败: {str(e)}") from e
    response = shared["query"].get("response")
    # Guard: a flow that finished without producing a stream would otherwise
    # hand None to StreamingResponse and fail later at send time.
    if response is None:
        raise HTTPException(status_code=500, detail=f"查询失败: no response produced")
    logger.info("stream query response ready for question:\n %s", question)
    return StreamingResponse(response, media_type="text/plain")

@router.post("/query")
async def query(
    request: QueryRequest,
    embedding_config: EmbeddingConfig = Depends(get_embedding_config),
    db_manager: VectorDatabaseManager = Depends(get_db_manager),
    config: ScorpioConfig = Depends(get_config)
):
    """RAG (non-streaming) query API.

    Runs the RAG agent flow for ``request.question`` and returns the
    generated response in one shot.

    Args:
        request: Query payload (question, top_k, max_tokens, temperature).

    Raises:
        HTTPException: 500 when the agent flow fails.
    """
    query_processor = QueryProcessor(db_manager)
    embedding_client = ExternalEmbeddingClient(embedding_config)
    # Apply per-request generation overrides onto the default model config.
    model_param = config.get_default_model()
    model_param.temperature = request.temperature
    # max_tokens is optional in the request; keep the model default otherwise.
    if request.max_tokens:
        model_param.max_tokens = request.max_tokens
    llm_client = LLMClient(provider=model_param.provider, model_param=model_param)
    generation_service = GenerationService(model_param)
    # NOTE: this is the non-streaming endpoint (previous log text wrongly
    # said "stream query", copy-pasted from query_stream).
    logger.info("query question: %s", request.question)
    # Shared state consumed (and mutated) by the RAGAgent flow; the agent is
    # expected to write its output under shared["query"]["response"].
    shared = {
        "context": {
            "db_manager": db_manager,
            "processor": query_processor,
            "embedding_client": embedding_client,
            "llm_client": llm_client,
            "generation": generation_service,
        },
        "query": {
            "question": request.question,
            "top_k": request.top_k,
            "max_tokens": request.max_tokens,
            "temperature": request.temperature,
        },
    }
    # Keep the try body minimal: only the agent run can raise here.
    try:
        agent = RAGAgent(shared)
        agent.create_flow()
        await agent.run()
    except Exception as e:
        # Preserve the traceback in the logs and chain the cause so the
        # original failure is not lost behind the HTTP 500.
        logger.exception("query failed for question: %s", request.question)
        raise HTTPException(status_code=500, detail=f"查询失败: {str(e)}") from e
    logger.info("query response ready for question:\n %s", request.question)
    # TODO: format response into a structured QueryResponse model.
    return shared["query"].get("response")

