# app.py

# 1. Environment Patch for Async Compatibility (MUST BE AT THE TOP)
import llama_index.core
from llama_index.core.instrumentation.dispatcher import Dispatcher
from llama_index.core.callbacks import CallbackManager

class DummyDispatcher(Dispatcher):
    """No-op dispatcher that silently discards every instrumentation event.

    Installed globally (below) so llama_index's instrumentation layer never
    runs its real event handlers — see the async-compatibility note at the
    top of this file.
    """

    def dispatch(self, event) -> None:
        """Swallow *event* without any side effects."""
        return None

# Swap the global dispatcher and callback manager for inert stand-ins so
# that no instrumentation/callback hooks fire at query time (the async
# compatibility workaround this file's header refers to).
# NOTE(review): assumes llama_index's Settings actually consults a
# `dispatcher` attribute — confirm against the installed llama_index version.
llama_index.core.Settings.dispatcher = DummyDispatcher()
llama_index.core.Settings.callback_manager = CallbackManager([])


# 2. FastAPI Application
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
# Import the pre-built query engine instance from our core module
from fusion_retriever.core.engine import query_engine_instance as query_engine

app = FastAPI(title="Enterprise RAG API", version="1.0.0")

# 3. Streaming API Endpoint
async def stream_generator(query: str):
    """Yield text chunks produced by the RAG query engine for *query*.

    Awaits the (streaming) query first, then iterates the engine's async
    response generator, forwarding each chunk as it arrives.
    """
    result = await query_engine.aquery(query)
    token_stream = result.async_response_gen()
    async for token in token_stream:
        yield token

@app.get("/stream-query", tags=["Querying"])
async def handle_stream_query(q: str):
    """
    Performs a streaming query against the RAG system.

    The response body is sent incrementally as plain text, one chunk per
    token batch emitted by the query engine.
    """
    chunk_source = stream_generator(q)
    return StreamingResponse(chunk_source, media_type="text/plain")

# Optional: Add a simple root endpoint for health checks
@app.get("/", tags=["Health Check"])
async def root():
    """Liveness probe: returns a static payload confirming the API is up."""
    payload = {"status": "ok", "message": "RAG API is running"}
    return payload


# To run the server, use the command in your terminal:
# uvicorn app:app --reload