from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.memory import ChatMemoryBuffer
import os

from chat_engine import get_chat_engine_for_session
from init_config import Config
from init_db import load_vector_store
from init_model import init_models
from query_engine import set_vector_store, get_query_engine_for_session

app = FastAPI()

# Allow cross-origin requests.
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# disallowed by the CORS spec — browsers reject credentialed responses that
# echo a wildcard origin. If cookies/auth headers are actually needed, list
# explicit origins; otherwise drop allow_credentials. Confirm intent.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# Request model
class ChatRequest(BaseModel):
    """Request body shared by the /query and /chat endpoints."""

    # The user's input text for this turn.
    message: str
    # Key used to select the per-session engine; clients that omit it all
    # share the "default" session.
    session_id: str = "default"


# Stores chat_engine instances for different sessions.
# NOTE(review): nothing in this module reads or writes this dict — the
# per-session engines appear to be managed inside the chat_engine /
# query_engine modules instead. Likely dead code; confirm before removing.
session_engines: dict = {}

# Single-turn dialogue, RAG (no conversation memory).
@app.post("/query")
async def query_endpoint(request: ChatRequest):
    """Answer one question through the session's RAG query engine.

    Returns {"response": <answer text>}. Any failure is surfaced to the
    client as an HTTP 500 with the error message as detail.
    """
    try:
        query_engine = get_query_engine_for_session(request.session_id)
        # llama_index query engines expose `aquery` as the awaitable entry
        # point; `query` is synchronous and returns a Response object, so
        # `await query_engine.query(...)` raises TypeError at runtime.
        response = await query_engine.aquery(request.message)
        return {"response": str(response)}
    except Exception as e:
        # Chain the cause so the original traceback survives into the logs.
        raise HTTPException(status_code=500, detail=str(e)) from e

# Multi-turn dialogue (per-session conversation memory).
@app.post("/chat")
async def chat_endpoint(request: ChatRequest):
    """Continue the session's conversation with the chat engine.

    Returns {"response": <reply text>}. Any failure is surfaced to the
    client as an HTTP 500 with the error message as detail.
    """
    try:
        # Renamed from `query_engine`: this holds a chat engine.
        chat_engine = get_chat_engine_for_session(request.session_id)
        # llama_index chat engines expose `achat` as the awaitable entry
        # point; `chat` is synchronous, so `await ...chat(...)` raises
        # TypeError at runtime.
        response = await chat_engine.achat(request.message)
        return {"response": str(response)}
    except Exception as e:
        # Chain the cause so the original traceback survives into the logs.
        raise HTTPException(status_code=500, detail=str(e)) from e

@app.get("/health")
async def health_check():
    """Liveness probe: always reports the service as healthy."""
    payload = {"status": "healthy"}
    return payload


if __name__ == "__main__":
    import uvicorn

    # Initialize models from the remote LLM API configuration.
    # NOTE(review): embed_model and llm are never used below — presumably
    # init_models also registers them globally (e.g. llama_index Settings);
    # confirm against init_model.py.
    embed_model, llm = init_models(Config.REMOTE_LLM_API)
    # Load the persisted vector index and hand it to the query-engine module
    # so the endpoints can build per-session engines from it.
    index = load_vector_store()
    set_vector_store(index)

    # Serve on all interfaces, port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)