from typing import Literal

from fastapi import APIRouter, Body, Request
from sse_starlette.sse import EventSourceResponse
import sys
import importlib
import types

from chatchat.server.types.server.response.base import BaseResponse
from chatchat.settings import Settings
from chatchat.server.utils import get_prompt_template, get_server_configs

# Router grouping all server-state endpoints under the /server prefix.
server_router = APIRouter(prefix="/server", tags=["Server State"])

# Names of every prompt-template group declared on the settings model;
# used to document the valid `type` values for template lookups.
available_template_types = list(Settings.prompt_settings.model_fields.keys())

# Server-related endpoints: expose the raw server configuration via POST.
server_router.add_api_route(
    "/configs",
    get_server_configs,
    methods=["POST"],
    summary="获取服务器原始配置信息",
)


@server_router.post("/get_prompt_template", summary="获取服务器配置的 prompt 模板", response_model=BaseResponse)
def get_server_prompt_template(
        type: str = Body(
            # BUG FIX: the original string lacked the `f` prefix, so
            # "{available_template_types}" appeared literally in the API docs
            # instead of listing the actual template types.
            "llm_model", description=f"模板类型，可选值：{available_template_types}"
        ),
        name: str = Body("default", description="模板名称"),
):
    """Return the prompt template configured on the server.

    Args:
        type: Template category; valid values come from
            ``Settings.prompt_settings`` field names (see
            ``available_template_types``).
        name: Template name within that category.

    Returns:
        BaseResponse: success payload carrying the template string, or an
        error response when no template matches ``type``/``name``.
    """
    prompt_template = get_prompt_template(type=type, name=name)
    if prompt_template is None:
        return BaseResponse.error("Prompt template not found")
    return BaseResponse.success(prompt_template)

# The core Q&A flow is reused from ask_question.py. The import is performed
# lazily (on first use) instead of eagerly at module load: with the original
# eager `importlib.import_module("ask_question")`, a missing or broken
# ask_question module made this entire router module fail to import.
_ask_question_module = None


def _load_ask_question_module():
    """Import and cache the ``ask_question`` module on first use.

    Raises:
        ModuleNotFoundError: if ``ask_question`` cannot be imported.
    """
    global _ask_question_module
    if _ask_question_module is None:
        _ask_question_module = importlib.import_module("ask_question")
    return _ask_question_module


def ask_question_stream(question: str):
    """Yield answer chunks for ``question``.

    Delegates to ``ask_question.ask_and_stream`` (which must be implemented
    in ask_question.py) and re-yields each chunk unchanged.
    """
    yield from _load_ask_question_module().ask_and_stream(question)

@server_router.post("/ask_stream", summary="流式问答接口（SSE）")
async def ask_stream_api(request: Request, question: str = Body(..., description="你的问题")):
    """Stream the answer to ``question`` as Server-Sent Events.

    The original wrapped ``ask_question_stream(question)`` in a redundant
    pass-through generator (``event_gen``) that re-yielded every chunk
    unchanged; the generator is now handed to ``EventSourceResponse``
    directly — same chunks, same order, one less indirection.

    Args:
        request: Injected by FastAPI; currently unused beyond injection,
            kept for interface compatibility.
        question: The user's question, taken from the request body.

    Returns:
        EventSourceResponse: an SSE response emitting each answer chunk.
    """
    return EventSourceResponse(ask_question_stream(question))
