import json

from fastapi import APIRouter, Body, Request
from langchain_core.runnables import RunnableConfig
from loguru import logger
from starlette.responses import StreamingResponse

from app.api.exception.errcode import ServerError
from app.api.llms.callback_handler import LLMCallbackHandler
from app.api.llms.tengits_embedding import TengitsEmbedding
from app.api.llms.tengits_llm import TengitsLLM
from app.api.llms.utils import get_field_value
from app.api.schemas import resp_200, LLMServerCreateReq, LLMInvokeReq, LLMModelCreateReq, EmbeddingReq
from app.api.service.llm import LLMService

# Router for all LLM-related endpoints; every route below is mounted under /llm.
router = APIRouter(prefix='/llm', tags=['LLM'])




@router.get("", )
def get_all_llm(request: Request, ):
    return resp_200(data="hello")


@router.post("/server")
def add_llm_server(request: Request, server: LLMServerCreateReq = Body(..., description="服务提供方所有数据")):
    """添加llm服务商"""
    ret = LLMService.add_server(request, server)
    logger.info(f"添加服务商成功,ret={ret}")
    return resp_200(data=ret)


@router.post("/model")
def add_llm_model(request: Request, model: LLMModelCreateReq = Body(..., description="添加模型")):
    """添加llm模型"""
    try:
        ret = LLMService.add_model(request, model)
        return resp_200(data=ret)
    except Exception as e:
        logger.error(f"add model error:{e}")
        return ServerError.return_resp(f"添加模型失败:{model.model_name}")



@router.post("/invoke")
def invoke_llm(request: Request, payload: LLMInvokeReq = Body(..., description="模型调用参数")):
    """模型调用"""
    logger.debug(f" router.post invoke,payload={payload}")
    model_id = payload.model_id
    messages = payload.messages
    config = payload.config
    logger.debug("模型实例化")
    llm = TengitsLLM(model_id=model_id, **config)
    logger.debug("模型实例化完成,开始调用模型")


    # 创建异步生成器用于流式输出
    async def stream_generator():
        logger.debug("开始流失输出")
        try:
            if 'streaming' in config and config['streaming'] is False:
                item = {}
                async for chunk in llm.astream(messages,config=RunnableConfig(callbacks=[LLMCallbackHandler()]) ):
                    if 'content' not in item:
                        item['content'] = chunk.content
                    else:
                        item['content'] += chunk.content
                    if chunk.response_metadata:
                        item['response_metadata'] = chunk.response_metadata

                    item['usage_metadata'] = get_field_value(chunk, 'usage_metadata')
                yield f"data: {json.dumps(item, ensure_ascii=False)}\n\n"
            else:
                async for chunk in llm.astream(messages,config=RunnableConfig(callbacks=[LLMCallbackHandler()])):
                    # logger.info(f"chunk llm,name={llm.name},chunk={chunk}")
                    # chunk 是 AIMessageChunk，可以转 dict 或 str
                    yield f"data: {json.dumps({'content': chunk.content, 'response_metadata': chunk.response_metadata, 'usage_metadata': chunk.usage_metadata}, ensure_ascii=False)}\n\n"

        except Exception as e:
            # 错误也要传递给前端
            yield f"data: {json.dumps({'error': str(e)}, ensure_ascii=False)}\n\n"

    return StreamingResponse(
        stream_generator(),
        media_type="text/event-stream"  # SSE 格式
    )


@router.post("/embedding")
def embedding(request: Request, payload: EmbeddingReq = Body(..., description="向量化调用参数")):
    """模型调用"""
    logger.debug(f"router.post embedding,payload={payload}")

    model_id = payload.model_id
    texts = payload.texts
    config = payload.config

    embeddings = TengitsEmbedding(model_id=model_id, **config)
    ret = embeddings.embed_documents(texts)

    return resp_200(data=ret)
