import os

from fastapi import APIRouter, Query
from fastapi.responses import JSONResponse
from langchain_core.output_parsers import JsonOutputParser, StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_ollama import ChatOllama

router = APIRouter()

# -----------------------------
# Local Ollama model client (shared by all endpoints below)
# -----------------------------
# Shared chat-model client. The service address was hard-coded; read it from
# the environment instead, keeping the original value as the default so
# existing deployments are unaffected.
llm = ChatOllama(
    model="qwen3:0.6b",                                            # model name
    base_url=os.getenv("OLLAMA_BASE_URL", "http://172.16.21.38:11436"),  # Ollama service address
)

# -----------------------------
# 1. Original endpoint: single task
# -----------------------------
@router.get("/ask_ollama", summary="调用本地 Ollama 模型（单任务）")
async def ask_ollama(
    question: str = Query(..., description="用户输入的问题")
):
    """Ask the local Ollama model a single question.

    Runs a prompt | llm | JsonOutputParser chain and returns the parsed
    JSON object (expected shape: {"answer": "..."}). On any chain/model
    failure, responds with HTTP 500 and {"error": "<message>"}.
    """
    parser = JsonOutputParser()
    prompt = ChatPromptTemplate.from_template(
        "请根据以下问题，给出简短回答，并返回 JSON 格式：\n\n问题: {question}\n\n"
        "输出格式示例：{{\"answer\": \"这里是回答\"}}"
    )
    chain = prompt | llm | parser
    try:
        # ainvoke() keeps the event loop free during the model round-trip;
        # the synchronous invoke() would block every other request for the
        # full duration of the LLM call inside this async handler.
        result = await chain.ainvoke({"question": question})
        return JSONResponse(content=result)
    except Exception as e:
        # API boundary: surface any chain/parse failure as a 500 payload.
        return JSONResponse(content={"error": str(e)}, status_code=500)


# -----------------------------
# 2. RunnableParallel endpoint: run multiple tasks concurrently
# -----------------------------
@router.get("/ask_ollama_parallel", summary="调用本地 Ollama 模型（并行任务）")
async def ask_ollama_parallel(
    text: str = Query(..., description="输入的文本")
):
    """Run two LLM tasks over the same text: summary + keyword extraction.

    Returns {"summary": <str>, "keywords": <str>} built by a
    RunnableParallel over two prompt | llm | StrOutputParser chains.
    On failure, responds with HTTP 500 and {"error": "<message>"}.
    """
    parser = StrOutputParser()

    # Summarization branch
    summary_prompt = ChatPromptTemplate.from_template("请总结以下文本:\n\n{text}")
    summary_chain = summary_prompt | llm | parser

    # Keyword-extraction branch
    keywords_prompt = ChatPromptTemplate.from_template("请提取以下文本的关键词:\n\n{text}")
    keywords_chain = keywords_prompt | llm | parser

    # Fan out: both branches receive the same {"text": ...} input.
    parallel_chain = RunnableParallel(
        summary=summary_chain,
        keywords=keywords_chain
    )

    try:
        # ainvoke() is required in an async handler: the sync invoke() would
        # block the event loop, and with the async API the two branches can
        # genuinely overlap instead of running back-to-back.
        result = await parallel_chain.ainvoke({"text": text})
        return JSONResponse(content=result)
    except Exception as e:
        # API boundary: surface any chain/model failure as a 500 payload.
        return JSONResponse(content={"error": str(e)}, status_code=500)


# -----------------------------
# 3. RunnablePassthrough endpoint: echo the original input alongside the result
# -----------------------------
@router.get("/ask_ollama_passthrough", summary="调用本地 Ollama 模型（保留原始输入）")
async def ask_ollama_passthrough(
    text: str = Query(..., description="输入的文本")
):
    """Return the original input together with the model's summary.

    Response shape: {"original": {"text": <input>}, "summary": <str>}.
    RunnablePassthrough forwards the chain input unchanged, so "original"
    echoes the {"text": ...} dict. On failure, responds with HTTP 500 and
    {"error": "<message>"}.
    """
    parser = StrOutputParser()

    summary_prompt = ChatPromptTemplate.from_template("请总结以下文本:\n\n{text}")
    summary_chain = summary_prompt | llm | parser

    # RunnableParallel wraps the dict: one branch passes the input through,
    # the other summarizes it.
    chain = RunnableParallel(
        original=RunnablePassthrough(),
        summary=summary_chain
    )

    try:
        # ainvoke() keeps the event loop responsive; the synchronous
        # invoke() would stall all other requests during the LLM call.
        result = await chain.ainvoke({"text": text})
        return JSONResponse(content=result)
    except Exception as e:
        # API boundary: surface any chain/model failure as a 500 payload.
        return JSONResponse(content={"error": str(e)}, status_code=500)
