from typing import Optional

from langchain_core.output_parsers import SimpleJsonOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field

from env_utils import LOCAL_API_KEY, LOCAL_BASE_URL


class OutDemo(BaseModel):
    """Structured-output schema for a joke-style LLM response.

    Fields carry Chinese descriptions that are surfaced to the model when
    used with ``with_structured_output`` (see the commented-out example
    below in this file).
    """

    setup: str = Field(description="头部份")
    punchline: str = Field(description="重点")
    # BUG FIX: without an explicit default, pydantic v2 treats this field as
    # *required* even though it is annotated Optional. default=None makes it
    # genuinely optional, so parsing succeeds when the model omits a rating.
    rating: Optional[int] = Field(default=None, description="评分")

# Chat-model client pointed at a local OpenAI-compatible endpoint
# (credentials/URL come from env_utils). temperature=0.8 allows fairly
# creative replies.
# NOTE(review): `extra_body` forwards `enable_thinking: False` via
# chat_template_kwargs — presumably a Qwen3-specific switch on the serving
# backend to suppress the model's "thinking" segment; confirm against the
# server's docs.
llm = ChatOpenAI(
    model="qwen3-8b",
    temperature=0.8,
    api_key=LOCAL_API_KEY,
    base_url=LOCAL_BASE_URL,
    extra_body={'chat_template_kwargs': {'enable_thinking': False}},
)

# --- Alternative approach (kept for reference): structured output bound to
# --- the OutDemo pydantic schema instead of free-form JSON parsing.
# prompt_template = PromptTemplate.from_template("我想咨询关于{topic}的动漫")
#
# runnable = llm.with_structured_output(OutDemo)
#
# chain = prompt_template | runnable
#
# resp = chain.invoke({"topic": "高达seed"})
# print(resp)

# Prompt that instructs the model to answer as an anime expert and reply in
# JSON with keys `answer` and `nextQuestion`. Adjacent string literals are
# concatenated by Python, so each sentence must end with its own punctuation.
# BUG FIX: added sentence-ending 。 between the concatenated literals (they
# previously ran together into one unpunctuated instruction) and corrected
# the typo 问提 -> 问题 ("question").
prompt_json = ChatPromptTemplate.from_template(
    "你是一个动漫专家。"
    "你必须输出一个包含answer和nextQuestion的json格式，其中answer是用户问题的回答，nextQuestion是预测用户的下一个问题。"
    "{question}"
)

# Compose prompt -> LLM -> JSON parser with LCEL pipe syntax, then run one
# query and print the parsed result (expected: a dict with `answer` and
# `nextQuestion`, per the prompt above).
# NOTE(review): nothing enforces that the model actually emits valid JSON —
# if it returns plain text, the parser step will presumably fail; verify
# error handling is acceptable for this demo.
chain = prompt_json | llm | SimpleJsonOutputParser()
resp = chain.invoke({"question": "你知道高达SEED吗？"})
print(resp)
