import dotenv
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import Field, BaseModel
from langchain_core.runnables import RunnablePassthrough
# Load environment variables from a local .env file — presumably the API key
# (and base URL) for the OpenAI-compatible Kimi endpoint; confirm in .env.
dotenv.load_dotenv()

# Pydantic schema for the structured LLM output: one hypothetical
# question/answer pair extracted from the user's query.
# NOTE(review): the class docstring and the Field descriptions below are kept
# in Chinese on purpose — they become part of the model's JSON schema and may
# be surfaced to the (Chinese-prompted) LLM at runtime, so they are runtime
# data, not mere documentation.
class QAExtractor(BaseModel):
    """这是一个问答键值对工具，传递对应的假设性问题和答案"""
    # The hypothetical question extracted from the query.
    question: str = Field(description="假设性问题")
    # The answer corresponding to that hypothetical question.
    answer: str = Field(description="假设性问题对应的答案")

# Chat model: Moonshot's Kimi K2 behind an OpenAI-compatible API.
# temperature=0 for deterministic extraction.
# NOTE(review): assumes the API key / base URL are provided via the
# environment loaded from .env — confirm against deployment config.
llm = ChatOpenAI(model="kimi-k2-0711-preview", temperature=0)

# Structured output via JSON mode: the model emits raw JSON which is parsed
# and validated into a QAExtractor instance. With method="json_mode" the
# schema is NOT injected automatically, so the system prompt below must
# explicitly name the expected keys ('question' / 'answer') — and it does.
structured_llm = llm.with_structured_output(QAExtractor, method="json_mode")

prompt = ChatPromptTemplate.from_messages([
    ("system", "请从用户传递的query中提取假设性问题和答案。响应格式为json，并携带'question' 和 'answer'"),
    ("human", "{query}"),
])

# Accept a bare string as input: RunnablePassthrough forwards it into the
# "query" template variable the prompt expects.
chain = {"query": RunnablePassthrough()} | prompt | structured_llm

# Guard the demo invocation so importing this module does not trigger a live
# API call and a print — only direct execution runs it.
if __name__ == "__main__":
    print(chain.invoke("我叫慕小课，我喜欢打篮球，游泳"))