from pydantic import BaseModel, Field
from langchain_ollama import ChatOllama
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate
from langchain_core.output_parsers import PydanticOutputParser

class Writer(BaseModel):
    """Structured record describing a writer: name, nationality, major works."""
    name: str = Field(description="name of a writer")
    nationality: str = Field(description="nationality of a writer")
    # list[str] instead of bare list: each entry is the title of one work,
    # which also makes the generated format instructions more precise.
    main_works: list[str] = Field(description="List of major works")

# Instantiate the Pydantic output parser for the Writer schema.
output_parser = PydanticOutputParser(pydantic_object=Writer)

# Prompt template constraining the LLM to JSON-only output.
# Fix: the system message must come FIRST — chat models treat the leading
# system message as the governing instruction; the original placed it after
# the human message, weakening (or invalidating) the JSON-only constraint.
prompt = ChatPromptTemplate.from_messages([
    SystemMessagePromptTemplate.from_template("请以 JSON 格式给出返回信息，不要有其他任何非json中的内容。"),
    HumanMessagePromptTemplate.from_template("{request} \n {format_instructions}"),
])

# Locally served Ollama chat model; temperature 0.5 trades a little
# determinism for some variety in phrasing.
_llm_config = {"model": "llama3", "temperature": 0.5}
llm = ChatOllama(**_llm_config)

# Fill the prompt template. Fix: {format_instructions} must carry the
# parser's generated schema instructions — the original hard-coded a prose
# sentence here, so the model never saw the JSON schema that
# output_parser.parse() later requires, making parsing fail routinely.
formatted_messages = prompt.format_messages(
    request="海明威是谁？",
    format_instructions=output_parser.get_format_instructions()
)


# Send the formatted messages to the model and show the raw response.
response = llm.invoke(formatted_messages)

print("原始 LLM 输出：", response)
print("原始 LLM 输出内容：", response.content)

# Best-effort parse of the raw text into a Writer instance; report a
# failure instead of crashing the script.
try:
    writer_obj = output_parser.parse(response.content)
except Exception as err:
    print("解析错误：", err)
else:
    print("解析后的 Writer 对象：", writer_obj)
