from dotenv import load_dotenv, find_dotenv
from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
from langchain_core.output_parsers import PydanticOutputParser

_ = load_dotenv(find_dotenv())

# Define the output object: a pydantic model describing the fields the LLM
# must fill in.  Each Field description is included in the JSON schema that
# with_structured_output sends to the model, so it doubles as an instruction.
# (No class docstring on purpose: pydantic would inject it into the schema
# as the model description, changing what the LLM sees.)
class Date(BaseModel):
    year: int = Field(description="Year")
    month: int = Field(description="Month")
    day: int = Field(description="Day")
    era: str = Field(description="BC or AD")
    
# --- Approach 1: structured output bound to a pydantic model -----------------
# temperature=0 makes the extraction as deterministic as the API allows.
model_name = 'gpt-4o-mini'
temperature = 0
# langchain_openai's canonical keyword is `model=`; `model_name=` is a
# deprecated alias kept only for backward compatibility.
llm = ChatOpenAI(model=model_name, temperature=temperature)

# Bind the schema to the model: invoke() now returns a Date instance
# instead of a free-form chat message.
structured_llm = llm.with_structured_output(Date)

template = """提取用户输入中的日期。
用户输入:
{query}"""

# from_template infers the input variables ({query}) from the template text,
# so there is no risk of the declared variables drifting out of sync.
prompt = PromptTemplate.from_template(template)

query = "2024年八月20日天气晴..."
input_prompt = prompt.format_prompt(query=query)

response = structured_llm.invoke(input_prompt)
print(response)

# --- Approach 2: structured output from a raw JSON schema --------------------
# The same constraint can be expressed as a plain dict (useful when the
# schema is constructed at runtime rather than declared as a class).
json_schema = {
    "title": "Date",
    # This description is sent to the LLM as part of the schema.
    # (Typo fix: was "Formated".)
    "description": "Formatted date expression",
    "type": "object",
    "properties": {
        "year": {
            "type": "integer",
            "description": "year, YYYY",
        },
        "month": {
            "type": "integer",
            "description": "month, MM",
        },
        "day": {
            "type": "integer",
            "description": "day, DD",
        },
        "era": {
            "type": "string",
            "description": "BC or AD",
        },
    },
}
structured_llm = llm.with_structured_output(json_schema)

# NOTE(review): with a dict schema the result should be a plain dict rather
# than a Date instance (per the with_structured_output docs) — verify.
print(structured_llm.invoke(input_prompt))

# --- Approach 3: prompt-level format instructions + JsonOutputParser ---------
# Local import kept here to preserve the tutorial's section-by-section flow.
from langchain_core.output_parsers import JsonOutputParser

# The parser both (a) renders format instructions for the prompt and
# (b) parses the model's raw text back into a dict shaped like Date.
parser = JsonOutputParser(pydantic_object=Date)

prompt = PromptTemplate(
    template="提取用户输入中的日期。\n用户输入:{query}\n{format_instructions}",
    input_variables=["query"],
    # get_format_instructions() renders the Date schema as prompt text,
    # baked in once here as a partial variable.
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
print("=================")
print(prompt)
print("=================")
input_prompt = prompt.format_prompt(query=query)
print("xxxxxxxxxxxxxxxxxxxx")
print(input_prompt)
print("xxxxxxxxxxxxxxxxxxxx")
# Plain invoke: the model returns raw text, which the parser then decodes.
output = llm.invoke(input_prompt)
print(f"原始输出:\n{output.content}")

print("\n解析后:")
print(parser.invoke(output))
