# Output parsers (LangChain examples)

# List parser (comma-separated values)
from langchain.output_parsers import (CommaSeparatedListOutputParser,
                                      DatetimeOutputParser,
                                      PydanticOutputParser)
from pydantic import BaseModel, Field, field_validator
from typing import List
from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_openai import ChatOpenAI, OpenAI
# from langchain.chains.llm import LLMChain
from openaiConfigurations import openai_api_key, openai_api_base
from langchain_ollama import ChatOllama

# output_parser = CommaSeparatedListOutputParser()
# format_instructions = output_parser.get_format_instructions()
# prompt = PromptTemplate(
#     template = "list five {subject}.\n{format_instructions}",
#     input_variables = ["subject"],
#     partial_variables = {"format_instructions": format_instructions}
# )

model = ChatOpenAI(openai_api_key = openai_api_key, openai_api_base = openai_api_base)
# _input = prompt.format(subject = "冰淇淋口味")
# output = model.invoke(_input)
# resp = output_parser.parse(output.content)
# # Note: resp is a Python list
# print(resp)

# # Date-and-time parser
# output_parser = DatetimeOutputParser()
# template = '''回答用户问题:
# {question}
# {format_instruction}'''
# prompt = PromptTemplate.from_template(
#     template,
#     partial_variables = {"format_instruction": output_parser.get_format_instructions()},
# )
# llm = ChatOpenAI(openai_api_key = openai_api_key,
#                   openai_api_base = openai_api_base,)
# chain = prompt | llm
# output = chain.invoke("比特币大约是什么时候创建的？")
# # The parsed output is a datetime.datetime
# print(output_parser.parse(output.content))

# # Enum parser
# from langchain.output_parsers.enum import EnumOutputParser
# from enum import Enum
# class Colors(Enum):
#     RED = "红色"
#     GREEN = "绿色"
#     BLUE = "蓝色"

# parser = EnumOutputParser(enum = Colors)
# parser.parse("红色")
# parser.parse("绿色")
# print(parser.parse("蓝色"))

# model = ChatOpenAI(temperature = 0.0, openai_api_key = openai_api_key, openai_api_base = openai_api_base)
# # Define the desired output structure
# class Joke(BaseModel):
#     setup: str = Field(description = "question to set up a joke")
#     punchline: str = Field(description = "answer to resolve the joke")

#     # Add custom validation logic
#     @field_validator("setup")
#     def question_ends_with_question_mark(cls, field):
#         if field[-1] != "?":
#             raise ValueError("形式不正确的问题！")
#         return field
    
# joke_query = "tell me a joke."
# parser = PydanticOutputParser(pydantic_object = Joke)
# prompt = PromptTemplate(
#     template = "Answer the user query.\n{format_instruction}\n{query}\n",
#     input_variables = ["query"],
#     partial_variables = {"format_instruction": parser.get_format_instructions()},
# )

# _input = prompt.format_prompt(query = joke_query)
# output = model.invoke(_input.to_string())
# joke = parser.parse(output.content)
# # Note: joke here is an instance of Joke
# print(joke, type(joke))

# # Output-fixing parser: when parsing fails, this parser forwards the bad
# # output together with the format instructions to another LLM for repair.
class Actor(BaseModel):
    """Schema for an actor and the films they starred in.

    The field descriptions are embedded into the parser's format
    instructions, so they should be well-formed English.
    """

    name: str = Field(description = "name of an actor")
    # NOTE: the key `file_names` is kept as-is because the demo payload
    # below ("misformatted") uses the same key; only the description's
    # typo ("filems" -> "films") is corrected.
    file_names: List[str] = Field(description = "list of names of films they starred in")

actor_query = "Generate the filmography for a random actor"
parser = PydanticOutputParser(pydantic_object = Actor)
# Pretend this string came from an LLM. It uses single quotes, so it is not
# valid JSON and the plain Pydantic parser cannot parse it.
misformatted = "{'name': 'Tom Hanks', 'file_names': ['Forrest Gump']}"
# parser.parse(misformatted)

from langchain.output_parsers import OutputFixingParser
# For brevity we skip the LLM call that would have produced `misformatted`
# and parse the bad string directly. On failure, the fixing parser sends the
# bad output plus the format instructions to `model` to repair it.
# llm = ChatOllama(model = "deepseek-r1:7b")
new_parser = OutputFixingParser.from_llm(parser = parser, llm = model)
resp = new_parser.parse(misformatted)
print(resp)