import os

from langchain.output_parsers import OutputFixingParser, RetryOutputParser
from langchain_core.output_parsers import PydanticOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field


class Action(BaseModel):
    """Structured output schema for the LLM: an action name plus its input.

    `PydanticOutputParser` turns this model into JSON-format instructions for
    the prompt and validates the model's reply against it; the `description`
    strings below are embedded verbatim in those instructions.
    """

    # Name of the action the model decides to take (e.g. "search").
    action: str = Field(description="action to take")
    # Argument/payload for that action.
    action_input: str = Field(description="input to the action")

# Chat model pointed at a custom OpenAI-compatible endpoint. Both the API key
# and the base URL are read from the environment (raises KeyError if unset).
llm = ChatOpenAI(
    model="gpt-3.5-turbo",
    api_key=os.environ["OPENAI_API_KEY_ZHIHU"],
    base_url=os.environ["OPENAI_API_BASE_ZHIHU"],
)

# Parser that validates the model's JSON reply against the Action schema.
parser = PydanticOutputParser(pydantic_object=Action)

# Prompt that prepends the parser's JSON-schema instructions to the user query.
# The instructions are baked in once as a partial variable; only `query` is
# supplied at format time.
format_instructions = parser.get_format_instructions()
prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": format_instructions},
)

# Concrete PromptValue for one query; reused below by the retry parser.
prompt_value = prompt.format_prompt(query="who is leo di caprios gf?")

pipline = prompt | llm | parser

# Deliberately malformed model output: valid JSON, but the required
# "action_input" field is missing, so direct parsing would fail validation.
bad_response = '{"action": "search"}'

# OutputFixingParser asks the LLM to repair the bad output using only the
# schema's format instructions (no original prompt available to it).
fix_parser = OutputFixingParser.from_llm(parser=parser, llm=llm)
fixed = fix_parser.parse(bad_response)
print(fixed)

# RetryOutputParser additionally re-sends the original prompt, giving the
# LLM the query context when regenerating the answer.
retry_parser = RetryOutputParser.from_llm(parser=parser, llm=llm)
retried = retry_parser.parse_with_prompt(bad_response, prompt_value)
print(retried)
