from typing import List
# from langchain.llms import OpenAI
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from langchain_core.messages import HumanMessage
from pydantic import BaseModel,Field,field_validator
import os
from langchain.chat_models import ChatOpenAI

# Chat model configuration (gpt-3.5-turbo).
#
# SECURITY: the previous revision hard-coded two live-looking API keys in
# source. Keys committed to a repository must be considered leaked — rotate
# them. Credentials are now read from the environment instead; set
# OPENAI_API_KEY before running this script.
chat = ChatOpenAI(
    model="gpt-3.5-turbo",
    temperature=0.3,   # low temperature: mostly deterministic output
    max_tokens=200,
    api_key=os.environ["OPENAI_API_KEY"],  # fail fast if unset
    base_url="https://api.openai-hk.com/v1",
)

# Define the desired output data structure.
class Joke(BaseModel):
    """Structured schema for a single joke returned by the LLM."""

    # The descriptions are embedded in the parser's format instructions,
    # so the model sees them — keep them accurate and typo-free.
    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")

    # Validate that the setup is actually phrased as a question.
    @field_validator("setup")
    @classmethod
    def question_ends_with_question_mark(cls, value: str) -> str:
        # endswith() is safe on the empty string, unlike value[-1]
        # which would raise IndexError.
        if not value.endswith("?"):
            raise ValueError("badly formed question!")
        return value
joke_query = "Tell me a joke."

# Parser that converts the model's JSON reply into a validated Joke instance.
parser = PydanticOutputParser(pydantic_object=Joke)

prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)

# Compose prompt -> model -> parser so a single invoke() returns a Joke.
# Note: PromptTemplate.invoke requires a mapping of its input variables,
# not a bare string; and the parser needs the message *content* — piping
# the chat model into the parser handles that extraction for us.
chain = prompt | chat | parser
joke = chain.invoke({"query": joke_query})
print(joke)