import os

from langchain_community.llms import Ollama
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field

# Local Llama 3 served by Ollama; assumes the Ollama daemon is listening
# on localhost:11434 — TODO confirm the daemon is running before use.
llm = Ollama(model="llama3", base_url="http://localhost:11434")
# Alternative backend: OpenAI-compatible endpoint, credentials read from env vars.
#llm = ChatOpenAI(model="gpt-3.5-turbo", api_key=os.environ["OPENAI_API_KEY_ZHIHU"],base_url=os.environ["OPENAI_API_BASE_ZHIHU"])
# Define the prompt template

# Define your desired data structure.
class Joke(BaseModel):
    """Target schema for the LLM's JSON reply: a two-part joke.

    The Field descriptions are surfaced to the model via
    JsonOutputParser.get_format_instructions(), so they are part of the
    prompt, not just documentation.
    """

    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")

# Parser that instructs the model to emit JSON matching the Joke schema
# and converts the raw completion into a dict.
parser = JsonOutputParser(pydantic_object=Joke)

# Prompt template: the parser's format instructions are baked in as a
# partial variable, so callers only supply {query}.
prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)

print(prompt)

# LCEL pipeline: render prompt -> call the LLM -> parse its JSON output.
chain = prompt | llm | parser
response = chain.invoke({"query": "Tell me a joke."})
print(response)



