from langchain_community.llms import Ollama
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

# Wire up a prompt -> LLM -> JSON-parser pipeline against a local Ollama server.
llm = Ollama(model="llama3", base_url="http://localhost:11434")
# Alternative OpenAI-compatible backend (needs `import os` plus the env vars below):
#llm = ChatOpenAI(model="gpt-3.5-turbo", api_key=os.environ["OPENAI_API_KEY_ZHIHU"],base_url=os.environ["OPENAI_API_BASE_ZHIHU"])

# Define the prompt template. The parser supplies format instructions that
# tell the model to reply in JSON, pre-filled via partial_variables.
parser = JsonOutputParser()
format_hint = parser.get_format_instructions()
prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}",
    input_variables=["query"],
    partial_variables={"format_instructions": format_hint},
)

print(prompt)

# LCEL pipe syntax: prompt feeds the llm, whose raw text output is parsed as JSON.
chain = prompt | llm | parser
response = chain.invoke({"query": "Tell me a joke."})
print(response)



