import os

from langchain_community.llms.ollama import Ollama
from langchain_community.utils.openai_functions import (
    convert_pydantic_to_openai_function,
)
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field, validator
from langchain_openai import ChatOpenAI
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser


class Joke(BaseModel):
    """Joke to tell user."""

    # NOTE: the class docstring above and the Field descriptions below are not
    # mere documentation — convert_pydantic_to_openai_function serializes them
    # into the OpenAI function schema sent to the model, so editing them
    # changes what the LLM sees.
    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")


# Serialize the Joke model into an OpenAI function-calling schema.
openai_functions = [convert_pydantic_to_openai_function(Joke)]

# Read credentials from the environment; os.environ[...] raises KeyError at
# startup if either variable is missing, which is the desired fail-fast.
OPENAI_API_KEY = os.environ["OPENAI_API_KEY_ZHIHU"]
OPENAI_API_BASE_ZHIHU = os.environ["OPENAI_API_BASE_ZHIHU"]

# Chat model pointed at the custom API base; temperature 0.5 for mild variety.
model = ChatOpenAI(
    temperature=0.5,
    openai_api_key=OPENAI_API_KEY,
    openai_api_base=OPENAI_API_BASE_ZHIHU,
)

# Simple two-message prompt; {input} is filled at invoke time.
prompt = ChatPromptTemplate.from_messages(
    [("system", "You are helpful assistant"), ("user", "{input}")]
)

# Extracts the JSON arguments of the function call from the model response.
# It raises if the model replies with plain text instead of calling a
# function, so we force the call below via function_call={"name": "Joke"}.
parser = JsonOutputFunctionsParser()

chain = (
    prompt
    | model.bind(functions=openai_functions, function_call={"name": "Joke"})
    | parser
)

response = chain.invoke({"input": "tell me a joke"})
print(response)
