import os

from langchain_community.llms.ollama import Ollama
from langchain_community.utils.openai_functions import (
    convert_pydantic_to_openai_function
)
from langchain_core.output_parsers.openai_functions import PydanticOutputFunctionsParser
from langchain_core.output_parsers.openai_tools import JsonOutputToolsParser, JsonOutputKeyToolsParser, \
    PydanticToolsParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from pydantic.v1 import validator


class Joke(BaseModel):
    """Joke to tell user.

    Used as an OpenAI tool schema via ``bind_tools`` so the model returns
    a structured setup/punchline pair instead of free text.
    """

    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")

    # You can add custom validation logic easily with Pydantic.
    @validator("setup")
    def question_ends_with_question_mark(cls, field):
        """Ensure the setup reads as a question.

        Uses str.endswith rather than ``field[-1]`` so an empty string
        raises the intended ValueError instead of an IndexError.
        """
        if not field.endswith("?"):
            raise ValueError("Badly formed question!")
        return field

# Required configuration — fail fast if either environment variable is missing.
OPENAI_API_KEY = os.environ["OPENAI_API_KEY_ZHIHU"]
OPENAI_API_BASE_ZHIHU = os.environ["OPENAI_API_BASE_ZHIHU"]

# Build the chat model, then bind the Joke schema as a callable tool so the
# model responds with a structured tool call instead of plain text.
_llm = ChatOpenAI(
    model="gpt-3.5-turbo",
    temperature=0.5,
    openai_api_key=OPENAI_API_KEY,
    openai_api_base=OPENAI_API_BASE_ZHIHU,
)
model_tools = _llm.bind_tools([Joke])

# Minimal system + user prompt; the user turn is filled in at invoke time.
prompt = ChatPromptTemplate.from_messages(
    [("system", "You are helpful assistant"), ("user", "{input}")]
)

# Parse the model's tool call back into Joke instances.
parser = PydanticToolsParser(tools=[Joke])
chain = prompt | model_tools | parser

response = chain.invoke({"input": "Tell me a joke"})
print(response)
