from langchain.prompts import (PromptTemplate)
from langchain_community.llms import Ollama
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field, validator

# Connect to a locally served Ollama instance (default port 11434) running mistral.
host = "localhost"
model = Ollama(base_url=f"http://{host}:11434", model="mistral")

class Joke(BaseModel):
    """Structured joke the LLM must emit: a setup question and its punchline."""

    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")

    @validator('setup')
    def question_ends_with_question_mark(cls, field):
        """Ensure the setup reads as a question (ends with '?').

        Uses str.endswith so an empty string is rejected with the intended
        ValueError instead of crashing with IndexError on `field[-1]`.
        """
        if not field.endswith("?"):
            raise ValueError("Badly formed question!")
        return field

joke_query = "Tell me a joke."

# Parser that turns the raw LLM text into a validated Joke instance.
parser = PydanticOutputParser(pydantic_object=Joke)

# Bake the parser's JSON-schema instructions into the prompt up front,
# leaving only {query} to be filled per call.
format_instructions = parser.get_format_instructions()
prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": format_instructions},
)

# Render the prompt, run the model, and parse the reply into a Joke.
_input = prompt.format_prompt(query=joke_query)
output = model(_input.to_string())
print(parser.parse(output))

# Second round trip through the same prompt/model/parser pipeline.
# NOTE(review): this query asks for ice cream flavors but the output is still
# parsed against the Joke schema (setup/punchline) — presumably leftover from
# a different example; confirm this is intended, since the model may follow
# the query instead of the format instructions and make parser.parse raise.
_input = prompt.format_prompt(query="ice cream flavors")

output = model(_input.to_string())

print(parser.parse(output))