##
# LLM basics: create an OpenAI completion model and ask for one completion.
# The OpenAI API key is picked up from the OPENAI_API_KEY environment
# variable by the client library — never hard-code secrets in source.
# (A previously commented-out hard-coded key has been removed; if it was
# ever valid it should be revoked.)
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI

llm = OpenAI(model_name='gpt-3.5-turbo', temperature=0.9)
t1 = llm.predict("What would be a good company name for a company that makes colorful socks?")

##
# Prompt-template + chain demo: substitute {food} into a fixed template
# and run the resulting prompt through the LLM.
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

prompt = PromptTemplate(
    template="What are 5 vacation destinations for someone who likes to eat {food}?",
    input_variables=["food"],
)
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run("fruit"))

##
# SerpAPI demo: run a Google search for "coffee" near Austin and print
# the parsed result dictionary.
# SECURITY: a live API key was hard-coded here (a leaked secret committed
# to source); it is now read from the SERPAPI_API_KEY environment variable
# instead. The old key should be revoked.
import os

from serpapi import GoogleSearch

search = GoogleSearch({
    "q": "coffee",
    "location": "Austin,Texas",
    "api_key": os.environ["SERPAPI_API_KEY"],
})
result = search.get_dict()
print(result)
##
# Agent demo: equip a temperature-0 LLM with a web-search tool and a
# calculator tool, then ask a question that needs both.
from langchain.agents import initialize_agent, load_tools
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
tools = load_tools(["serpapi", "llm-math"], llm=llm)
agent = initialize_agent(tools, llm, verbose=True)
t2 = agent.run("Who is the current leader of Japan? What is the largest prime number that is smaller than their age?")
print(t2)
##
# Structured-output setup: a deterministic (temperature 0) OpenAI model,
# to be paired below with a Pydantic output parser.
from typing import List

from pydantic import BaseModel, Field, validator

from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, PromptTemplate

model_name = 'gpt-3.5-turbo'
temperature = 0.0
model = OpenAI(model_name=model_name, temperature=temperature)

class Joke(BaseModel):
    """Schema for a joke: a set-up question plus its punchline."""

    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")

    # You can add custom validation logic easily with Pydantic.
    @validator('setup')
    def question_ends_with_question_mark(cls, field):
        # str.endswith handles the empty string safely; the previous
        # field[-1] raised IndexError on "" instead of the intended
        # ValueError.
        if not field.endswith('?'):
            raise ValueError("Badly formed question!")
        return field
    
# Set up a parser + inject instructions into the prompt template.
parser = PydanticOutputParser(pydantic_object=Joke)

prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)

# A query intended to make the model populate the Joke data structure.
joke_query = "Tell me a joke."
_input = prompt.format_prompt(query=joke_query)
print('_input:  ', _input.to_string())

# Call the LLM on the rendered prompt, then parse the raw text back
# into a Joke instance.
output = model(_input.to_string())

po = parser.parse(output)

##
ist = parser.get_format_instructions()