# Gradio chatbot Space: weather fact-checking agent (LangChain + OpenAI functions).
import gradio as gr | |
import random | |
import time | |
import os | |
import gradio as gr | |
import numpy as np | |
from langchain.chains import LLMChain | |
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder | |
import re | |
import requests | |
from langchain.chat_models import ChatOpenAI | |
from langchain.agents import AgentType, Tool, initialize_agent | |
from langchain.tools.render import format_tool_to_openai_function | |
from langchain.agents.format_scratchpad import format_to_openai_function_messages | |
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser | |
from langchain.agents import AgentExecutor | |
from langchain.schema import ( | |
SystemMessage, | |
HumanMessage, | |
AIMessage | |
) | |
# Shared chat model for the agent built in qa_retrieve(); temperature 0 keeps
# tool-calling deterministic. Assumes OPENAI_API_KEY is set in the environment
# (read implicitly by ChatOpenAI) — confirm in the Space's secrets.
llm = ChatOpenAI(
    temperature=0,
    model='gpt-3.5-turbo-16k'
)
def extract_temperature(city):
    """Fetch the current temperature for *city* from weatherapi.com.

    Args:
        city: City or country name, passed verbatim as the ``q`` query param.

    Returns:
        The current temperature as a string, e.g. ``"21.5 °C"``.

    Raises:
        requests.HTTPError: if the API returns a 4xx/5xx status.
        KeyError: if ``WEATHER_API_KEY`` is unset or the payload lacks
            the expected ``current.temp_c`` field.
    """
    # current.json is a read-only GET endpoint; the original POSTed with a
    # bodyless JSON Content-Type header, which was both wrong and pointless.
    response = requests.get(
        'http://api.weatherapi.com/v1/current.json',
        params={'q': city, 'key': os.environ['WEATHER_API_KEY']},
        timeout=10,  # don't hang the Gradio UI on a slow/unreachable API
    )
    response.raise_for_status()  # fail loudly instead of a confusing KeyError below
    data = response.json()
    return str(data["current"]['temp_c']) + " °C"
# System prompts for the bot; qa_retrieve() only ever uses index 0
# (the weather fact-checker persona) as the ChatPromptTemplate system message.
personalities = ["You are a weather fact checker. You will check if the user prompts about the temperature in a certain city. You need to use the functions provided to you when needed.",]
def user(user_message, history):
    """Record a new user turn and clear the input box.

    Returns a pair: the empty string that resets the textbox, and the chat
    history extended with ``[user_message, None]`` (bot reply pending, to be
    filled in later by qa_retrieve).
    """
    updated = list(history)
    updated.append([user_message, None])
    return "", updated
def remove_numbers(question):
    """Return *question* with every ASCII digit (0-9) removed."""
    # Mapping each ASCII digit to None deletes it in a single C-level pass.
    drop_digits = str.maketrans({digit: None for digit in "0123456789"})
    return question.translate(drop_digits)
# llm = ClaudeLLM() | |
def add_text(history, text):
    """Append a new user turn to *history* and clear the textbox.

    Uses a mutable ``[text, None]`` pair (not a tuple) so that
    qa_retrieve() can later fill in the reply via ``chatlog[-1][1] = ...``,
    consistent with user(). The leftover debug print was removed.

    Args:
        history: list of [user, bot] chat pairs.
        text: the newly submitted user message.

    Returns:
        (extended history, "" to reset the input box)
    """
    return history + [[text, None]], ""
def qa_retrieve(chatlog, index):
    """Answer the latest user message in *chatlog* with an OpenAI-functions agent.

    Rebuilds the whole conversation as a ChatPromptTemplate (system persona +
    prior turns + a "{input}" slot + the agent scratchpad), wires up the
    weather-lookup tool, runs the agent on the newest message, and writes the
    answer into the last chat entry in place.

    Args:
        chatlog: list of [user, bot] pairs; the last entry has bot == None.
        index: forwarded from a hidden Textbox in the UI; unused here.

    Returns:
        The same chatlog list with the final entry's bot slot filled in.
    """
    # Prior turns, newest excluded. NOTE(review): this emits the assistant
    # reply BEFORE the user message for each past turn — the ordering looks
    # inverted relative to the actual conversation; confirm intended.
    msgs = [[('assistant',chat[1]), ('user', chat[0])] for chat in chatlog[:-1]]
    flat_msgs = [y for x in msgs for y in x]
    # Prepend the persona, append the templated new input and the scratchpad
    # placeholder the functions-agent loop fills with intermediate tool calls.
    flat_msgs = list([("system", personalities[0])] + flat_msgs + [("user", "{input}")] + [MessagesPlaceholder(variable_name="agent_scratchpad")])
    print(flat_msgs)
    print(type(flat_msgs))
    msgs = flat_msgs
    prompt = ChatPromptTemplate.from_messages(
        msgs
    )
    # Single tool: temperature lookup via weatherapi (see extract_temperature).
    tools = [
        Tool(
            name="Search",
            func= extract_temperature,
            description="useful for when you want to retrieve temperature degrees in a certain city or country. Input should be in the form of a string containing the city or country provided.",
        ),]
    # Expose the tools to the model as OpenAI function schemas.
    llm_with_tools = llm.bind(functions=[format_tool_to_openai_function(t) for t in tools])
    # LCEL pipeline: map inputs -> prompt -> model -> parse function calls.
    agent = (
        {
            "input": lambda x: x["input"],
            "agent_scratchpad": lambda x: format_to_openai_function_messages(
                x["intermediate_steps"]
            ),
        }
        | prompt
        | llm_with_tools
        | OpenAIFunctionsAgentOutputParser()
    )
    print(f"Chatlog qa: {chatlog}")
    # The newest user message drives the agent run.
    query = chatlog[-1][0]
    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
    gen = agent_executor.invoke(
        {
            "input": query
        })
    # prompt = PromptTemplate(
    #     input_variables=["query"],
    #     template="""
    #     {personality}
    #     {query}
    #     """,
    # )
    # llm = BardLLM()
    # chain = LLMChain(llm=llm, prompt = prompt, )
    # response = chain.run(query=query, personality = personalities[0])
    # Fill the pending bot slot of the newest turn in place.
    chatlog[-1][1] = gen['output']
    return chatlog
def flush():
    """Reset the module-level ``db`` to an empty string; returns None (Gradio hook)."""
    global db
    db = str()
    return
# --- Gradio UI -------------------------------------------------------------
# NOTE(review): .style() on components is the legacy pre-4.x Gradio API —
# confirm the pinned gradio version in the Space still supports it.
with gr.Blocks(css = """#white-button {
background-color: #FFFFFF;
color: #000000;
}
#orange-button-1 {
background-color: #FFDAB9;
color: #000000;
}
#orange-button-2 {
background-color: #FFA07A;
color: #FFFFFF;
}
#orange-button-3 {
background-color: #FF4500;
color: #FFFFFF;
}""", theme=gr.themes.Soft()) as demo:
    # Main chat transcript pane.
    chatbot = gr.Chatbot().style(height=750)
    with gr.Row():
        with gr.Column(scale = 0.75, min_width=0):
            msg = gr.Textbox(placeholder = "Enter text and press enter",show_label=False).style(container = False)
        with gr.Column(scale = 0.25, min_width=0):
            clear = gr.Button("Clear")
    # Hidden value forwarded to qa_retrieve() as `index` (unused there).
    index = gr.Textbox(value = "0", visible = False)
    # On submit: user() echoes the message into the transcript, then
    # qa_retrieve() runs the agent and fills in the reply.
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        qa_retrieve, [chatbot, index], chatbot
    )
    # marcus.click(lambda x: x, marcus, msg)
    # travel_guide.click(lambda x: x, travel_guide, msg)
    # astrologer.click(lambda x: x, astrologer, msg)
    # Clear wipes the transcript (history state lives in the Chatbot itself).
    clear.click(lambda: None, None, chatbot, queue=False)
demo.launch()