# Movie-recommendation chatbot for Swank Motion Pictures:
# Gradio UI + OpenAI (v0.x) function calling, backed by a recommender tool.
import os
import copy
import random
import time

import gradio as gr
import openai

from tools import get_movie_recs

# Fail fast at startup if the key is missing (KeyError rather than a
# confusing auth failure on the first request).
openai.api_key = os.environ['OPENAI_API_KEY']
#####################
### Chatbot logic ###
#####################

# Tool schema advertised to the model: a single function that queries the
# recommender system with the full conversation as its only argument.
functions = [
    {
        "name": "get_movie_recs",
        "description": "Given conversation context, generate a list of movie recommendations.",
        "parameters": {
            "type": "object",
            "properties": {
                "context": {
                    "type": "string",
                    "description": "Entire conversation history to this point.",
                },
            },
        },
    },
]
available_functions = {'get_movie_recs': get_movie_recs} | |
# Instructions sent to the model on every request. Note the deliberate
# policy: the recommender tool exists but must never be revealed to users.
system_prompt = """
You are a helpful assistant for customers of Swank Motion Pictures, a company that provides movie licensing
for various public and private events. Your job is to assist customers in selecting a movie. Customers usually
select movies based on the intended audience or event theme, and may also care about genre preference, movie length,
and mood. At your discretion, you may call a `get_movie_recs` function to query a recommender system.
It takes the entire conversation history as input and returns a list of movies as output.
Use the function to ground your response where appropriate.
If the user is asking to pick between options they provide, do not call the function. Otherwise, call the function.
Do not reveal to the user that you can query a recommender system.
Don't equivocate and take a stand if the user asks you a question.
If uncertain, provide information that will help the user make a decision. Don't repeat what the user said.
Be direct. Don't hedge. Omit disclaimers.
"""

# First assistant message shown when the app loads (also rendered as the
# page heading in the UI below).
greeting = """
Hey there! Need help picking out a movie for your event? Just describe your audience or theme,
and I'll suggest some great options!
"""

# Template for the OpenAI-side conversation state. Always deep-copy this
# before use -- it is a shared mutable module-level object.
initial_state = [
    {"role": "system", "content": system_prompt},
    {"role": "assistant", "content": greeting},
]
# Response logic for the chatbot.
def respond(
    user_message,
    chat_history,
    openai_chat_history,
    min_year,
    max_year,
    allowed_movies,
):
    '''
    Stream an assistant reply, optionally routed through the recommender tool.

    :param user_message: string, the user's message
    :param chat_history: list of lists, each sublist is a pair of user and
        assistant messages. This is rendered in the chatbot.
    :param openai_chat_history: list of dicts, superset of chat_history that
        includes the system prompt and function calls. This is sent to OpenAI.
    :param min_year: earliest release year allowed by the UI filter,
        forwarded to the recommender tool
    :param max_year: latest release year allowed by the UI filter,
        forwarded to the recommender tool
    :param allowed_movies: free-text allow-list from the UI; results should
        be a subset of it (enforced by the tool, not here)
    :yields: ("", chat_history, openai_chat_history) after each streamed
        chunk -- the empty string clears the input box.
    '''
    openai_chat_history.append({'role': 'user', 'content': user_message})
    chat_history.append([user_message, None])
    # Step 1: send conversation and available functions to GPT.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=openai_chat_history,
        functions=functions,
        function_call="auto",
        temperature=0,
        stream=True,
    )
    # Initialized up front so a malformed stream surfaces as an explicit
    # None rather than an UnboundLocalError.
    function_name = None
    function_to_call = None
    for chunk in response:
        delta = chunk.choices[0].delta
        # Step 2: check if GPT wanted to call a function.
        if "function_call" in delta:
            if "name" in delta.function_call:
                function_name = delta["function_call"]["name"]
                function_to_call = available_functions[function_name]
        # Step 3: the function-call stream has finished -- invoke the tool.
        elif chunk.choices[0].finish_reason == "function_call":
            # Rebuild only the conversation that is visible in the chatbot
            # (no system prompt, no prior function-call records).
            context = ""
            for interaction in chat_history[:-1]:
                context += f"User: {interaction[0]}\nAssistant: {interaction[1]}\n"
            context += f"User: {user_message}"  # include the latest message
            function_response = function_to_call(
                context=context,
                min_year=min_year,
                max_year=max_year,
                allowed_movies=allowed_movies,
            )
            # Step 4: record the function call and its response, then ask GPT
            # for a reply grounded in the tool output.
            openai_chat_history.append({
                'role': 'assistant',
                'content': None,
                # The streamed arguments are not replayed; 'null' is a valid
                # JSON placeholder accepted by the API.
                'function_call': {'name': function_name, 'arguments': 'null'},
            })
            openai_chat_history.append({
                "role": "function",
                "name": function_name,
                "content": function_response,
            })
            second_response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=openai_chat_history,
                stream=True,
            )
            for chunk2 in second_response:
                delta2 = chunk2['choices'][0]['delta']
                # FIX: the first streamed delta carries only {'role': ...};
                # indexing ['content'] unconditionally raised KeyError.
                content = delta2.get('content')
                if content is not None:
                    if chat_history[-1][1] is None:
                        chat_history[-1][1] = ""
                    chat_history[-1][1] += content
                    yield "", chat_history, openai_chat_history
                # On the last chunk, persist the full message in the
                # OpenAI-side history.
                if chunk2.choices[0].finish_reason == "stop":
                    openai_chat_history.append(
                        {'role': 'assistant', 'content': chat_history[-1][1]}
                    )
                    yield "", chat_history, openai_chat_history
        # Step 5: no function call -- stream the direct reply. (We are in the
        # elif chain, so 'function_call' is already known to be absent.)
        elif len(delta) != 0:
            # FIX: same role-only-delta guard as above.
            content = delta.get('content')
            if content is not None:
                if chat_history[-1][1] is None:
                    chat_history[-1][1] = ""
                chat_history[-1][1] += content
                yield "", chat_history, openai_chat_history
        # Last chunk of a plain reply: persist the full message.
        elif chunk.choices[0].finish_reason == 'stop':
            openai_chat_history.append(
                {'role': 'assistant', 'content': chat_history[-1][1]}
            )
            yield "", chat_history, openai_chat_history
########################
### Gradio interface ###
########################
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # OpenAI-side history: superset of the displayed conversation that also
    # holds the system prompt and function-call records. Deep-copied so the
    # module-level template is never mutated. Be careful with getting out of
    # sync with the displayed conversation.
    openai_history_state = gr.State(copy.deepcopy(initial_state))
    with gr.Column(variant='panel'):
        gr.Markdown(f"<h3 style='text-align: center; margin-bottom: 1rem'>{greeting}</h3>")
        chatbot = gr.Chatbot()
        with gr.Group():
            # Input + submit buttons
            with gr.Row():
                input_box = gr.Textbox(
                    container=False,
                    show_label=False,
                    label='Message',
                    placeholder='Type a message...',
                    scale=7,
                    autofocus=True,
                )
                submit_btn = gr.Button('Submit', variant='primary', scale=1, min_width=150)
            # Retry + clear buttons
            with gr.Row():
                retry_btn = gr.Button('Retry', variant='secondary')
                clear_btn = gr.Button('Clear', variant='secondary')
        # Filters
        gr.Markdown("<h4 style='margin-left: 1rem'>Filters</h4>")
        # Release-year range passed through to the recommender tool.
        min_year = gr.Slider(1900, 2024, 1900, label='Min year', interactive=True)
        max_year = gr.Slider(1900, 2024, 2024, label='Max year', interactive=True)
        # Optional allow-list: results must be a subset of these movies.
        allowed_movies = gr.Textbox(
            show_label=True,
            label='Allowed movie list',
            placeholder='Type a list of movies. Result will be a subset of this list.',
        )
        # Example inputs
        gr.Examples(
            [
                "What are some good action movies for kids under 10?",
                "I want to watch a film about space travel but not too scientific.",
                "I need a family-friendly movie that deals with environmental themes for an Earth Day event.",
                "Find culturally diverse films for an international film festival at our college.",
                "What are some uplifting movies for a charity fundraiser supporting mental health awareness?",
            ],
            inputs=[input_box],
        )
    # Bind events: Enter in the textbox and the Submit button both stream
    # through respond().
    gr.on(
        triggers=[input_box.submit, submit_btn.click],
        fn=respond,
        api_name='respond',
        inputs=[input_box, chatbot, openai_history_state, min_year, max_year, allowed_movies],
        outputs=[input_box, chatbot, openai_history_state],
    )
    clear_btn.click(
        # FIX: return a deep copy -- returning the shared `initial_state`
        # object let later appends in respond() mutate the module-level
        # template, corrupting every conversation after the first Clear.
        fn=lambda: ('', [], copy.deepcopy(initial_state)),
        inputs=None,
        outputs=[input_box, chatbot, openai_history_state],
        queue=False,
        api_name=False,
    )
demo.queue()
demo.launch()