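# app.py for the "OpenAI ChatGPT Demo" Gradio Space: a chat UI on top of the
# gpt-3.5-turbo chat completions API, with optional prompt templates pulled
# from f/awesome-chatgpt-prompts and a shared 3000-token-per-conversation
# limit that is lifted when the user enters their own OpenAI API key.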
import gradio as gr
import os
import openai
import requests

# Default key comes from the environment; a key entered in the UI overrides it
# via on_token_change below.
openai.api_key = os.environ.get("OPENAI_API_KEY")

prompt_templates = {"Default ChatGPT": ""}


def get_empty_state():
    # Per-session state: running token count plus the raw chat message history.
    return {"total_tokens": 0, "messages": []}
def download_prompt_templates():
    url = "https://raw.githubusercontent.com/f/awesome-chatgpt-prompts/main/prompts.csv"
    response = requests.get(url)
    for line in response.text.splitlines()[1:]:
        parts = line.split('","')
        if len(parts) != 2:
            continue  # skip malformed rows
        act, prompt = parts
        prompt_templates[act.replace('"', '')] = prompt.replace('"', '')
    choices = list(prompt_templates.keys())
    return gr.update(value=choices[0], choices=choices)
def on_token_change(user_token):
    # Prefer the user-supplied key; fall back to the environment variable.
    openai.api_key = user_token or os.environ.get("OPENAI_API_KEY")


def on_prompt_template_change(prompt_template):
    if not isinstance(prompt_template, str):
        return  # happens when the dropdown is cleared
    return prompt_templates[prompt_template]
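

# submit_message builds the chat completion payload as
#   [optional {"role": "system", ...} template] + prior history + the new user turn,
# so every request replays the whole conversation; the running total_tokens in
# state is what the 3000-token budget is checked against.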
def submit_message(user_token, prompt, prompt_template, temperature, max_tokens, state):
    history = state['messages']

    if not prompt:
        # Nothing to send: just re-render the current conversation.
        return (
            gr.update(value='', visible=bool(user_token) or state['total_tokens'] < 3000),
            [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)],
            f"Total tokens used: {state['total_tokens']} / 3000",
            state,
        )

    prompt_template = prompt_templates[prompt_template]

    system_prompt = []
    if prompt_template:
        system_prompt = [{"role": "system", "content": prompt_template}]

    prompt_msg = {"role": "user", "content": prompt}

    try:
        completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history + [prompt_msg], temperature=temperature, max_tokens=max_tokens)

        history.append(prompt_msg)
        history.append(completion.choices[0].message.to_dict())

        state['total_tokens'] += completion['usage']['total_tokens']
    except Exception as e:
        # Surface API errors (bad key, rate limit, context overflow) as a chat message.
        history.append(prompt_msg)
        history.append({
            "role": "system",
            "content": f"Error: {e}"
        })

    # Hide the input box once the shared 3000-token budget is spent, unless the
    # user supplied their own API key.
    total_tokens_used_msg = f"Total tokens used: {state['total_tokens']} / 3000" if not user_token else ""
    chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]
    input_visibility = bool(user_token) or state['total_tokens'] < 3000

    return gr.update(value='', visible=input_visibility), chat_messages, total_tokens_used_msg, state
def clear_conversation():
    # Reset the input box, chat history, token counter and session state.
    return gr.update(value=None, visible=True), None, "", get_empty_state()
css = """
#col-container {max-width: 80%; margin-left: auto; margin-right: auto;}
#chatbox {min-height: 400px;}
#header {text-align: center;}
#prompt_template_preview {padding: 1em; border-width: 1px; border-style: solid; border-color: #e0e0e0; border-radius: 4px;}
#total_tokens_str {text-align: right; font-size: 0.8em; color: #666; height: 1em;}
#label {font-size: 0.8em; padding: 0.5em; margin: 0;}
"""
with gr.Blocks(css=css) as demo:

    state = gr.State(get_empty_state())

    with gr.Column(elem_id="col-container"):
        gr.Markdown("""## OpenAI ChatGPT Demo
                    Using the official API (gpt-3.5-turbo model)<br>
                    Prompt templates from [awesome-chatgpt-prompts](https://github.com/f/awesome-chatgpt-prompts).<br>
                    Current limit is 3000 tokens per conversation.""",
                    elem_id="header")

        with gr.Row():
            with gr.Column():
                chatbot = gr.Chatbot(elem_id="chatbox")
                input_message = gr.Textbox(show_label=False, placeholder="Enter text and press enter", visible=True).style(container=False)
                total_tokens_str = gr.Markdown(elem_id="total_tokens_str")
                btn_clear_conversation = gr.Button("🔃 Start New Conversation")
            with gr.Column():
                prompt_template = gr.Dropdown(label="Set a custom instruction for the chatbot:", choices=list(prompt_templates.keys()))
                prompt_template_preview = gr.Markdown(elem_id="prompt_template_preview")
                gr.Markdown("Enter your own OpenAI API Key to remove the 3000 token limit. You can get it [here](https://platform.openai.com/account/api-keys).", elem_id="label")
                user_token = gr.Textbox(placeholder="OpenAI API Key", type="password", show_label=False)
                with gr.Accordion("Advanced parameters", open=False):
                    temperature = gr.Slider(minimum=0, maximum=2.0, value=0.7, step=0.1, interactive=True, label="Temperature (higher = more creative/chaotic)")
                    max_tokens = gr.Slider(minimum=100, maximum=4096, value=1000, step=1, interactive=True, label="Max tokens per response")

        gr.HTML('''<br><br><br><center><a href="https://huggingface.co/spaces/anzorq/chatgpt-demo?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>You can duplicate this Space.<br>
                Don't forget to set your own <a href="https://platform.openai.com/account/api-keys">OpenAI API Key</a> environment variable in Settings.</center>''')

    # Wire up the event handlers.
    input_message.submit(submit_message, [user_token, input_message, prompt_template, temperature, max_tokens, state], [input_message, chatbot, total_tokens_str, state])
    btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state])
    prompt_template.change(on_prompt_template_change, inputs=[prompt_template], outputs=[prompt_template_preview])
    user_token.change(on_token_change, inputs=[user_token], outputs=[])

    # Fetch the prompt templates once when the app loads.
    demo.load(download_prompt_templates, inputs=None, outputs=[prompt_template])

demo.launch(debug=True, height='800px')
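
# Library versions assumed by this script (not pinned here): gradio 3.x (the
# .style() call on Textbox was removed in gradio 4), openai<1.0 (the
# openai.ChatCompletion interface was removed in openai 1.0), plus requests.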