Spaces:
Running
Running
ryanrwatkins
committed on
Commit
•
acae979
1
Parent(s):
9e24b1c
Update app.py
Browse files
app.py
CHANGED
@@ -35,14 +35,12 @@ def download_prompt_templates():
|
|
35 |
choices = choices[:1] + sorted(choices[1:])
|
36 |
return gr.update(value=choices[0], choices=choices)
|
37 |
|
38 |
-
#def on_token_change(user_token):
|
39 |
-
# openai.api_key = user_token
|
40 |
|
41 |
def on_prompt_template_change(prompt_template):
|
42 |
if not isinstance(prompt_template, str): return
|
43 |
return prompt_templates[prompt_template]
|
44 |
|
45 |
-
def submit_message(
|
46 |
|
47 |
history = state['messages']
|
48 |
|
@@ -57,14 +55,7 @@ def submit_message(user_token, prompt, prompt_template, temperature, max_tokens,
|
|
57 |
|
58 |
prompt_msg = { "role": "user", "content": prompt }
|
59 |
|
60 |
-
|
61 |
-
history.append(prompt_msg)
|
62 |
-
history.append({
|
63 |
-
"role": "system",
|
64 |
-
"content": "Error: OpenAI API Key is not set."
|
65 |
-
})
|
66 |
-
return '', [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"Total tokens used: 0", state
|
67 |
-
|
68 |
try:
|
69 |
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
|
70 |
|
@@ -130,8 +121,8 @@ with gr.Blocks(css=css) as demo:
|
|
130 |
gr.HTML('''<br><br><br><center>You can duplicate this Space to skip the queue:<a href="https://huggingface.co/spaces/anzorq/chatgpt-demo?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a><br>
|
131 |
<p><img src="https://visitor-badge.glitch.me/badge?page_id=anzorq.chatgpt_api_demo_hf" alt="visitors"></p></center>''')
|
132 |
|
133 |
-
btn_submit.click(submit_message, [ input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
|
134 |
-
input_message.submit(submit_message, [ input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
|
135 |
btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state])
|
136 |
prompt_template.change(on_prompt_template_change, inputs=[prompt_template], outputs=[prompt_template_preview])
|
137 |
#user_token.change(on_token_change, inputs=[user_token], outputs=[])
|
|
|
35 |
choices = choices[:1] + sorted(choices[1:])
|
36 |
return gr.update(value=choices[0], choices=choices)
|
37 |
|
|
|
|
|
38 |
|
39 |
def on_prompt_template_change(prompt_template):
|
40 |
if not isinstance(prompt_template, str): return
|
41 |
return prompt_templates[prompt_template]
|
42 |
|
43 |
+
def submit_message(key, prompt, prompt_template, temperature, max_tokens, context_length, state):
|
44 |
|
45 |
history = state['messages']
|
46 |
|
|
|
55 |
|
56 |
prompt_msg = { "role": "user", "content": prompt }
|
57 |
|
58 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
59 |
try:
|
60 |
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
|
61 |
|
|
|
121 |
gr.HTML('''<br><br><br><center>You can duplicate this Space to skip the queue:<a href="https://huggingface.co/spaces/anzorq/chatgpt-demo?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a><br>
|
122 |
<p><img src="https://visitor-badge.glitch.me/badge?page_id=anzorq.chatgpt_api_demo_hf" alt="visitors"></p></center>''')
|
123 |
|
124 |
+
btn_submit.click(submit_message, [key, input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
|
125 |
+
input_message.submit(submit_message, [key, input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
|
126 |
btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state])
|
127 |
prompt_template.change(on_prompt_template_change, inputs=[prompt_template], outputs=[prompt_template_preview])
|
128 |
#user_token.change(on_token_change, inputs=[user_token], outputs=[])
|