Spaces:
Running
Running
SnJForever
committed on
Commit
•
4014ba9
1
Parent(s):
41ab737
Update app.py
Browse files
app.py
CHANGED
@@ -32,12 +32,15 @@ def download_prompt_templates():
|
|
32 |
def on_token_change(user_token):
|
33 |
openai.api_key = user_token
|
34 |
|
|
|
|
|
|
|
35 |
def on_prompt_template_change(prompt_template):
|
36 |
if not isinstance(prompt_template, str): return
|
37 |
return prompt_templates[prompt_template]
|
38 |
|
39 |
-
def submit_message(user_token, prompt, prompt_template, temperature, max_tokens, context_length, state):
|
40 |
-
|
41 |
history = state['messages']
|
42 |
|
43 |
if not prompt:
|
@@ -51,6 +54,14 @@ def submit_message(user_token, prompt, prompt_template, temperature, max_tokens,
|
|
51 |
|
52 |
prompt_msg = { "role": "user", "content": prompt }
|
53 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
54 |
if not user_token:
|
55 |
history.append(prompt_msg)
|
56 |
history.append({
|
@@ -60,13 +71,25 @@ def submit_message(user_token, prompt, prompt_template, temperature, max_tokens,
|
|
60 |
return '', [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"Total tokens used: 0", state
|
61 |
|
62 |
try:
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
70 |
except Exception as e:
|
71 |
history.append(prompt_msg)
|
72 |
history.append({
|
@@ -105,13 +128,17 @@ with gr.Blocks(css=css) as demo:
|
|
105 |
elem_id="header")
|
106 |
|
107 |
with gr.Row():
|
108 |
-
with gr.Column():
|
109 |
chatbot = gr.Chatbot(elem_id="chatbox")
|
110 |
-
|
|
|
|
|
|
|
|
|
111 |
btn_submit = gr.Button("Submit")
|
112 |
total_tokens_str = gr.Markdown(elem_id="total_tokens_str")
|
113 |
btn_clear_conversation = gr.Button("🔃 Start New Conversation")
|
114 |
-
with gr.Column():
|
115 |
gr.Markdown("Enter your OpenAI API Key. You can get one [here](https://platform.openai.com/account/api-keys).", elem_id="label")
|
116 |
user_token = gr.Textbox(value='', placeholder="OpenAI API Key", type="password", show_label=False)
|
117 |
prompt_template = gr.Dropdown(label="Set a custom insruction for the chatbot:", choices=list(prompt_templates.keys()))
|
@@ -124,8 +151,9 @@ with gr.Blocks(css=css) as demo:
|
|
124 |
# gr.HTML('''<br><br><br><center>You can duplicate this Space to skip the queue:<a href="https://huggingface.co/spaces/anzorq/chatgpt-demo?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a><br>
|
125 |
# <p><img src="https://visitor-badge.glitch.me/badge?page_id=anzorq.chatgpt_api_demo_hf" alt="visitors"></p></center>''')
|
126 |
|
127 |
-
|
128 |
-
|
|
|
129 |
btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state])
|
130 |
prompt_template.change(on_prompt_template_change, inputs=[prompt_template], outputs=[prompt_template_preview])
|
131 |
user_token.change(on_token_change, inputs=[user_token], outputs=[])
|
@@ -135,4 +163,6 @@ with gr.Blocks(css=css) as demo:
|
|
135 |
|
136 |
|
137 |
demo.queue(concurrency_count=10)
|
138 |
-
demo.launch(
|
|
|
|
|
|
32 |
def on_token_change(user_token):
    """Install the user-supplied OpenAI API key for subsequent API calls.

    NOTE(review): this assigns a process-global (`openai.api_key`), so in a
    multi-user Space one user's key can be observed by concurrent requests —
    confirm whether per-request keys are needed.
    """
    openai.api_key = user_token
|
34 |
|
35 |
+
def on_type_change(type):
    """Debug hook fired when the TEXT/IMAGE mode dropdown changes.

    Only echoes the new selection to stdout; returns None.

    NOTE(review): the parameter shadows the builtin ``type``. Gradio invokes
    this callback positionally, so the name could likely be changed safely —
    kept as-is here to avoid touching the visible interface.
    """
    print(type)
|
37 |
+
|
38 |
def on_prompt_template_change(prompt_template):
    """Look up the prompt text for the selected template name.

    Returns None for non-string inputs (e.g. when Gradio clears the
    dropdown); a string key not present in ``prompt_templates`` raises
    KeyError, as in the original.
    """
    if isinstance(prompt_template, str):
        return prompt_templates[prompt_template]
    return None
|
41 |
|
42 |
+
def submit_message(type_select,user_token, prompt, prompt_template, temperature, max_tokens, context_length, state):
|
43 |
+
print(type_select)
|
44 |
history = state['messages']
|
45 |
|
46 |
if not prompt:
|
|
|
54 |
|
55 |
prompt_msg = { "role": "user", "content": prompt }
|
56 |
|
57 |
+
if not type_select:
|
58 |
+
history.append(prompt_msg)
|
59 |
+
history.append({
|
60 |
+
"role": "system",
|
61 |
+
"content": "Error: Type is not set."
|
62 |
+
})
|
63 |
+
return '', [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"Total tokens used: 0", state
|
64 |
+
|
65 |
if not user_token:
|
66 |
history.append(prompt_msg)
|
67 |
history.append({
|
|
|
71 |
return '', [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"Total tokens used: 0", state
|
72 |
|
73 |
try:
|
74 |
+
if type_select=='TEXT':
|
75 |
+
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
|
76 |
+
|
77 |
+
history.append(prompt_msg)
|
78 |
+
history.append(completion.choices[0].message.to_dict())
|
79 |
+
|
80 |
+
state['total_tokens'] += completion['usage']['total_tokens']
|
81 |
+
elif type_select=='IMAGE':
|
82 |
+
response = openai.Image.create(
|
83 |
+
prompt=prompt,
|
84 |
+
n=1,
|
85 |
+
size="512x512"
|
86 |
+
)
|
87 |
+
image_url = response['data'][0]['url']
|
88 |
+
history.append(prompt)
|
89 |
+
history.append(image_url)
|
90 |
+
|
91 |
+
# state['total_tokens'] += completion['usage']['total_tokens']
|
92 |
+
|
93 |
except Exception as e:
|
94 |
history.append(prompt_msg)
|
95 |
history.append({
|
|
|
128 |
elem_id="header")
|
129 |
|
130 |
with gr.Row():
|
131 |
+
with gr.Column(scale=0.7):
|
132 |
chatbot = gr.Chatbot(elem_id="chatbox")
|
133 |
+
with gr.Row():
|
134 |
+
with gr.Column(scale=0.2, min_width=0):
|
135 |
+
type_select = gr.Dropdown(show_label=False, choices= ["TEXT", "IMAGE"],value="TEXT",interactive=True)
|
136 |
+
with gr.Column(scale=0.8):
|
137 |
+
input_message = gr.Textbox(show_label=False, placeholder="Enter text and press enter", visible=True).style(container=False)
|
138 |
btn_submit = gr.Button("Submit")
|
139 |
total_tokens_str = gr.Markdown(elem_id="total_tokens_str")
|
140 |
btn_clear_conversation = gr.Button("🔃 Start New Conversation")
|
141 |
+
with gr.Column(scale=0.3):
|
142 |
gr.Markdown("Enter your OpenAI API Key. You can get one [here](https://platform.openai.com/account/api-keys).", elem_id="label")
|
143 |
user_token = gr.Textbox(value='', placeholder="OpenAI API Key", type="password", show_label=False)
|
144 |
prompt_template = gr.Dropdown(label="Set a custom insruction for the chatbot:", choices=list(prompt_templates.keys()))
|
|
|
151 |
# gr.HTML('''<br><br><br><center>You can duplicate this Space to skip the queue:<a href="https://huggingface.co/spaces/anzorq/chatgpt-demo?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a><br>
|
152 |
# <p><img src="https://visitor-badge.glitch.me/badge?page_id=anzorq.chatgpt_api_demo_hf" alt="visitors"></p></center>''')
|
153 |
|
154 |
+
type_select.change(on_type_change,inputs=[type_select], outputs=[])
|
155 |
+
btn_submit.click(submit_message, [type_select,user_token, input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
|
156 |
+
input_message.submit(submit_message, [type_select,user_token, input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
|
157 |
btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state])
|
158 |
prompt_template.change(on_prompt_template_change, inputs=[prompt_template], outputs=[prompt_template_preview])
|
159 |
user_token.change(on_token_change, inputs=[user_token], outputs=[])
|
|
|
163 |
|
164 |
|
165 |
# Queue requests so up to 10 generations run concurrently.
demo.queue(concurrency_count=10)
# SECURITY(review): admin credentials are hard-coded and now published with
# this commit — rotate this password immediately and load it from an
# environment variable (e.g. os.environ["APP_PASSWORD"]) instead of a literal.
demo.launch(
    auth=("admin", "IBTGeE3NrPsrViDI"),
    height='800px')
|