Update app.py
app.py CHANGED
@@ -1,5 +1,6 @@
 from openai import OpenAI
 import gradio as gr
+import time
 
 anyscale_base_url = "https://api.endpoints.anyscale.com/v1"
 
@@ -14,7 +15,14 @@ def predict(api_key, user_input):
         max_tokens=100)
 
     response = completion.choices[0].text
-    return response
+
+    # Simulate streaming by splitting the response and yielding parts with delays
+    words = response.split()
+    for i in range(0, len(words), 5):  # Adjust the step to control chunk sizes
+        yield ' '.join(words[i:i+5])
+        time.sleep(1)  # Adjust the sleep time to control the streaming speed
+
+    #return response
 
 def main():
     description = "This is a simple interface to interact with OpenAI’s Chat Completion API. Please enter your API key and your message."
@@ -23,7 +31,8 @@ def main():
     api_key_input = gr.Textbox(label="API Key", placeholder="Enter your API key here", show_label=True, type="password")
     user_input = gr.Textbox(label="Your Message", placeholder="Enter your message here")
     submit_btn = gr.Button("Submit")
-    output = gr.Textbox(label="LLM Response")
+    output = gr.Textbox(label="LLM Response", live=True)
+    #output = gr.Textbox(label="LLM Response")
 
     submit_btn.click(fn=predict, inputs=[api_key_input, user_input], outputs=output)
 
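For reference, a minimal standalone sketch of the same simulated-streaming pattern, assuming only gradio and the standard library. The function and component names below are illustrative, not taken from app.py, and the stand-in callback echoes a canned string instead of calling the API. Unlike the diff above, which yields each 5-word chunk on its own, this sketch yields a progressively longer prefix so earlier text stays visible between updates.

import time

import gradio as gr


def fake_stream(message):
    # Stand-in for predict(): split a canned reply into 5-word chunks and
    # yield a growing prefix so the output box fills in over time.
    words = f"Echoing your message back slowly: {message}".split()
    shown = []
    for i in range(0, len(words), 5):
        shown.extend(words[i:i + 5])
        time.sleep(1)  # artificial delay so the streaming is visible
        yield " ".join(shown)


with gr.Blocks() as demo:
    user_input = gr.Textbox(label="Your Message")
    submit_btn = gr.Button("Submit")
    output = gr.Textbox(label="LLM Response")
    # Gradio runs a generator callback as a stream: every yield updates `output`.
    submit_btn.click(fn=fake_stream, inputs=user_input, outputs=output)

demo.launch()

The streaming behaviour in this sketch comes from the generator callback itself: each yield pushes a new value to the output Textbox as the event handler runs.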