Christoph Holthaus
committed on
Commit f8a999d
Parent(s): 9f4ac5e
WORKING VERSION
app.py
CHANGED
@@ -82,9 +82,21 @@ def generate(
     # Use LLaMa to create chat completion
     chat_completion = llm.create_chat_completion(conversation, stream=True)
 
+    outputs = []
     # Yield the chat completions
     for completion in chat_completion:
-
+        if "content" in completion["choices"][0]["delta"]:
+            outputs.append(completion["choices"][0]['delta']['content'])
+            yield "".join(outputs)
+
+    # t = Thread(target=model.generate, kwargs=generate_kwargs)
+    # t.start()
+
+    # outputs = []
+    # for text in streamer:
+    #     outputs.append(text)
+    #     yield "".join(outputs)
+
 
 chat_interface = gr.ChatInterface(
     fn=generate,
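For context, a minimal sketch of how this streaming loop plugs into gr.ChatInterface, assuming `llm` is a llama_cpp.Llama instance loaded elsewhere in app.py and that `conversation` is assembled from the ChatInterface's message/history arguments; neither step appears in this hunk, and the model path below is a placeholder:

import gradio as gr
from llama_cpp import Llama

# Hypothetical model path; the real checkpoint is loaded elsewhere in app.py.
llm = Llama(model_path="model.gguf")

def generate(message, history):
    # Rebuild the conversation as role/content dicts (assumed convention;
    # the diff only shows the streaming loop, not this step).
    conversation = []
    for user_msg, assistant_msg in history:
        conversation.append({"role": "user", "content": user_msg})
        conversation.append({"role": "assistant", "content": assistant_msg})
    conversation.append({"role": "user", "content": message})

    # Use LLaMa to create a streamed chat completion, as in the commit.
    chat_completion = llm.create_chat_completion(conversation, stream=True)

    # Accumulate delta chunks and yield the growing reply so Gradio
    # renders the response incrementally.
    outputs = []
    for completion in chat_completion:
        if "content" in completion["choices"][0]["delta"]:
            outputs.append(completion["choices"][0]["delta"]["content"])
            yield "".join(outputs)

chat_interface = gr.ChatInterface(fn=generate)
chat_interface.launch()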