ghost717 committed
Commit c4ef1ac • 1 parent: 2bf5def
gradio chatbot examples
Files changed:
- .gitignore +243 -1
- app.py +6 -4
- chat-10-api.py +47 -0
- chat-2-interface.py +15 -0
- chat-3-interface.py +9 -0
- chat-4.py +21 -0
- chat-5.py +10 -0
- chat-6.py +19 -0
- chat-7.py +18 -0
- chat-8-api.py +26 -0
- chat-9-api.py +25 -0
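All of the chat-*.py files below build on the same gr.ChatInterface contract: a handler fn(message, history), where history is a list of [user_text, bot_text] pairs from earlier turns, that returns (or yields) the bot reply. A minimal sketch of that contract for orientation (not one of the committed files):

# Orientation sketch of the gr.ChatInterface handler contract; not part of this commit.
import gradio as gr

def reply(message, history):
    # history holds [user_text, bot_text] pairs from previous turns
    return f"Echo ({len(history)} earlier turns): {message}"

gr.ChatInterface(reply).launch()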
.gitignore CHANGED
@@ -3,4 +3,246 @@
 
 env
 venv
-temp
+temp
+gradio_cached_examples
+
+# File created using '.gitignore Generator' for Visual Studio Code: https://bit.ly/vscode-gig
+# Created by https://www.toptal.com/developers/gitignore/api/windows,visualstudiocode,git,python
+# Edit at https://www.toptal.com/developers/gitignore?templates=windows,visualstudiocode,git,python
+
+### Git ###
+# Created by git for backups. To disable backups in Git:
+# $ git config --global mergetool.keepBackup false
+*.orig
+
+# Created by git when using merge tools for conflicts
+*.BACKUP.*
+*.BASE.*
+*.LOCAL.*
+*.REMOTE.*
+*_BACKUP_*.txt
+*_BASE_*.txt
+*_LOCAL_*.txt
+*_REMOTE_*.txt
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+### Python Patch ###
+# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
+poetry.toml
+
+# ruff
+.ruff_cache/
+
+# LSP config files
+pyrightconfig.json
+
+### VisualStudioCode ###
+.vscode/*
+!.vscode/settings.json
+!.vscode/tasks.json
+!.vscode/launch.json
+!.vscode/extensions.json
+!.vscode/*.code-snippets
+
+# Local History for Visual Studio Code
+.history/
+
+# Built Visual Studio Code Extensions
+*.vsix
+
+### VisualStudioCode Patch ###
+# Ignore all local history of files
+.history
+.ionide
+
+### Windows ###
+# Windows thumbnail cache files
+Thumbs.db
+Thumbs.db:encryptable
+ehthumbs.db
+ehthumbs_vista.db
+
+# Dump file
+*.stackdump
+
+# Folder config file
+[Dd]esktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msix
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
+
+# End of https://www.toptal.com/developers/gitignore/api/windows,visualstudiocode,git,python
+
+# Custom rules (everything added below won't be overriden by 'Generate .gitignore File' if you use 'Update' option)
+
app.py CHANGED
@@ -1,7 +1,9 @@
+import time
 import gradio as gr
 
-def
-
+def slow_echo(message, history):
+    for i in range(len(message)):
+        time.sleep(0.3)
+        yield "You typed: " + message[: i+1]
 
-
-demo.launch()
+gr.ChatInterface(slow_echo).launch()
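app.py now yields progressively longer strings, which gr.ChatInterface renders as a streamed reply. For contrast, a minimal non-streaming counterpart (a sketch, not part of the commit): return a single string and the reply appears all at once.

# Hypothetical non-streaming counterpart of slow_echo: return instead of yield.
import gradio as gr

def echo(message, history):
    return "You typed: " + message

gr.ChatInterface(echo).launch()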
chat-10-api.py ADDED
@@ -0,0 +1,47 @@
+import gradio as gr
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
+from threading import Thread
+
+tokenizer = AutoTokenizer.from_pretrained("togethercomputer/RedPajama-INCITE-Chat-3B-v1")
+model = AutoModelForCausalLM.from_pretrained("togethercomputer/RedPajama-INCITE-Chat-3B-v1", torch_dtype=torch.float16)
+model = model.to('cuda:0')
+
+class StopOnTokens(StoppingCriteria):
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+        stop_ids = [29, 0]
+        for stop_id in stop_ids:
+            if input_ids[0][-1] == stop_id:
+                return True
+        return False
+
+def predict(message, history):
+    history_transformer_format = history + [[message, ""]]
+    stop = StopOnTokens()
+
+    messages = "".join(["".join(["\n<human>:"+item[0], "\n<bot>:"+item[1]])
+                        for item in history_transformer_format])
+
+    model_inputs = tokenizer([messages], return_tensors="pt").to("cuda")
+    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
+    generate_kwargs = dict(
+        model_inputs,
+        streamer=streamer,
+        max_new_tokens=1024,
+        do_sample=True,
+        top_p=0.95,
+        top_k=1000,
+        temperature=1.0,
+        num_beams=1,
+        stopping_criteria=StoppingCriteriaList([stop])
+    )
+    t = Thread(target=model.generate, kwargs=generate_kwargs)
+    t.start()
+
+    partial_message = ""
+    for new_token in streamer:
+        if new_token != '<':
+            partial_message += new_token
+            yield partial_message
+
+gr.ChatInterface(predict).launch()
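The core idiom in chat-10-api.py is running the blocking model.generate call on a background thread and draining tokens from a TextIteratorStreamer as they arrive. A self-contained sketch of just that idiom; the tiny CPU-friendly checkpoint sshleifer/tiny-gpt2 is an assumption chosen so the sketch runs anywhere, not the model this commit uses:

# Distilled thread-plus-streamer idiom from chat-10-api.py (illustrative sketch).
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")

def stream(prompt):
    inputs = tokenizer([prompt], return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    # generate() blocks until finished, so it runs on a thread while we consume the streamer
    Thread(target=model.generate,
           kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20)).start()
    text = ""
    for token in streamer:
        text += token
        yield text  # each partial string replaces the previous one in the chat UI

for partial in stream("Hello"):
    print(partial)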
chat-2-interface.py ADDED
@@ -0,0 +1,15 @@
+import gradio as gr
+
+# def greet(name):
+#     return "Hello " + name + "!!"
+
+# demo = gr.Interface(fn=greet, inputs="text", outputs="text")
+# demo.launch()
+
+import random
+
+def random_response(message, history):
+    return random.choice(["Yes", "No"])
+
+
+gr.ChatInterface(random_response).launch()
chat-3-interface.py ADDED
@@ -0,0 +1,9 @@
+import time
+import gradio as gr
+
+def slow_echo(message, history):
+    for i in range(len(message)):
+        time.sleep(0.3)
+        yield "You typed: " + message[: i+1]
+
+gr.ChatInterface(slow_echo).launch()
chat-4.py ADDED
@@ -0,0 +1,21 @@
+import gradio as gr
+
+def yes_man(message, history):
+    if message.endswith("?"):
+        return "Yes"
+    else:
+        return "Ask me anything!"
+
+gr.ChatInterface(
+    yes_man,
+    chatbot=gr.Chatbot(height=300),
+    textbox=gr.Textbox(placeholder="Ask me a yes or no question", container=False, scale=7),
+    title="Yes Man",
+    description="Ask Yes Man any question",
+    theme="soft",
+    examples=["Hello", "Am I cool?", "Are tomatoes vegetables?"],
+    cache_examples=True,
+    retry_btn=None,
+    undo_btn="Delete Previous",
+    clear_btn="Clear",
+).launch()
chat-5.py ADDED
@@ -0,0 +1,10 @@
+import gradio as gr
+import time
+
+def count_files(message, history):
+    num_files = len(message["files"])
+    return f"You uploaded {num_files} files"
+
+demo = gr.ChatInterface(fn=count_files, examples=[{"text": "Hello", "files": []}], title="Echo Bot", multimodal=True)
+
+demo.launch()
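With multimodal=True, ChatInterface passes the user turn to fn as a dict with "text" and "files" keys (files are paths to the uploaded files) rather than a plain string. A hypothetical direct call, not in the commit, showing the payload shape chat-5.py's handler expects:

# Hypothetical direct invocation illustrating the multimodal payload shape.
def count_files(message, history):
    return f"You uploaded {len(message['files'])} files"

print(count_files({"text": "see attached", "files": ["cat.png", "dog.png"]}, []))
# -> "You uploaded 2 files"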
chat-6.py ADDED
@@ -0,0 +1,19 @@
+import gradio as gr
+import time
+
+def echo(message, history, system_prompt, tokens):
+    response = f"System prompt: {system_prompt}\n Message: {message}."
+    for i in range(min(len(response), int(tokens))):
+        time.sleep(0.05)
+        yield response[: i+1]
+
+demo = gr.ChatInterface(echo,
+                        additional_inputs=[
+                            gr.Textbox("You are helpful AI.", label="System Prompt"),
+                            gr.Slider(10, 100)
+                        ]
+                        )
+
+if __name__ == "__main__":
+    demo.queue().launch()
+
chat-7.py ADDED
@@ -0,0 +1,18 @@
+import gradio as gr
+import time
+
+def echo(message, history, system_prompt, tokens):
+    response = f"System prompt: {system_prompt}\n Message: {message}."
+    for i in range(min(len(response), int(tokens))):
+        time.sleep(0.05)
+        yield response[: i+1]
+
+with gr.Blocks() as demo:
+    system_prompt = gr.Textbox("You are helpful AI.", label="System Prompt")
+    slider = gr.Slider(10, 100, render=False)
+
+    gr.ChatInterface(
+        echo, additional_inputs=[system_prompt, slider]
+    )
+
+demo.launch()
chat-8-api.py ADDED
@@ -0,0 +1,26 @@
+
+from langchain.schema import AIMessage, HumanMessage
+# ChatOpenAI import consolidated to langchain_openai (the duplicate langchain/langchain_community imports were dropped)
+from langchain_openai import ChatOpenAI
+import os
+import gradio as gr
+from dotenv import load_dotenv
+load_dotenv()
+
+OPENAI_API_KEY = os.environ['GROQ_API_KEY']  # note: reads the GROQ_API_KEY variable
+
+# llm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')
+llm = ChatOpenAI(temperature=1.0, model='gpt-4-turbo', api_key=OPENAI_API_KEY)
+# llm = ChatOpenAI(temperature=1.0, model='gpt-4o')
+
+def predict(message, history):
+    history_langchain_format = []
+    for human, ai in history:
+        history_langchain_format.append(HumanMessage(content=human))
+        history_langchain_format.append(AIMessage(content=ai))
+    history_langchain_format.append(HumanMessage(content=message))
+    gpt_response = llm.invoke(history_langchain_format)  # calling llm(...) directly is deprecated
+    return gpt_response.content
+
+gr.ChatInterface(predict).launch()
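chat-8-api.py reads GROQ_API_KEY but then targets OpenAI models. If Groq was the intent, ChatOpenAI can point at Groq's OpenAI-compatible endpoint instead; a sketch under that assumption (the base_url and model name below are not from the commit):

# Hypothetical Groq variant of chat-8-api.py's llm, via the OpenAI-compatible API.
import os
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    temperature=1.0,
    model="llama3-8b-8192",                     # assumed Groq-hosted model name
    api_key=os.environ["GROQ_API_KEY"],
    base_url="https://api.groq.com/openai/v1",  # Groq's OpenAI-compatible endpoint
)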
chat-9-api.py ADDED
@@ -0,0 +1,25 @@
+from openai import OpenAI
+import gradio as gr
+
+api_key = "sk-..."  # Replace with your key
+client = OpenAI(api_key=api_key)
+
+def predict(message, history):
+    history_openai_format = []
+    for human, assistant in history:
+        history_openai_format.append({"role": "user", "content": human})
+        history_openai_format.append({"role": "assistant", "content": assistant})
+    history_openai_format.append({"role": "user", "content": message})
+
+    response = client.chat.completions.create(model='gpt-3.5-turbo',
+                                              messages=history_openai_format,
+                                              temperature=1.0,
+                                              stream=True)
+
+    partial_message = ""
+    for chunk in response:
+        if chunk.choices[0].delta.content is not None:
+            partial_message = partial_message + chunk.choices[0].delta.content
+            yield partial_message
+
+gr.ChatInterface(predict).launch()