Spaces: Paused
Alexandra Kueck committed · 91feea3
Duplicate from alexkueck/ChatBotLI2Klein
Browse files:
- .gitattributes   +34 -0
- README.md        +13 -0
- app.py           +222 -0
- cookies.json     +132 -0
- custom.css       +191 -0
- custom.js        +1 -0
- presets.py       +86 -0
- requirements.txt +8 -0
- utils.py         +195 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,13 @@
---
title: ChatBotLIModelle
emoji: 🔥
colorFrom: indigo
colorTo: yellow
sdk: gradio
sdk_version: 3.29.0
app_file: app.py
pinned: false
duplicated_from: alexkueck/ChatBotLI2Klein
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,222 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import gradio as gr
#from transformers import pipeline
import torch
from utils import *
from presets import *

#antwort=""
######################################################################
# Models and tokenizer

# Use Hugging Chat
# Create a chatbot connection
#chatbot = hugchat.ChatBot(cookie_path="cookies.json")

# Alternatively, with arbitrary models:
base_model = "project-baize/baize-v2-7b"   #load_8bit = False (in load_tokenizer_and_model)
#base_model = "TheBloke/airoboros-13B-HF"  #load_8bit = False (in load_tokenizer_and_model)
#base_model = "EleutherAI/gpt-neo-1.3B"    #load_8bit = False (in load_tokenizer_and_model)
#base_model = "TheBloke/airoboros-13B-HF"  #load_8bit = True
tokenizer, model, device = load_tokenizer_and_model(base_model, False)


########################################################################
# Use the chat AI to generate text...
def predict(text,
            chatbotGr,
            history,
            top_p,
            temperature,
            max_length_tokens,
            max_context_length_tokens,):
    if text == "":
        yield chatbotGr, history, "Empty context."
        return
    try:
        model
    except:
        yield [[text, "No Model Found"]], [], "No Model Found"
        return

    inputs = generate_prompt_with_history(text, history, tokenizer, max_length=max_context_length_tokens)
    if inputs is None:
        yield chatbotGr, history, "Input too long."
        return
    else:
        prompt, inputs = inputs
        begin_length = len(prompt)

    input_ids = inputs["input_ids"][:, -max_context_length_tokens:].to(device)
    torch.cuda.empty_cache()

    # torch.no_grad() means no gradients are computed for the tensors involved during backpropagation.
    # The network is not supposed to change here (no backprop needed), since these are inference prompts!
    with torch.no_grad():
        # All past prompts are stored in history as tuples, tagged 'Human' and 'AI' - those tags
        # therefore also serve as the stop words that mark the start of the next entry.
        for x in greedy_search(input_ids, model, tokenizer, stop_words=["[|Human|]", "[|AI|]"], max_length=max_length_tokens, temperature=temperature, top_p=top_p):
            if is_stop_word_or_prefix(x, ["[|Human|]", "[|AI|]"]) is False:
                if "[|Human|]" in x:
                    x = x[:x.index("[|Human|]")].strip()
                if "[|AI|]" in x:
                    x = x[:x.index("[|AI|]")].strip()
                x = x.strip()
                a, b = [[y[0], convert_to_markdown(y[1])] for y in history] + [[text, convert_to_markdown(x)]], history + [[text, x]]
                yield a, b, "Generating..."
            if shared_state.interrupted:
                shared_state.recover()
                try:
                    yield a, b, "Stop: Success"
                    return
                except:
                    pass
    del input_ids
    gc.collect()
    torch.cuda.empty_cache()

    try:
        yield a, b, "Generate: Success"
    except:
        pass


def reset_chat():
    #id_new = chatbot.new_conversation()
    #chatbot.change_conversation(id_new)
    reset_textbox()


##########################################################
# Use a translation AI
def translate():
    return "Kommt noch!"

# Code-generation AI
def coding():
    return "Kommt noch!"

#######################################################################
# Presentation with Gradio

with open("custom.css", "r", encoding="utf-8") as f:
    customCSS = f.read()

with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
    history = gr.State([])
    user_question = gr.State("")
    gr.Markdown("KIs am LI - wähle aus, was du bzgl. KI-Bots ausprobieren möchtest!")
    with gr.Tabs():
        with gr.TabItem("LI-Chat"):
            with gr.Row():
                gr.HTML(title)
                status_display = gr.Markdown("Erfolg", elem_id="status_display")
            gr.Markdown(description_top)
            with gr.Row(scale=1).style(equal_height=True):
                with gr.Column(scale=5):
                    with gr.Row(scale=1):
                        chatbotGr = gr.Chatbot(elem_id="LI_chatbot").style(height="100%")
                    with gr.Row(scale=1):
                        with gr.Column(scale=12):
                            user_input = gr.Textbox(
                                show_label=False, placeholder="Gib deinen Text / Frage ein."
                            ).style(container=False)
                        with gr.Column(min_width=100, scale=1):
                            submitBtn = gr.Button("Absenden")
                        with gr.Column(min_width=100, scale=1):
                            cancelBtn = gr.Button("Stoppen")
                    with gr.Row(scale=1):
                        emptyBtn = gr.Button(
                            "🧹 Neuer Chat",
                        )
                with gr.Column():
                    with gr.Column(min_width=50, scale=1):
                        with gr.Tab(label="Parameter zum Model"):
                            gr.Markdown("# Parameters")
                            top_p = gr.Slider(
                                minimum=0,
                                maximum=1.0,
                                value=0.95,
                                step=0.05,
                                interactive=True,
                                label="Top-p",
                            )
                            temperature = gr.Slider(
                                minimum=0.1,
                                maximum=2.0,
                                value=1,
                                step=0.1,
                                interactive=True,
                                label="Temperature",
                            )
                            max_length_tokens = gr.Slider(
                                minimum=0,
                                maximum=512,
                                value=512,
                                step=8,
                                interactive=True,
                                label="Max Generation Tokens",
                            )
                            max_context_length_tokens = gr.Slider(
                                minimum=0,
                                maximum=4096,
                                value=2048,
                                step=128,
                                interactive=True,
                                label="Max History Tokens",
                            )
            gr.Markdown(description)

        with gr.TabItem("Übersetzungen"):
            with gr.Row():
                gr.Textbox(
                    show_label=False, placeholder="Ist noch in Arbeit..."
                ).style(container=False)
        with gr.TabItem("Code-Generierungen"):
            with gr.Row():
                gr.Textbox(
                    show_label=False, placeholder="Ist noch in Arbeit..."
                ).style(container=False)

    predict_args = dict(
        fn=predict,
        inputs=[
            user_question,
            chatbotGr,
            history,
            top_p,
            temperature,
            max_length_tokens,
            max_context_length_tokens,
        ],
        outputs=[chatbotGr, history, status_display],
        show_progress=True,
    )

    # New chat
    reset_args = dict(
        #fn=reset_chat, inputs=[], outputs=[user_input, status_display]
        fn=reset_textbox, inputs=[], outputs=[user_input, status_display]
    )

    # Chatbot
    transfer_input_args = dict(
        fn=transfer_input, inputs=[user_input], outputs=[user_question, user_input, submitBtn], show_progress=True
    )

    # Listeners for a click on the start button or for Return
    predict_event1 = user_input.submit(**transfer_input_args).then(**predict_args)
    predict_event2 = submitBtn.click(**transfer_input_args).then(**predict_args)

    # Listener for reset...
    emptyBtn.click(
        reset_state,
        outputs=[chatbotGr, history, status_display],
        show_progress=True,
    )
    emptyBtn.click(**reset_args)

demo.title = "LI Chat"
#demo.queue(concurrency_count=1).launch(share=True)
demo.queue(concurrency_count=1).launch(debug=True)
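Note: the two-step wiring above (a first callback that stashes the textbox content, then a chained generator that streams partial output) is what lets predict() update the chat while tokens arrive. A minimal self-contained sketch of the same pattern, with everything (echo_stream and the toy components) invented for illustration and not part of this Space:

import time
import gradio as gr

# Step 1: stash the question in state and clear the textbox.
def stash_input(text):
    return text, gr.update(value="")

# Step 2: a generator function - Gradio renders each yield as it arrives.
def echo_stream(question):
    partial = ""
    for word in question.split():
        partial += word + " "
        time.sleep(0.1)
        yield partial

with gr.Blocks() as demo:
    question = gr.State("")
    box = gr.Textbox(show_label=False)
    out = gr.Markdown()
    # submit -> then(...) chaining, as in app.py's transfer_input_args/predict_args
    box.submit(stash_input, [box], [question, box]).then(echo_stream, [question], [out])

if __name__ == "__main__":
    demo.queue().launch()  # a queue is required for streaming generator outputs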
cookies.json
ADDED
@@ -0,0 +1,132 @@
[
  {
    "domain": ".huggingface.co",
    "expirationDate": 1684947627,
    "hostOnly": false,
    "httpOnly": false,
    "name": "intercom-session-hgve3glw",
    "path": "/",
    "sameSite": "lax",
    "secure": false,
    "session": false,
    "storeId": null,
    "value": ""
  },
  {
    "domain": ".huggingface.co",
    "expirationDate": 1716107825,
    "hostOnly": false,
    "httpOnly": false,
    "name": "__stripe_mid",
    "path": "/",
    "sameSite": "strict",
    "secure": true,
    "session": false,
    "storeId": null,
    "value": "ee917974-9f17-465f-bd5f-98c3ef273dcebfc299"
  },
  {
    "domain": ".huggingface.co",
    "expirationDate": 1719131825.057236,
    "hostOnly": false,
    "httpOnly": false,
    "name": "_ga",
    "path": "/",
    "sameSite": null,
    "secure": false,
    "session": false,
    "storeId": null,
    "value": "GA1.1.1854576425.1683459014"
  },
  {
    "domain": ".huggingface.co",
    "expirationDate": 1715265897.480276,
    "hostOnly": false,
    "httpOnly": true,
    "name": "token",
    "path": "/",
    "sameSite": "lax",
    "secure": true,
    "session": false,
    "storeId": null,
    "value": "XPKKEnVzdMeoBdDLQWVipFyNlQAEAHWQohUpGAfcoHwPXNZogPpxHYWbdDcGRdiSrZcOCHFsKuPvVIQwMsybldJAmgkzemIAcjPHwizDfPeitRXgmSlfPpDGFBvFHVsM"
  },
  {
    "domain": ".huggingface.co",
    "expirationDate": 1707672827,
    "hostOnly": false,
    "httpOnly": false,
    "name": "intercom-device-id-hgve3glw",
    "path": "/",
    "sameSite": "lax",
    "secure": false,
    "session": false,
    "storeId": null,
    "value": "7ff02e75-b8a1-43e8-8af3-2520b153983e"
  },
  {
    "domain": ".huggingface.co",
    "expirationDate": 1684573625,
    "hostOnly": false,
    "httpOnly": false,
    "name": "__stripe_sid",
    "path": "/",
    "sameSite": "strict",
    "secure": true,
    "session": false,
    "storeId": null,
    "value": "e18d3fcd-4185-49bc-b24e-463d3eb18f443c0c01"
  },
  {
    "domain": ".huggingface.co",
    "expirationDate": 1719131825.056149,
    "hostOnly": false,
    "httpOnly": false,
    "name": "_ga_8Q63TH4CSL",
    "path": "/",
    "sameSite": null,
    "secure": false,
    "session": false,
    "storeId": null,
    "value": "GS1.1.1684571540.41.1.1684571825.0.0.0"
  },
  {
    "domain": ".huggingface.co",
    "expirationDate": 1684658225,
    "hostOnly": false,
    "httpOnly": false,
    "name": "_gid",
    "path": "/",
    "sameSite": null,
    "secure": false,
    "session": false,
    "storeId": null,
    "value": "GA1.2.1709365894.1683962834"
  },
  {
    "domain": "huggingface.co",
    "expirationDate": 1716194076.862726,
    "hostOnly": true,
    "httpOnly": true,
    "name": "hf-chat",
    "path": "/",
    "sameSite": "no_restriction",
    "secure": true,
    "session": false,
    "storeId": null,
    "value": "65609f48-0d3b-4b69-931b-cd572b1fc88d"
  },
  {
    "domain": ".huggingface.co",
    "expirationDate": 1707672827,
    "hostOnly": false,
    "httpOnly": false,
    "name": "intercom-id-hgve3glw",
    "path": "/",
    "sameSite": "lax",
    "secure": false,
    "session": false,
    "storeId": null,
    "value": "80725bd4-464f-425b-b9c9-313cf5b23012"
  }
]
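Note: these exported browser cookies exist for the commented-out HuggingChat path in app.py. A sketch of how that path would consume them, using only the calls already named in app.py's comments (hugchat.ChatBot(cookie_path=...), new_conversation(), change_conversation()); the hugchat API has changed between versions, so treat this as an assumption-laden illustration rather than the Space's actual code:

from hugchat import hugchat

# Authenticate against HuggingChat with the exported cookies.
chatbot = hugchat.ChatBot(cookie_path="cookies.json")

# Start a fresh conversation and switch to it, as reset_chat() intended.
id_new = chatbot.new_conversation()
chatbot.change_conversation(id_new)
print(chatbot.chat("Hallo!"))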
custom.css
ADDED
@@ -0,0 +1,191 @@
:root {
    --chatbot-color-light: #F3F3F3;
    --chatbot-color-dark: #121111;
}

/* status_display */
#status_display {
    display: flex;
    min-height: 2.5em;
    align-items: flex-end;
    justify-content: flex-end;
}
#status_display p {
    font-size: .85em;
    font-family: monospace;
    color: var(--body-text-color-subdued);
}


/* usage_display */
#usage_display {
    height: 1em;
}
#usage_display p {
    padding: 0 1em;
    font-size: .85em;
    font-family: monospace;
    color: var(--body-text-color-subdued);
}
/* list */
ol:not(.options), ul:not(.options) {
    padding-inline-start: 2em !important;
}

/* Thank @Keldos-Li for fixing it */
/* Light mode (default) */
#chuanhu_chatbot {
    background-color: var(--chatbot-color-light) !important;
    color: #000000 !important;
}
[data-testid = "bot"] {
    background-color: #FFFFFF !important;
}
[data-testid = "user"] {
    background-color: #95EC69 !important;
}

/* Dark mode */
.dark #chuanhu_chatbot {
    background-color: var(--chatbot-color-dark) !important;
    color: #FFFFFF !important;
}
.dark [data-testid = "bot"] {
    background-color: #2C2C2C !important;
}
.dark [data-testid = "user"] {
    background-color: #26B561 !important;
}

#chuanhu_chatbot {
    height: 100%;
    min-height: 400px;
}

[class *= "message"] {
    border-radius: var(--radius-xl) !important;
    border: none;
    padding: var(--spacing-xl) !important;
    font-size: var(--text-md) !important;
    line-height: var(--line-md) !important;
    min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
    min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
}
[data-testid = "bot"] {
    max-width: 85%;
    border-bottom-left-radius: 0 !important;
}
[data-testid = "user"] {
    max-width: 85%;
    width: auto !important;
    border-bottom-right-radius: 0 !important;
}
/* Table */
table {
    margin: 1em 0;
    border-collapse: collapse;
    empty-cells: show;
}
td, th {
    border: 1.2px solid var(--border-color-primary) !important;
    padding: 0.2em;
}
thead {
    background-color: rgba(175,184,193,0.2);
}
thead th {
    padding: .5em .2em;
}
/* Inline code */
#chuanhu_chatbot code {
    display: inline;
    white-space: break-spaces;
    border-radius: 6px;
    margin: 0 2px 0 2px;
    padding: .2em .4em .1em .4em;
    background-color: rgba(175,184,193,0.2);
}
/* Code block */
#chuanhu_chatbot pre code {
    display: block;
    overflow: auto;
    white-space: pre;
    background-color: hsla(0, 0%, 0%, 80%) !important;
    border-radius: 10px;
    padding: 1.4em 1.2em 0em 1.4em;
    margin: 1.2em 2em 1.2em 0.5em;
    color: #FFF;
    box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2);
}
/* Highlight */
#chuanhu_chatbot .highlight { background-color: transparent }
#chuanhu_chatbot .highlight .hll { background-color: #49483e }
#chuanhu_chatbot .highlight .c { color: #75715e } /* Comment */
#chuanhu_chatbot .highlight .err { color: #960050; background-color: #1e0010 } /* Error */
#chuanhu_chatbot .highlight .k { color: #66d9ef } /* Keyword */
#chuanhu_chatbot .highlight .l { color: #ae81ff } /* Literal */
#chuanhu_chatbot .highlight .n { color: #f8f8f2 } /* Name */
#chuanhu_chatbot .highlight .o { color: #f92672 } /* Operator */
#chuanhu_chatbot .highlight .p { color: #f8f8f2 } /* Punctuation */
#chuanhu_chatbot .highlight .ch { color: #75715e } /* Comment.Hashbang */
#chuanhu_chatbot .highlight .cm { color: #75715e } /* Comment.Multiline */
#chuanhu_chatbot .highlight .cp { color: #75715e } /* Comment.Preproc */
#chuanhu_chatbot .highlight .cpf { color: #75715e } /* Comment.PreprocFile */
#chuanhu_chatbot .highlight .c1 { color: #75715e } /* Comment.Single */
#chuanhu_chatbot .highlight .cs { color: #75715e } /* Comment.Special */
#chuanhu_chatbot .highlight .gd { color: #f92672 } /* Generic.Deleted */
#chuanhu_chatbot .highlight .ge { font-style: italic } /* Generic.Emph */
#chuanhu_chatbot .highlight .gi { color: #a6e22e } /* Generic.Inserted */
#chuanhu_chatbot .highlight .gs { font-weight: bold } /* Generic.Strong */
#chuanhu_chatbot .highlight .gu { color: #75715e } /* Generic.Subheading */
#chuanhu_chatbot .highlight .kc { color: #66d9ef } /* Keyword.Constant */
#chuanhu_chatbot .highlight .kd { color: #66d9ef } /* Keyword.Declaration */
#chuanhu_chatbot .highlight .kn { color: #f92672 } /* Keyword.Namespace */
#chuanhu_chatbot .highlight .kp { color: #66d9ef } /* Keyword.Pseudo */
#chuanhu_chatbot .highlight .kr { color: #66d9ef } /* Keyword.Reserved */
#chuanhu_chatbot .highlight .kt { color: #66d9ef } /* Keyword.Type */
#chuanhu_chatbot .highlight .ld { color: #e6db74 } /* Literal.Date */
#chuanhu_chatbot .highlight .m { color: #ae81ff } /* Literal.Number */
#chuanhu_chatbot .highlight .s { color: #e6db74 } /* Literal.String */
#chuanhu_chatbot .highlight .na { color: #a6e22e } /* Name.Attribute */
#chuanhu_chatbot .highlight .nb { color: #f8f8f2 } /* Name.Builtin */
#chuanhu_chatbot .highlight .nc { color: #a6e22e } /* Name.Class */
#chuanhu_chatbot .highlight .no { color: #66d9ef } /* Name.Constant */
#chuanhu_chatbot .highlight .nd { color: #a6e22e } /* Name.Decorator */
#chuanhu_chatbot .highlight .ni { color: #f8f8f2 } /* Name.Entity */
#chuanhu_chatbot .highlight .ne { color: #a6e22e } /* Name.Exception */
#chuanhu_chatbot .highlight .nf { color: #a6e22e } /* Name.Function */
#chuanhu_chatbot .highlight .nl { color: #f8f8f2 } /* Name.Label */
#chuanhu_chatbot .highlight .nn { color: #f8f8f2 } /* Name.Namespace */
#chuanhu_chatbot .highlight .nx { color: #a6e22e } /* Name.Other */
#chuanhu_chatbot .highlight .py { color: #f8f8f2 } /* Name.Property */
#chuanhu_chatbot .highlight .nt { color: #f92672 } /* Name.Tag */
#chuanhu_chatbot .highlight .nv { color: #f8f8f2 } /* Name.Variable */
#chuanhu_chatbot .highlight .ow { color: #f92672 } /* Operator.Word */
#chuanhu_chatbot .highlight .w { color: #f8f8f2 } /* Text.Whitespace */
#chuanhu_chatbot .highlight .mb { color: #ae81ff } /* Literal.Number.Bin */
#chuanhu_chatbot .highlight .mf { color: #ae81ff } /* Literal.Number.Float */
#chuanhu_chatbot .highlight .mh { color: #ae81ff } /* Literal.Number.Hex */
#chuanhu_chatbot .highlight .mi { color: #ae81ff } /* Literal.Number.Integer */
#chuanhu_chatbot .highlight .mo { color: #ae81ff } /* Literal.Number.Oct */
#chuanhu_chatbot .highlight .sa { color: #e6db74 } /* Literal.String.Affix */
#chuanhu_chatbot .highlight .sb { color: #e6db74 } /* Literal.String.Backtick */
#chuanhu_chatbot .highlight .sc { color: #e6db74 } /* Literal.String.Char */
#chuanhu_chatbot .highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */
#chuanhu_chatbot .highlight .sd { color: #e6db74 } /* Literal.String.Doc */
#chuanhu_chatbot .highlight .s2 { color: #e6db74 } /* Literal.String.Double */
#chuanhu_chatbot .highlight .se { color: #ae81ff } /* Literal.String.Escape */
#chuanhu_chatbot .highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */
#chuanhu_chatbot .highlight .si { color: #e6db74 } /* Literal.String.Interpol */
#chuanhu_chatbot .highlight .sx { color: #e6db74 } /* Literal.String.Other */
#chuanhu_chatbot .highlight .sr { color: #e6db74 } /* Literal.String.Regex */
#chuanhu_chatbot .highlight .s1 { color: #e6db74 } /* Literal.String.Single */
#chuanhu_chatbot .highlight .ss { color: #e6db74 } /* Literal.String.Symbol */
#chuanhu_chatbot .highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */
#chuanhu_chatbot .highlight .fm { color: #a6e22e } /* Name.Function.Magic */
#chuanhu_chatbot .highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */
#chuanhu_chatbot .highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */
#chuanhu_chatbot .highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */
#chuanhu_chatbot .highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */
#chuanhu_chatbot .highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */
custom.js
ADDED
@@ -0,0 +1 @@
// custom javascript here
presets.py
ADDED
@@ -0,0 +1,86 @@
# -*- coding:utf-8 -*-
import gradio as gr


title = """<h1 align="left" style="min-width:200px; margin-top:0;"> KI am LI </h1>"""
description_top = """\
<div align="left">
<p> Hinterlegtes KI-Modell: EleutherAI/gpt-neo-1.3B</p>
<p>
Disclaimer: Das KI-Modell, welches hier verwendet wird, kommt vom Hugging Face Model-Hub und ist ein Open-Source-Modell.
Diese Demo darf nicht für kommerzielle Zwecke genutzt werden!
Der Output des Modells ist nicht zensiert und die Autoren der KI stimmen nicht unbedingt mit den Inhalten überein.
Gebrauch auf eigenes Risiko.
</p>
</div>
"""
description = """\
<div align="center" style="margin:16px 0">
Diese Demo setzt auf dem Open-Source-Modell 'EleutherAI/gpt-neo-1.3B' von HuggingChat auf.
</div>
"""
CONCURRENT_COUNT = 100


ALREADY_CONVERTED_MARK = "<!-- ALREADY CONVERTED BY PARSER. -->"

small_and_beautiful_theme = gr.themes.Soft(
    primary_hue=gr.themes.Color(
        c50="#02C160",
        c100="rgba(2, 193, 96, 0.2)",
        c200="#02C160",
        c300="rgba(2, 193, 96, 0.32)",
        c400="rgba(2, 193, 96, 0.32)",
        c500="rgba(2, 193, 96, 1.0)",
        c600="rgba(2, 193, 96, 1.0)",
        c700="rgba(2, 193, 96, 0.32)",
        c800="rgba(2, 193, 96, 0.32)",
        c900="#02C160",
        c950="#02C160",
    ),
    secondary_hue=gr.themes.Color(
        c50="#576b95",
        c100="#576b95",
        c200="#576b95",
        c300="#576b95",
        c400="#576b95",
        c500="#576b95",
        c600="#576b95",
        c700="#576b95",
        c800="#576b95",
        c900="#576b95",
        c950="#576b95",
    ),
    neutral_hue=gr.themes.Color(
        name="gray",
        c50="#f9fafb",
        c100="#f3f4f6",
        c200="#e5e7eb",
        c300="#d1d5db",
        c400="#B2B2B2",
        c500="#808080",
        c600="#636363",
        c700="#515151",
        c800="#393939",
        c900="#272727",
        c950="#171717",
    ),
    radius_size=gr.themes.sizes.radius_sm,
).set(
    button_primary_background_fill="#06AE56",
    button_primary_background_fill_dark="#06AE56",
    button_primary_background_fill_hover="#07C863",
    button_primary_border_color="#06AE56",
    button_primary_border_color_dark="#06AE56",
    button_primary_text_color="#FFFFFF",
    button_primary_text_color_dark="#FFFFFF",
    button_secondary_background_fill="#F2F2F2",
    button_secondary_background_fill_dark="#2B2B2B",
    button_secondary_text_color="#393939",
    button_secondary_text_color_dark="#FFFFFF",
    # background_fill_primary="#F7F7F7",
    # background_fill_primary_dark="#1F1F1F",
    block_title_text_color="*primary_500",
    block_title_background_fill="*primary_100",
    input_background_fill="#F6F6F6",
)
requirements.txt
ADDED
@@ -0,0 +1,8 @@
gradio
torch
accelerate
transformers==4.29.1
sentencepiece # fast tokenizer - necessary when torch_dtype=torch.float16 is enabled in load_tokenizer_and_model
ADDED
@@ -0,0 +1,195 @@
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterator, List, Tuple, Type
import logging
import json
import os
import datetime
import hashlib
import csv
import requests
import re
import html
import torch
import sys
import gc
from pygments.lexers import guess_lexer, ClassNotFound
import gradio as gr
from pygments import highlight
from pygments.lexers import guess_lexer, get_lexer_by_name
from pygments.formatters import HtmlFormatter
import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM


def reset_state():
    return [], [], "Reset Done"

def reset_textbox():
    return gr.update(value=""), ""

def cancel_outputing():
    return "Stop Done"

def transfer_input(inputs):
    textbox = reset_textbox()
    return (
        inputs,
        gr.update(value=""),
        gr.Button.update(visible=True),
    )

def is_stop_word_or_prefix(s: str, stop_words: list) -> bool:
    for stop_word in stop_words:
        if s.endswith(stop_word):
            return True
        for i in range(1, len(stop_word)):
            if s.endswith(stop_word[:i]):
                return True
    return False

def generate_prompt_with_history(text, history, tokenizer, max_length=2048):
    #prompt = "The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.\n[|Human|]Hello!\n[|AI|]Hi!"
    prompt = "Das folgende ist eine Unterhaltung zwischen einem Menschen und einem KI-Assistenten, der Baize genannt wird. Baize ist ein open-source KI-Assistent, der von UCSD entwickelt wurde. Der Mensch und der KI-Assistent chatten abwechselnd miteinander. Die Antworten des KI Assistenten sind immer so ausführlich wie möglich und in Markdown Schreibweise. Die Antworten des KI-Assistenten vermeiden Themen und Antworten zu unethischen, kontroversen oder sensiblen Themen. Die Antworten sind immer sehr höflich formuliert..\n[|Human|]Hallo!\n[|AI|]Hi!"
    history = ["\n[|Human|]{}\n[|AI|]{}".format(x[0], x[1]) for x in history]
    history.append("\n[|Human|]{}\n[|AI|]".format(text))
    history_text = ""
    flag = False
    # Walk backwards through the history and keep as many turns as still fit into max_length tokens.
    for x in history[::-1]:
        if tokenizer(prompt+history_text+x, return_tensors="pt")['input_ids'].size(-1) <= max_length:
            history_text = x + history_text
            flag = True
        else:
            break
    if flag:
        return prompt+history_text, tokenizer(prompt+history_text, return_tensors="pt")
    else:
        return None


def load_tokenizer_and_model(base_model, load_8bit=False):
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    tokenizer = AutoTokenizer.from_pretrained(base_model, use_fast=False)
    if device == "cuda":
        model = AutoModelForCausalLM.from_pretrained(
            base_model,
            load_in_8bit=load_8bit,
            torch_dtype=torch.float16,
            device_map="auto",
        )
    else:
        model = AutoModelForCausalLM.from_pretrained(
            base_model, device_map={"": device}, low_cpu_mem_usage=True
        )

    #if not load_8bit:
    #    model.half()  # seems to fix bugs for some users.

    model.eval()
    return tokenizer, model, device

# Greedy Search
def greedy_search(input_ids: torch.Tensor,
                  model: torch.nn.Module,
                  tokenizer: transformers.PreTrainedTokenizer,
                  stop_words: list,
                  max_length: int,
                  temperature: float = 1.0,
                  top_p: float = 1.0,
                  top_k: int = 25) -> Iterator[str]:
    generated_tokens = []
    past_key_values = None
    current_length = 1
    for i in range(max_length):
        with torch.no_grad():
            if past_key_values is None:
                outputs = model(input_ids)
            else:
                # Reuse the cached key/value states and only feed the newest token.
                outputs = model(input_ids[:, -1:], past_key_values=past_key_values)
            logits = outputs.logits[:, -1, :]
            past_key_values = outputs.past_key_values

        # apply temperature
        logits /= temperature

        probs = torch.softmax(logits, dim=-1)
        # apply top_p
        probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
        probs_sum = torch.cumsum(probs_sort, dim=-1)
        mask = probs_sum - probs_sort > top_p
        probs_sort[mask] = 0.0

        # apply top_k
        #if top_k is not None:
        #    probs_sort1, _ = torch.topk(probs_sort, top_k)
        #    min_top_probs_sort = torch.min(probs_sort1, dim=-1, keepdim=True).values
        #    probs_sort = torch.where(probs_sort < min_top_probs_sort, torch.full_like(probs_sort, float(0.0)), probs_sort)

        probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
        next_token = torch.multinomial(probs_sort, num_samples=1)
        next_token = torch.gather(probs_idx, -1, next_token)

        input_ids = torch.cat((input_ids, next_token), dim=-1)

        generated_tokens.append(next_token[0].item())
        text = tokenizer.decode(generated_tokens)

        yield text
        if any([x in text for x in stop_words]):
            del past_key_values
            del logits
            del probs
            del probs_sort
            del probs_idx
            del probs_sum
            gc.collect()
            return

def convert_to_markdown(text):
    text = text.replace("$", "&#36;")  # escape dollar signs so Markdown/MathJax leaves them alone
    def replace_leading_tabs_and_spaces(line):
        new_line = []

        for char in line:
            if char == "\t":
                new_line.append("&#9;")
            elif char == " ":
                new_line.append("&nbsp;")
            else:
                break
        return "".join(new_line) + line[len(new_line):]

    markdown_text = ""
    lines = text.split("\n")
    in_code_block = False

    for line in lines:
        if in_code_block is False and line.startswith("```"):
            in_code_block = True
            markdown_text += f"{line}\n"
        elif in_code_block is True and line.startswith("```"):
            in_code_block = False
            markdown_text += f"{line}\n"
        elif in_code_block:
            markdown_text += f"{line}\n"
        else:
            line = replace_leading_tabs_and_spaces(line)
            line = re.sub(r"^(#)", r"\\\1", line)
            markdown_text += f"{line}  \n"

    return markdown_text


class State:
    interrupted = False

    def interrupt(self):
        self.interrupted = True

    def recover(self):
        self.interrupted = False

shared_state = State()
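Note: despite its name, greedy_search above actually performs temperature plus top-p (nucleus) sampling. The heart of it is the masking step on the sorted probabilities. A self-contained toy run of exactly that step, on a hand-made distribution (the numbers are invented purely for illustration):

import torch

# Toy nucleus (top-p) filtering, mirroring the masking logic in greedy_search.
probs = torch.tensor([[0.40, 0.25, 0.20, 0.10, 0.05]])
top_p = 0.7

probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
probs_sum = torch.cumsum(probs_sort, dim=-1)           # [0.40, 0.65, 0.85, 0.95, 1.00]
mask = probs_sum - probs_sort > top_p                  # True once the mass *before* a token exceeds top_p
probs_sort[mask] = 0.0                                 # keeps tokens 0-2, drops the tail
probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))  # renormalize the survivors

print(probs_sort)  # tensor([[0.4706, 0.2941, 0.2353, 0.0000, 0.0000]])

Only the three most probable tokens survive (their original mass, 0.85, is the smallest prefix exceeding top_p = 0.7), and torch.multinomial then samples the next token from this renormalized nucleus.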