mikeee committed on
Commit
b54c00e
0 Parent(s):

Duplicate from mikeee/WizardLM-13B-V1.0-Uncensored-GGML

Files changed (8)
  1. .flake8- +21 -0
  2. .gitattributes +35 -0
  3. .gitignore +11 -0
  4. .ruff.toml +17 -0
  5. .stignore +102 -0
  6. README.md +13 -0
  7. app.py +485 -0
  8. requirements.txt +6 -0
.flake8- ADDED
@@ -0,0 +1,21 @@
+ [flake8]
+ ignore =
+     # E203 whitespace before ':'
+     E203
+     D203
+     # line too long
+     E501
+ per-file-ignores =
+     # imported but unused
+     # __init__.py: F401
+     test_*.py: F401
+ exclude =
+     .git
+     __pycache__
+     docs/source/conf.py
+     old
+     build
+     dist
+     .venv
+     pad*.py, app-.py
+ max-complexity = 25
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,11 @@
+ call-activate.bat
+ okteto.yml
+ okteto-up.bat
+ install-sw.sh
+ install-sw1.sh
+ start-sshd.sh
+ pyproject.toml
+ models
+ .ruff_cache
+ run-nodemon.sh
+ app-.py
.ruff.toml ADDED
@@ -0,0 +1,17 @@
+ # Assume Python 3.10.
+ target-version = "py310"
+ # Raise the maximum line length to 300 characters.
+ line-length = 300
+
+ # pyflakes, pycodestyle, isort
+ # flake8 YTT, pydocstyle D, pylint PLC
+ select = ["F", "E", "W", "I001", "YTT", "D", "PLC"]
+ # select = ["ALL"]
+
+ # D103 Missing docstring in public function
+ # D101 Missing docstring in public class
+ # `multi-line-summary-first-line` (D212)
+ # `one-blank-line-before-class` (D203)
+ extend-ignore = ["D103", "D101", "D212", "D203"]
+
+ exclude = [".venv"]
.stignore ADDED
@@ -0,0 +1,102 @@
+ models
+ .git
+ # Byte-compiled / optimized / DLL files
+ __pycache__
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build
+ develop-eggs
+ dist
+ downloads
+ eggs
+ .eggs
+ lib
+ lib64
+ parts
+ sdist
+ var
+ wheels
+ pip-wheel-metadata
+ share/python-wheels
+ *.egg-info
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+
+ # Flask stuff:
+ instance
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build
+
+ # PyBuilder
+ target
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default
+ ipython_config.py
+
+ # pyenv
+ .python-version
+
+ # celery beat schedule file
+ celerybeat-schedule
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env
+ venv
+ ENV
+ env.bak
+ venv.bak
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mypy
+ .mypy_cache
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: TheBloke/WizardLM-13B-V1.0-Uncensored-GGML
+ emoji: 🚀
+ colorFrom: green
+ colorTo: green
+ sdk: gradio
+ sdk_version: 3.35.2
+ app_file: app.py
+ pinned: false
+ duplicated_from: mikeee/WizardLM-13B-V1.0-Uncensored-GGML
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,485 @@
+ """Run codes."""
+ # pylint: disable=line-too-long, broad-exception-caught, invalid-name, missing-function-docstring, too-many-instance-attributes, missing-class-docstring
+ # ruff: noqa: E501
+ import os
+ import time
+ from dataclasses import asdict, dataclass
+ from pathlib import Path
+ from types import SimpleNamespace
+
+ import gradio as gr
+ from about_time import about_time
+
+ # from ctransformers import AutoConfig, AutoModelForCausalLM
+ from ctransformers import AutoModelForCausalLM
+ from huggingface_hub import hf_hub_download
+ from loguru import logger
+
+ os.environ["TZ"] = "Asia/Shanghai"
+ try:
+     time.tzset()  # type: ignore # pylint: disable=no-member
+ except Exception:
+     # Windows
+     logger.warning("Windows, can't run time.tzset()")
+
+ ns = SimpleNamespace(
+     response="",
+     generator=[],
+ )
+
+ default_system_prompt = "A conversation between a user and an LLM-based AI assistant named Local Assistant. Local Assistant gives helpful and honest answers."
+
+ user_prefix = "[user]: "
+ assistant_prefix = "[assistant]: "
+
+
+ def predict_str(prompt, bot):  # bot is in fact bot_history
+     # logger.debug(f"{prompt=}, {bot=}, {timeout=}")
+
+     if bot is None:
+         bot = []
+
+     logger.debug(f"{prompt=}, {bot=}")
+
+     try:
+         # user_prompt = prompt
+         generator = generate(
+             LLM,
+             GENERATION_CONFIG,
+             system_prompt=default_system_prompt,
+             user_prompt=prompt.strip(),
+         )
+
+         ns.generator = generator  # for .then
+
+     except Exception as exc:
+         logger.error(exc)
+
+     # bot.append([prompt, f"{response} {_}"])
+     # return prompt, bot
+
+     return prompt, bot + [[prompt, None]]
+
+
+ def bot_str(bot):
+     if bot:
+         bot[-1][1] = ""
+     else:
+         bot = [["Something is wrong", ""]]
+
+     print(assistant_prefix, end=" ", flush=True)
+
+     response = ""
+
+     flag = 1
+     then = time.time()
+     for word in ns.generator:
+         # record first response time
+         if flag:
+             logger.debug(f"\t {time.time() - then:.1f}s")
+             flag = 0
+         print(word, end="", flush=True)
+         # print(word, flush=True)  # vertical stream
+         response += word
+         bot[-1][1] = response
+         yield bot
+
+
+ def predict(prompt, bot):
+     # logger.debug(f"{prompt=}, {bot=}, {timeout=}")
+     logger.debug(f"{prompt=}, {bot=}")
+
+     ns.response = ""
+     then = time.time()
+     with about_time() as atime:  # type: ignore
+         try:
+             # user_prompt = prompt
+             generator = generate(
+                 LLM,
+                 GENERATION_CONFIG,
+                 system_prompt=default_system_prompt,
+                 user_prompt=prompt.strip(),
+             )
+
+             ns.generator = generator  # for .then
+
+             print(assistant_prefix, end=" ", flush=True)
+
+             response = ""
+             buff.update(value="diggin...")
+
+             flag = 1
+             for word in generator:
+                 # record first response time
+                 if flag:
+                     logger.debug(f"\t {time.time() - then:.1f}s")
+                     flag = 0
+                 # print(word, end="", flush=True)
+                 print(word, flush=True)  # vertical stream
+                 response += word
+                 ns.response = response
+                 buff.update(value=response)
+             print("")
+             logger.debug(f"{response=}")
+         except Exception as exc:
+             logger.error(exc)
+             response = f"{exc=}"
+
+     # bot = {"inputs": [response]}
+     _ = (
+         f"(time elapsed: {atime.duration_human}, "  # type: ignore
+         f"{atime.duration/(len(prompt) + len(response)):.1f}s/char)"  # type: ignore
+     )
+
+     bot.append([prompt, f"{response} {_}"])
+
+     return prompt, bot
+
+
+ def predict_api(prompt):
+     logger.debug(f"{prompt=}")
+     ns.response = ""
+     try:
+         # user_prompt = prompt
+         _ = GenerationConfig(
+             temperature=0.2,
+             top_k=0,
+             top_p=0.9,
+             repetition_penalty=1.0,
+             max_new_tokens=512,  # adjust as needed
+             seed=42,
+             reset=False,  # reset history (cache)
+             stream=True,  # TODO stream=False and generator
+             threads=os.cpu_count() // 2,  # type: ignore # adjust for your CPU
+             stop=["<|im_end|>", "|<"],
+         )
+
+         # TODO: stream does not make sense in api?
+         generator = generate(
+             LLM, _, system_prompt=default_system_prompt, user_prompt=prompt.strip()
+         )
+         print(assistant_prefix, end=" ", flush=True)
+
+         response = ""
+         buff.update(value="diggin...")
+         for word in generator:
+             print(word, end="", flush=True)
+             response += word
+             ns.response = response
+             buff.update(value=response)
+         print("")
+         logger.debug(f"{response=}")
+     except Exception as exc:
+         logger.error(exc)
+         response = f"{exc=}"
+     # bot = {"inputs": [response]}
+     # bot = [(prompt, response)]
+
+     return response
+
+
+ def download_quant(destination_folder: str, repo_id: str, model_filename: str):
+     local_path = os.path.abspath(destination_folder)
+     return hf_hub_download(
+         repo_id=repo_id,
+         filename=model_filename,
+         local_dir=local_path,
+         local_dir_use_symlinks=True,
+     )
+
+
+ @dataclass
+ class GenerationConfig:
+     temperature: float
+     top_k: int
+     top_p: float
+     repetition_penalty: float
+     max_new_tokens: int
+     seed: int
+     reset: bool
+     stream: bool
+     threads: int
+     stop: list[str]
+
+
+ def format_prompt(system_prompt: str, user_prompt: str):
+     """Format prompt based on: https://huggingface.co/spaces/mosaicml/mpt-30b-chat/blob/main/app.py."""
+     # TODO im_start/im_end possible fix for WizardCoder
+
+     system_prompt = f"<|im_start|>system\n{system_prompt}<|im_end|>\n"
+     user_prompt = f"<|im_start|>user\n{user_prompt}<|im_end|>\n"
+     assistant_prompt = "<|im_start|>assistant\n"
+
+     return f"{system_prompt}{user_prompt}{assistant_prompt}"
+
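+ # For illustration: format_prompt("You are helpful.", "Hi") assembles the
+ # ChatML-style prompt
+ #     <|im_start|>system
+ #     You are helpful.<|im_end|>
+ #     <|im_start|>user
+ #     Hi<|im_end|>
+ #     <|im_start|>assistant
+ # which generate() below feeds to the model.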
+
+
+ def generate(
+     llm: AutoModelForCausalLM,
+     generation_config: GenerationConfig,
+     system_prompt: str = default_system_prompt,
+     user_prompt: str = "",
+ ):
+     """Run model inference; returns a generator if streaming is on."""
+     # if not user_prompt.strip():
+     return llm(
+         format_prompt(
+             system_prompt,
+             user_prompt,
+         ),
+         **asdict(generation_config),
+     )
+
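+ # Usage sketch: with stream=True in GenerationConfig, generate() yields text
+ # chunks lazily, e.g.
+ #     for word in generate(LLM, GENERATION_CONFIG, user_prompt="Hi"):
+ #         print(word, end="", flush=True)
+ # (this is exactly how bot_str and predict consume it below).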
+
+
+ _ = """full url: https://huggingface.co/TheBloke/mpt-30B-chat-GGML/blob/main/mpt-30b-chat.ggmlv0.q4_1.bin"""
+
+ # https://huggingface.co/TheBloke/mpt-30B-chat-GGML
+ _ = """
+ mpt-30b-chat.ggmlv0.q4_0.bin  q4_0  4  16.85 GB  19.35 GB  4-bit.
+ mpt-30b-chat.ggmlv0.q4_1.bin  q4_1  4  18.73 GB  21.23 GB  4-bit. Higher accuracy than q4_0 but not as high as q5_0. However has quicker inference than q5 models.
+ mpt-30b-chat.ggmlv0.q5_0.bin  q5_0  5  20.60 GB  23.10 GB
+ mpt-30b-chat.ggmlv0.q5_1.bin  q5_1  5  22.47 GB  24.97 GB
+ mpt-30b-chat.ggmlv0.q8_0.bin  q8_0  8  31.83 GB  34.33 GB
+ """
+ MODEL_FILENAME = "mpt-30b-chat.ggmlv0.q4_1.bin"
+ MODEL_FILENAME = "WizardCoder-15B-1.0.ggmlv3.q4_0.bin"  # 10.7G
+ MODEL_FILENAME = "WizardCoder-15B-1.0.ggmlv3.q4_1.bin"  # 11.9G
+
+ # https://huggingface.co/TheBloke/WizardLM-13B-V1.0-Uncensored-GGML
+ MODEL_FILENAME = "wizardlm-13b-v1.0-uncensored.ggmlv3.q4_1.bin"  # 8.4G
+
+ DESTINATION_FOLDER = "models"
+
+ REPO_ID = "TheBloke/mpt-30B-chat-GGML"
+ if "WizardCoder" in MODEL_FILENAME:
+     REPO_ID = "TheBloke/WizardCoder-15B-1.0-GGML"
+
+ if "uncensored" in MODEL_FILENAME.lower():
+     REPO_ID = "TheBloke/WizardLM-13B-V1.0-Uncensored-GGML"
+
+ logger.info(f"start dl, {REPO_ID=}, {MODEL_FILENAME=}, {DESTINATION_FOLDER=}")
+ download_quant(DESTINATION_FOLDER, REPO_ID, MODEL_FILENAME)
+ logger.info("done dl")
+
+ # if "mpt" in model_filename:
+ #     config = AutoConfig.from_pretrained("mosaicml/mpt-30b-chat", context_length=8192)
+ #     llm = AutoModelForCausalLM.from_pretrained(
+ #         os.path.abspath(f"models/{model_filename}"),
+ #         model_type="mpt",
+ #         config=config,
+ #     )
+
+ # https://huggingface.co/spaces/matthoffner/wizardcoder-ggml/blob/main/main.py
+ _ = """
+ llm = AutoModelForCausalLM.from_pretrained(
+     "TheBloke/WizardCoder-15B-1.0-GGML",
+     model_file="WizardCoder-15B-1.0.ggmlv3.q4_0.bin",
+     model_type="starcoder",
+     threads=8
+ )
+ # """
+
+ logger.debug(f"{os.cpu_count()=}")
+ logger.info("load llm")
+
+ _ = Path("models", MODEL_FILENAME).absolute().as_posix()
+ logger.debug(f"model_file: {_}, exists: {Path(_).exists()}")
+ LLM = AutoModelForCausalLM.from_pretrained(
+     # "TheBloke/WizardCoder-15B-1.0-GGML",
+     REPO_ID,  # DESTINATION_FOLDER,  # model_path_or_repo_id: str required
+     model_file=_,
+     model_type="llama",  # "starcoder", AutoConfig.from_pretrained("TheBloke/WizardLM-13B-V1.0-Uncensored-GGML")
+     threads=os.cpu_count() // 2,  # type: ignore
+ )
+
+ logger.info("done load llm")
+
+ cpu_count = os.cpu_count() // 2  # type: ignore
+ logger.debug(f"{cpu_count=}")
+
+ GENERATION_CONFIG = GenerationConfig(
+     temperature=0.2,
+     top_k=0,
+     top_p=0.9,
+     repetition_penalty=1.0,
+     max_new_tokens=512,  # adjust as needed
+     seed=42,
+     reset=False,  # reset history (cache)
+     stream=True,  # streaming per word/token
+     threads=cpu_count,
+     stop=["<|im_end|>", "|<"],  # TODO possible fix of stop
+ )
+
+ css = """
+     .importantButton {
+         background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important;
+         border: none !important;
+     }
+     .importantButton:hover {
+         background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important;
+         border: none !important;
+     }
+     .disclaimer {font-variant-caps: all-small-caps; font-size: xx-small;}
+     .xsmall {font-size: x-small;}
+ """
+ etext = """In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. """
+ examples = [
+     ["How to pick a lock? Provide detailed steps."],
+     ["Explain the plot of Cinderella in a sentence."],
+     [
+         "How long does it take to become proficient in French, and what are the best methods for retaining information?"
+     ],
+     ["What are some common mistakes to avoid when writing code?"],
+     ["Build a prompt to generate a beautiful portrait of a horse"],
+     ["Suggest four metaphors to describe the benefits of AI"],
+     ["Write a pop song about leaving home for the sandy beaches."],
+     ["Write a summary demonstrating my ability to tame lions"],
+     ["鲁迅和周树人什么关系 说中文"],
+     ["鲁迅和周树人什么关系"],
+     ["鲁迅和周树人什么关系 用英文回答"],
+     ["从前有一头牛,这头牛后面有什么?"],
+     ["正无穷大加一大于正无穷大吗?"],
+     ["正无穷大加正无穷大大于正无穷大吗?"],
+     ["-2的平方根等于什么"],
+     ["树上有5只鸟,猎人开枪打死了一只。树上还有几只鸟?"],
+     ["树上有11只鸟,猎人开枪打死了一只。树上还有几只鸟?提示:需考虑鸟可能受惊吓飞走。"],
+     ["以红楼梦的行文风格写一张委婉的请假条。不少于320字。"],
+     [f"{etext} 翻成中文,列出3个版本"],
+     [f"{etext} \n 翻成中文,保留原意,但使用文学性的语言。不要写解释。列出3个版本"],
+     ["假定 1 + 2 = 4, 试求 7 + 8"],
+     ["判断一个数是不是质数的 javascript 码"],
+     ["实现python 里 range(10)的 javascript 码"],
+     ["实现python 里 [*range(10)] 的 javascript 码"],
+     ["Erkläre die Handlung von Cinderella in einem Satz."],
+     ["Erkläre die Handlung von Cinderella in einem Satz. Auf Deutsch"],
+ ]
+
+ with gr.Blocks(
+     # title="mpt-30b-chat-ggml",
+     title=f"{MODEL_FILENAME}",
+     theme=gr.themes.Soft(text_size="sm", spacing_size="sm"),
+     css=css,
+ ) as block:
+     with gr.Accordion("🎈 Info", open=False):
+         # gr.HTML(
+         #     """<center><a href="https://huggingface.co/spaces/mikeee/mpt-30b-chat?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate"></a> and spin a CPU UPGRADE to avoid the queue</center>"""
+         # )
+         gr.Markdown(
+             f"""<h4><center>{MODEL_FILENAME}</center></h4>
+
+             It takes about 100 seconds for the initial reply to appear.
+             Average streaming rate is roughly 1 sec/char. The bot only speaks English.
+
+             Most examples are meant for another model; you may want to test
+             related prompts of your own.
+
+             Refresh the browser and try again when errors occasionally occur.
+
+             Restarting the space takes about 2 minutes if the space has gone to
+             sleep due to inactivity; if the space crashes for some reason, it
+             will also take about 2 minutes to restart. You need to refresh the
+             browser to reload the new space.
+             """,
+             elem_classes="xsmall",
+         )
+
+     # chatbot = gr.Chatbot().style(height=700)  # 500
+     chatbot = gr.Chatbot(height=700)  # 500
+     buff = gr.Textbox(show_label=False, visible=False)
+     with gr.Row():
+         with gr.Column(scale=5):
+             msg = gr.Textbox(
+                 label="Chat Message Box",
+                 placeholder="Ask me anything (press Enter or click Submit to send)",
+                 show_label=False,
+             ).style(container=False)
+         with gr.Column(scale=1, min_width=80):
+             with gr.Row():
+                 submit = gr.Button("Submit", elem_classes="xsmall")
+                 stop = gr.Button("Stop", visible=False)
+                 clear = gr.Button("Clear History", visible=True)
+     with gr.Row(visible=False):
+         with gr.Accordion("Advanced Options:", open=False):
+             with gr.Row():
+                 with gr.Column(scale=2):
+                     system = gr.Textbox(
+                         label="System Prompt",
+                         value=default_system_prompt,
+                         show_label=False,
+                     ).style(container=False)
+                 with gr.Column():
+                     with gr.Row():
+                         change = gr.Button("Change System Prompt")
+                         reset = gr.Button("Reset System Prompt")
+
+     with gr.Accordion("Example Inputs", open=True):
+         examples = gr.Examples(
+             examples=examples,
+             inputs=[msg],
+             examples_per_page=40,
+         )
+
+     # with gr.Row():
+     with gr.Accordion("Disclaimer", open=False):
+         _ = "-".join(MODEL_FILENAME.split("-")[:2])
+         gr.Markdown(
+             f"Disclaimer: {_} can produce factually incorrect output, and should not be relied on to produce "
+             f"factually accurate information. {_} was trained on various public datasets; while great efforts "
+             "have been taken to clean the pretraining data, it is possible that this model could generate lewd, "
+             "biased, or otherwise offensive outputs.",
+             elem_classes=["disclaimer"],
+         )
+     _ = """
+     msg.submit(
+         # fn=conversation.user_turn,
+         fn=predict,
+         inputs=[msg, chatbot],
+         outputs=[msg, chatbot],
+         # queue=True,
+         show_progress="full",
+         api_name="predict",
+     )
+     submit.click(
+         fn=lambda x, y: ("",) + predict(x, y)[1:],  # clear msg
+         inputs=[msg, chatbot],
+         outputs=[msg, chatbot],
+         queue=True,
+         show_progress="full",
+     )
+     # """
+     msg.submit(
+         # fn=conversation.user_turn,
+         fn=predict_str,
+         inputs=[msg, chatbot],
+         outputs=[msg, chatbot],
+         queue=True,
+         show_progress="full",
+         api_name="predict",
+     ).then(bot_str, chatbot, chatbot)
+     submit.click(
+         fn=lambda x, y: ("",) + predict_str(x, y)[1:],  # clear msg
+         inputs=[msg, chatbot],
+         outputs=[msg, chatbot],
+         queue=True,
+         show_progress="full",
+     ).then(bot_str, chatbot, chatbot)
+
+     clear.click(lambda: None, None, chatbot, queue=False)
+
+     # update buff Textbox (every: units in seconds)
+     # https://huggingface.co/spaces/julien-c/nvidia-smi/discussions
+     # does not work
+     # AttributeError: 'Blocks' object has no attribute 'run_forever'
+     # block.run_forever(lambda: ns.response, None, [buff], every=1)
+
+     with gr.Accordion("For Chat/Translation API", open=False, visible=False):
+         input_text = gr.Text()
+         api_btn = gr.Button("Go", variant="primary")
+         out_text = gr.Text()
+     api_btn.click(
+         predict_api,
+         input_text,
+         out_text,
+         # show_progress="full",
+         api_name="api",
+     )
+
+ # concurrency_count=5, max_size=20
+ # max_size=36, concurrency_count=14
+ block.queue(concurrency_count=5, max_size=20).launch(debug=True)
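
The api_name="api" wiring above exposes predict_api as a named route. A minimal
client-side sketch, assuming the gradio_client package and that the Space is
published as "mikeee/WizardLM-13B-V1.0-Uncensored-GGML" (both are assumptions,
not part of this commit):

    from gradio_client import Client

    # hypothetical Space id, adjust to wherever this app is actually hosted
    client = Client("mikeee/WizardLM-13B-V1.0-Uncensored-GGML")

    # predict_api is registered with api_name="api", so the route is "/api"
    reply = client.predict("Explain the plot of Cinderella in a sentence.", api_name="/api")
    print(reply)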
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ ctransformers==0.2.10
+ transformers==4.30.2
+ huggingface_hub
+ gradio
+ loguru
+ about-time