Spaces • Sleeping
XiangJinYu committed • Commit a95340f
1 Parent(s): cbce99f
Upload 2 files
README.md CHANGED
@@ -1,12 +1,14 @@
 ---
-title:
+title: Super Cat
 emoji: 😻
-colorFrom:
+colorFrom: gray
-colorTo:
+colorTo: yellow
 sdk: gradio
-sdk_version: 3.
+sdk_version: 3.37.0
 app_file: app.py
-pinned:
+pinned: true
+license: apache-2.0
+duplicated_from: IsaaXiang/LLaMA-2-CHAT
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,377 @@
# pylint: disable=line-too-long, broad-exception-caught, invalid-name, missing-function-docstring, too-many-instance-attributes, missing-class-docstring
# ruff: noqa: E501
import os  # used to set the TZ environment variable
import platform  # used to identify the host machine
import random
import time
from dataclasses import asdict, dataclass
from pathlib import Path

# from types import SimpleNamespace  # imported in an earlier draft, no longer used

import gradio as gr
import psutil  # CPU core count
import getpass  # imported but currently unused
from about_time import about_time  # context manager for timing generation
from ctransformers import AutoModelForCausalLM  # loads GGML models on CPU
from dl_hf_model import dl_hf_model  # downloads a model file from Hugging Face
from loguru import logger


filename_list = [  # candidate quantizations of the Wizard-Vicuna 7B model
    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q2_K.bin",
    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_L.bin",
    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_M.bin",
    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q3_K_S.bin",
    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_0.bin",
    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_1.bin",
    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin",
    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_S.bin",
    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_0.bin",
    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_1.bin",
    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_M.bin",
    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_K_S.bin",
    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q6_K.bin",
    "Wizard-Vicuna-7B-Uncensored.ggmlv3.q8_0.bin",
]

URL = "https://huggingface.co/TheBloke/Wizard-Vicuna-7B-Uncensored-GGML/raw/main/Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin"  # 4.05G

# Earlier candidate models; each assignment below overrides the previous one.
# url = "https://huggingface.co/savvamadar/ggml-gpt4all-j-v1.3-groovy/blob/main/ggml-gpt4all-j-v1.3-groovy.bin"
url = "https://huggingface.co/TheBloke/Llama-2-13B-GGML/blob/main/llama-2-13b.ggmlv3.q4_K_S.bin"  # 7.37G
url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q3_K_L.bin"  # 6.93G
url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q4_K_M.bin"  # 7.87G

url = "https://huggingface.co/localmodels/Llama-2-13B-Chat-ggml/blob/main/llama-2-13b-chat.ggmlv3.q4_K_S.bin"  # 7.37G

_ = (  # flag: are we running on a known constrained host?
    "golay" in platform.node()
    or "okteto" in platform.node()
    or Path("/kaggle").exists()
    # or psutil.cpu_count(logical=False) < 4
    or 1  # always true: run the small model in hf
)

if _:  # on a constrained host, fall back to the smallest (q2_K) quantization
    url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q2_K.bin"
    # url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q2_K.bin"  # 2.87G
    # url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q4_K_M.bin"  # 2.87G


prompt_template = """[INST] <<SYS>>
You are a cute kitten. Speak no more than 50 words at a time.
<</SYS>>

{question} [/INST]
"""
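
# Illustrative example of the Llama-2 chat format this template produces:
# prompt_template.format(question="Hi!") ->
# "[INST] <<SYS>>\nYou are a cute kitten. Speak no more than 50 words at a time.\n<</SYS>>\n\nHi! [/INST]\n"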

_ = psutil.cpu_count(logical=False) - 1  # physical cores minus one
cpu_count: int = int(_) if _ else 1  # fall back to 1 if that leaves nothing
logger.debug(f"{cpu_count=}")

LLM = None

try:
    model_loc, file_size = dl_hf_model(url)  # download the model from the url
except Exception as exc_:
    logger.error(exc_)
    raise SystemExit(1) from exc_  # abort if the download fails

LLM = AutoModelForCausalLM.from_pretrained(  # load the GGML model
    model_loc,
    model_type="llama",
    # threads=cpu_count,
)

logger.info(f"done load llm {model_loc=} {file_size=}G")

os.environ["TZ"] = "Asia/Shanghai"  # set the time zone to Shanghai
try:
    time.tzset()  # type: ignore # pylint: disable=no-member
except Exception:
    # time.tzset() is unavailable on Windows
    logger.warning("Windows, cant run time.tzset()")

_ = """
ns = SimpleNamespace(
    response="",
    generator=(_ for _ in []),
)
# """

@dataclass  # generation parameters
class GenerationConfig:
    temperature: float = 0.7
    top_k: int = 50
    top_p: float = 0.9
    repetition_penalty: float = 1.0
    max_new_tokens: int = 512
    seed: int = 42
    reset: bool = False
    stream: bool = True
    # threads: int = cpu_count
    # stop: list[str] = field(default_factory=lambda: [stop_string])


def generate(
    question: str,
    llm=LLM,
    config: GenerationConfig = GenerationConfig(),
):
    """Run model inference; returns a generator if streaming is enabled."""
    # _ = prompt_template.format(question=question)
    # print(_)

    prompt = prompt_template.format(question=question)  # fill in the prompt

    return llm(  # call the LLM with the generation kwargs
        prompt,
        **asdict(config),
    )


logger.debug(f"{asdict(GenerationConfig())=}")  # log the default generation config
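
# Minimal usage sketch (illustrative, outside Gradio). With the default
# stream=True, ctransformers returns a token generator; with stream=False,
# a complete string:
# for token in generate("What do kittens eat?"):
#     print(token, end="", flush=True)
# answer = generate("What do kittens eat?", config=GenerationConfig(stream=False))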

def user(user_message, history):  # append the user message, keep the textbox content
    # return user_message, history + [[user_message, None]]
    history.append([user_message, None])
    return user_message, history  # keep user_message


def user1(user_message, history):  # append the user message, clear the textbox
    # return user_message, history + [[user_message, None]]
    history.append([user_message, None])
    return "", history  # clear user_message


def bot_(history):  # dummy bot used for testing the streaming UI
    user_message = history[-1][0]
    resp = random.choice(["How are you?", "I love you", "I'm very hungry"])
    bot_message = user_message + ": " + resp
    history[-1][1] = ""
    for character in bot_message:
        history[-1][1] += character
        time.sleep(0.02)
        yield history

    history[-1][1] = resp
    yield history


def bot(history):  # generate a reply for the latest user message
    user_message = history[-1][0]
    response = []

    logger.debug(f"{user_message=}")

    with about_time() as atime:  # type: ignore # measure generation time
        flag = 1
        prefix = ""
        then = time.time()

        logger.debug("about to generate")

        config = GenerationConfig(reset=True)
        for elm in generate(user_message, config=config):
            if flag == 1:
                logger.debug("in the loop")
                prefix = f"({time.time() - then:.2f}s) "  # time to first token
                flag = 0
                print(prefix, end="", flush=True)
                logger.debug(f"{prefix=}")
            print(elm, end="", flush=True)
            # logger.debug(f"{elm}")

            response.append(elm)
            history[-1][1] = prefix + "".join(response)  # stream the partial reply
            yield history

    _ = (
        f"(time elapsed: {atime.duration_human}, "  # type: ignore # timing info
        f"{atime.duration/len(''.join(response)):.2f}s/char)"  # type: ignore
    )

    history[-1][1] = "".join(response) + f"\n{_}"  # final reply plus timing info
    yield history
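
# Note: Gradio treats a generator handler as a streaming response -- every
# `yield history` repaints the Chatbot with the partial answer so far.
# Illustrative standalone check (not part of the app; runs the model):
# for h in bot([["Hi", None]]):
#     print(h[-1][1])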

def predict_api(prompt):  # plain (non-streaming) endpoint for API use
    logger.debug(f"{prompt=}")
    try:
        # user_prompt = prompt
        config = GenerationConfig(
            temperature=0.7,
            top_k=10,
            top_p=0.9,
            repetition_penalty=1.0,
            max_new_tokens=512,  # adjust as needed
            seed=42,
            reset=True,  # reset history (cache)
            stream=False,
            # threads=cpu_count,
            # stop=prompt_prefix[1:2],
        )

        response = generate(
            prompt,
            config=config,
        )

        logger.debug(f"api: {response=}")
    except Exception as exc:
        logger.error(exc)
        response = f"{exc=}"
    # bot = {"inputs": [response]}
    # bot = [(prompt, response)]

    return response

# CSS tweaks for the interface
css = """
.importantButton {
    background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important;
    border: none !important;
}
.importantButton:hover {
    background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important;
    border: none !important;
}
.disclaimer {font-variant-caps: all-small-caps; font-size: xx-small;}
.xsmall {font-size: x-small;}
"""
etext = """In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. """
examples_list = [  # example inputs shown below the textbox
    ["Hi, little cat!"],
    ["Hello."],
]
logger.info("start block")

with gr.Blocks(  # build the interface
    title=f"{Path(model_loc).name}",
    theme=gr.themes.Soft(text_size="sm", spacing_size="sm"),
    css=css,
) as block:
    # buff_var = gr.State("")
    with gr.Accordion("🎈 Info", open=False):  # collapsible model-info panel
        # gr.HTML(
        #     """<center><a href="https://huggingface.co/spaces/mikeee/mpt-30b-chat?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate"></a> and spin a CPU UPGRADE to avoid the queue</center>"""
        # )
        gr.Markdown(
            f"""<h5><center>{Path(model_loc).name}</center></h5>
            Super Cat runs LLaMA-2-13b-chat on a 16 GB CPU instance, so responses are slow; thanks for your patience. The model's data is mostly English, so asking questions in English is recommended.""",
            elem_classes="xsmall",
        )

    # chatbot = gr.Chatbot().style(height=700)  # 500
    chatbot = gr.Chatbot(height=500)  # chat panel

    # buff = gr.Textbox(show_label=False, visible=True)

    with gr.Row():  # input area
        with gr.Column(scale=5):
            msg = gr.Textbox(
                label="Chat Message Box",
                placeholder="Ask me anything (press Shift+Enter or click Submit to send)",
                show_label=False,
                # container=False,
                lines=6,
                max_lines=30,
                show_copy_button=True,
                # ).style(container=False)
            )
        with gr.Column(scale=1, min_width=50):
            with gr.Row():
                submit = gr.Button("Send", elem_classes="xsmall")
                stop = gr.Button("Stop", visible=True)
                clear = gr.Button("Clear chat history", visible=True)

    with gr.Accordion("Example Inputs", open=True):  # example-input panel
        examples = gr.Examples(
            examples=examples_list,
            inputs=[msg],
            examples_per_page=40,
        )

    # with gr.Row():
    with gr.Accordion("Disclaimer", open=False):  # disclaimer panel
        _ = Path(model_loc).name
        gr.Markdown(
            "Disclaimer: Isaac - AI (POWERED BY LLAMA 2) may produce output that is factually "
            "incorrect and should not be relied upon to produce factually accurate information. "
            "Isaac - AI (POWERED BY LLAMA 2) was trained on various public datasets; although "
            "great effort has been made to clean the pretraining data, the model may still "
            "produce objectionable, biased, or otherwise offensive output.",
            elem_classes=["disclaimer"],
        )

    msg_submit_event = msg.submit(  # on Enter: run user() then bot()
        # fn=conversation.user_turn,
        fn=user,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=True,
        show_progress="full",
        # api_name=None,
    ).then(bot, chatbot, chatbot, queue=True)
    submit_click_event = submit.click(  # on click: run user1() (clears the textbox) then bot()
        # fn=lambda x, y: ("",) + user(x, y)[1:],  # clear msg
        fn=user1,  # clear msg
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=True,
        # queue=False,
        show_progress="full",
        # api_name=None,
    ).then(bot, chatbot, chatbot, queue=True)
    stop.click(  # cancel any in-flight generation
        fn=None,
        inputs=None,
        outputs=None,
        cancels=[msg_submit_event, submit_click_event],
        queue=False,
    )
    clear.click(lambda: None, None, chatbot, queue=False)  # wipe the chat history

    with gr.Accordion("For Chat/Translation API", open=False, visible=False):  # hidden API panel
        input_text = gr.Text()
        api_btn = gr.Button("Go", variant="primary")
        out_text = gr.Text()

    api_btn.click(  # expose predict_api as a named endpoint
        predict_api,
        input_text,
        out_text,
        api_name="api",
    )
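
    # Hedged usage sketch: once the Space is live, the endpoint registered with
    # api_name="api" can be called remotely with gradio_client (the Space id
    # below is a placeholder, not taken from this commit):
    # from gradio_client import Client
    # client = Client("<owner>/<space>")
    # print(client.predict("Hello, kitten!", api_name="/api"))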

    # block.load(update_buff, [], buff, every=1)
    # block.load(update_buff, [buff_var], [buff_var, buff], every=1)

# concurrency_count=5, max_size=20
# max_size=36, concurrency_count=14
# CPU cpu_count=2 16G, model 7G
# CPU UPGRADE cpu_count=8 32G, model 7G

# does not work
_ = """
# _ = int(psutil.virtual_memory().total / 10**9 // file_size - 1)
# concurrency_count = max(_, 1)
if psutil.cpu_count(logical=False) >= 8:
    # concurrency_count = max(int(32 / file_size) - 1, 1)
else:
    # concurrency_count = max(int(16 / file_size) - 1, 1)
# """

concurrency_count = 1  # a single concurrent worker
logger.info(f"{concurrency_count=}")

block.queue(concurrency_count=concurrency_count, max_size=5).launch(debug=True)  # start the server
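
# To run this file locally (the dependency list is an assumption -- the
# commit's requirements.txt is not shown here):
#   pip install "gradio==3.37.0" ctransformers dl-hf-model about-time loguru psutil
#   python app.py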