init
- app.py +360 -0
- file_server.py +50 -0
- requirements.txt +11 -0
app.py
ADDED
@@ -0,0 +1,360 @@
# Copyright (c) Alibaba Cloud.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

"""A simple web interactive chat demo based on gradio."""
import os
import json
import time
import datetime
from argparse import ArgumentParser

import gradio as gr
import mdtex2html
import piexif
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
from diffusers import AutoPipelineForText2Image

from file_server import start_server, get_local_ip

DEFAULT_CKPT_PATH = 'hahahafofo/Qwen-1_8B-Stable-Diffusion-Prompt'
OUTPUT_IMAGES_DIR = "output_images"
OUTPUT_HTML_DIR = "output_html"


def _get_args():
    parser = ArgumentParser()
    parser.add_argument("-c", "--checkpoint-path", type=str, default=DEFAULT_CKPT_PATH,
                        help="Checkpoint name or path, default to %(default)r")
    parser.add_argument("--cpu-only", action="store_true", help="Run demo with CPU only")

    parser.add_argument("--share", action="store_true", default=False,
                        help="Create a publicly shareable link for the interface.")
    parser.add_argument("--inbrowser", action="store_true", default=False,
                        help="Automatically launch the interface in a new tab on the default browser.")
    parser.add_argument("--server-port", type=int, default=8000,
                        help="Demo server port.")
    parser.add_argument("--server-name", type=str, default="0.0.0.0",
                        help="Demo server name.")
    parser.add_argument("--file-server-port", type=int, default=8001,
                        help="File server port.")
    args = parser.parse_args()
    return args


def _load_model_tokenizer(args):
    tokenizer = AutoTokenizer.from_pretrained(
        args.checkpoint_path, trust_remote_code=True, resume_download=True,
    )

    if args.cpu_only:
        device_map = "cpu"
    else:
        device_map = "auto"

    model = AutoModelForCausalLM.from_pretrained(
        args.checkpoint_path,
        device_map=device_map,
        trust_remote_code=True,
        resume_download=True,
    ).eval()

    config = GenerationConfig.from_pretrained(
        args.checkpoint_path,
        trust_remote_code=True,
        resume_download=True,
    )
    config.max_new_tokens = 100

    return model, tokenizer, config


def _load_sdxl_turbo():
    pipe = AutoPipelineForText2Image.from_pretrained(
        "stabilityai/sdxl-turbo",
        torch_dtype=torch.float16,
        variant="fp16"
    )
    pipe.to("cuda")
    return pipe


def postprocess(self, y):
    if y is None:
        return []
    for i, (message, response) in enumerate(y):
        y[i] = (
            None if message is None else mdtex2html.convert(message),
            None if response is None else mdtex2html.convert(response),
        )
    return y


gr.Chatbot.postprocess = postprocess


def _save_image2html(image, query, prompt):
    # Encode the prompt as JSON and store it in the image's EXIF metadata.
    exif_dict = {"0th": {}, "Exif": {}, "1st": {}, "thumbnail": None, "GPS": {}}
    exif_dict["0th"][piexif.ImageIFD.ImageDescription] = json.dumps({"prompt": prompt})
    exif_bytes = piexif.dump(exif_dict)

    file_name = f"{int(time.time())}.png"
    image_path = os.path.join(OUTPUT_IMAGES_DIR, file_name)
    image.save(image_path, "PNG", exif=exif_bytes)

    # Build the HTML history content.
    # Initial HTML skeleton.
    html_start = """<!DOCTYPE html><html lang="zh"><head><meta charset="UTF-8">
    <title>Image and Prompt History</title></head><body><h1>Image and Prompt History</h1><ul>"""
    html_end = "</ul></body></html>"
    # One history file per day.
    html_file_path = os.path.join(OUTPUT_HTML_DIR, f"{datetime.datetime.now().strftime('%Y-%m-%d')}.html")
    # New list item for this image.
    new_list_item = f"""
    <li>
        <p>Prompt: {prompt}</p>
        <p>Input: {query}</p>
        <img src="{image_path}" alt="{image_path}" style="max-width: 100%; height: auto;">
    </li>
    """

    # Read the existing HTML file.
    try:
        with open(html_file_path, 'r', encoding='utf-8') as file:
            existing_html = file.read()
    except FileNotFoundError:
        # If the file does not exist yet, start from a fresh skeleton.
        existing_html = html_start + html_end

    # Insert the new list item before the closing list tag.
    updated_html = existing_html.replace(html_end, new_list_item + html_end)

    # Write the updated HTML back to the file.
    with open(html_file_path, 'w', encoding='utf-8') as file:
        file.write(updated_html)

    return f"HTML content appended to {html_file_path}"


def _parse_text(text):
    lines = text.split("\n")
    lines = [line for line in lines if line != ""]
    count = 0
    for i, line in enumerate(lines):
        if "```" in line:
            count += 1
            items = line.split("`")
            if count % 2 == 1:
                lines[i] = f'<pre><code class="language-{items[-1]}">'
            else:
                lines[i] = "<br></code></pre>"
        else:
            if i > 0:
                if count % 2 == 1:
                    line = line.replace("`", r"\`")
                    line = line.replace("<", "&lt;")
                    line = line.replace(">", "&gt;")
                    line = line.replace(" ", "&nbsp;")
                    line = line.replace("*", "&ast;")
                    line = line.replace("_", "&lowbar;")
                    line = line.replace("-", "&#45;")
                    line = line.replace(".", "&#46;")
                    line = line.replace("!", "&#33;")
                    line = line.replace("(", "&#40;")
                    line = line.replace(")", "&#41;")
                    line = line.replace("$", "&#36;")
                lines[i] = "<br>" + line
    text = "".join(lines)
    return text


def _launch_demo(args, image_pipe, model, tokenizer, config):
    def predict(
            _query,
            _chatbot,
            _task_history,
            _prompt_system: str = "You are a helpful assistant",
            _prompt_template: str = ""
    ):
        print(f"User: {_parse_text(_query)}")
        _chatbot.append((_parse_text(_query), ""))
        full_response = ""
        _query = f"{_prompt_template}\n{_query}"

        for response in model.chat_stream(
                tokenizer,
                _query,
                history=_task_history,
                generation_config=config,
                system=_prompt_system
        ):
            _chatbot[-1] = (_parse_text(_query), _parse_text(response))
            yield _chatbot
            full_response = _parse_text(response)

        print(f"History: {_task_history}")
        _task_history.append((_query, full_response))
        print(f"Qwen-Chat: {_parse_text(full_response)}")

    def draw_image(_chatbot, _task_history, num_inference_steps):
        if len(_task_history) == 0:
            return
        prompt = _task_history[-1][-1]
        if len(prompt) == 0:
            return
        print(f"===\n{_chatbot} \n\n{_task_history} ====\n")
        print(f"{prompt}")
        image_pil = image_pipe(prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=0.0).images[0]
        _save_image2html(image_pil, query=_chatbot[-1][0], prompt=prompt)
        return image_pil

    def regenerate(_chatbot, _task_history, _prompt_system):
        if not _task_history:
            yield _chatbot
            return
        item = _task_history.pop(-1)
        _chatbot.pop(-1)
        yield from predict(item[0], _chatbot, _task_history, _prompt_template="", _prompt_system=_prompt_system)

    def reset_user_input():
        return gr.update(value="")

    def reset_state(_chatbot, _task_history):
        _task_history.clear()
        _chatbot.clear()
        import gc
        gc.collect()
        torch.cuda.empty_cache()
        return _chatbot

    with gr.Blocks() as demo:
        task_history = gr.State([])

        with gr.Row():
            with gr.Column(scale=1, min_width=600):
                image = gr.Image(type="pil")
                query = gr.Textbox(lines=4, label='Input')
                with gr.Row():
                    empty_btn = gr.Button("🧹 Clear History (清除历史)")
                    submit_btn = gr.Button("🚀 Submit (生成)")
                    regen_btn = gr.Button("🤔️ Regenerate (重试)")
                    image_btn = gr.Button("🎨 Image (生成)")
                    talk_btn = gr.Button("💬 Talk (聊天)")
            with gr.Column(scale=1, min_width=600):
                with gr.Tab(label="Qwen"):
                    temperature = gr.Slider(
                        minimum=0.0,
                        maximum=1.0,
                        step=0.01,
                        value=0.9,
                        label="Temperature",
                        info="越小越遵循输入,越大越充满想象"
                    )
                    with gr.Row():
                        prompt_system_radio = gr.Radio(
                            ["中英文翻译", "文言文", "画家", "剧情", "AI助手"],
                            label='角色',
                            info="根据输入选择合适的角色"
                        )
                        prompt_system = gr.Textbox(
                            lines=1,
                            label='System Template',
                            value="你擅长翻译中文到英语。"
                        )

                    prompt_template = gr.Textbox(
                        lines=1,
                        label='Prompt Template',
                        value="必须使用英语根据主题描述一副画面:"
                    )
                    chatbot = gr.Chatbot(label='Qwen-Chat', elem_classes="control-height")

                with gr.Tab(label="Config"):
                    with gr.Row():
                        top_p = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=1.0, label="Top-p")
                        top_k = gr.Slider(minimum=0, maximum=100, step=1, value=50, label="Top-k")
                        max_new_tokens = gr.Slider(minimum=1, maximum=1024, step=1, value=100, label="Max New Tokens")
                        repetition_penalty = gr.Slider(
                            minimum=1.0,
                            maximum=2.0,
                            step=0.01,
                            value=1.1,
                            label="Repetition Penalty",
                            info="重复惩罚"
                        )
                    with gr.Row():
                        num_inference_steps = gr.Slider(minimum=1, maximum=60, step=1, value=4, label="Image Steps")

                with gr.Tab(label="History"):
                    file_server = f"http://{get_local_ip()}:{args.file_server_port}/"
                    html_file_path = f"{datetime.datetime.now().strftime('%Y-%m-%d')}.html"
                    html_fns = [fn for fn in os.listdir(OUTPUT_HTML_DIR) if fn.endswith(".html")]

                    gr.Markdown(f'<a href="{file_server}{html_file_path}" target="_blank">{html_file_path}</a>')
                    for fn in html_fns:
                        if fn == html_file_path:
                            continue
                        gr.Markdown(f'<a href="{file_server}{fn}" target="_blank">{fn}</a>')

        PROMPT_SYSTEM_DICT = {
            "中英文翻译": "你擅长翻译中文到英语。",
            "文言文": "你擅长文言文翻译为英语。",
            "画家": "你是绘画大师,擅长描绘画面细节。",
            "剧情": "你是剧作家,擅长创作连续的漫画脚本。",
            "AI助手": "You are a helpful assistant",
        }
        prompt_system_radio.change(lambda val: PROMPT_SYSTEM_DICT[val],
                                   inputs=[prompt_system_radio], outputs=[prompt_system])
        temperature.change(lambda val: config.update(temperature=val), inputs=[temperature], outputs=[])
        top_k.change(lambda val: config.update(top_k=val), inputs=[top_k], outputs=[])
        top_p.change(lambda val: config.update(top_p=val), inputs=[top_p], outputs=[])
        max_new_tokens.change(
            lambda val: config.update(max_new_tokens=val),
            inputs=[max_new_tokens],
            outputs=[],
        )
        repetition_penalty.change(
            lambda val: config.update(repetition_penalty=val),
            inputs=[repetition_penalty],
            outputs=[],
        )
        talk_btn.click(predict, [query, chatbot, task_history, prompt_system], [chatbot],
                       show_progress=True)

        submit_btn.click(predict, [query, chatbot, task_history, prompt_system, prompt_template], [chatbot],
                         show_progress=True)
        submit_btn.click(reset_user_input, [], [query])
        empty_btn.click(reset_state, [chatbot, task_history], outputs=[chatbot], show_progress=True)
        image_btn.click(draw_image, [chatbot, task_history, num_inference_steps], outputs=[image],
                        show_progress=True)
        regen_btn.click(regenerate, [chatbot, task_history, prompt_system], [chatbot], show_progress=True)

    demo.queue().launch(
        share=args.share,
        inbrowser=args.inbrowser,
        server_port=args.server_port,
        server_name=args.server_name,
    )


def main():
    args = _get_args()
    start_server(server_port=args.file_server_port)
    os.makedirs(OUTPUT_IMAGES_DIR, exist_ok=True)
    os.makedirs(OUTPUT_HTML_DIR, exist_ok=True)
    model, tokenizer, config = _load_model_tokenizer(args)
    pipe = _load_sdxl_turbo()
    _launch_demo(args, pipe, model, tokenizer, config)


if __name__ == '__main__':
    main()
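A note on _save_image2html: the generated prompt is serialized to JSON and written into the image's EXIF ImageDescription field before the PNG is saved. Below is a minimal sketch of how that metadata could be read back, assuming the installed Pillow version writes and reads the PNG eXIf chunk; the helper name and the example file path are illustrative only (tag 270 is the same value as piexif.ImageIFD.ImageDescription).

import json
from PIL import Image

# Illustrative helper (not part of this commit): recover the prompt that
# _save_image2html embedded in a saved image's EXIF ImageDescription tag.
IMAGE_DESCRIPTION_TAG = 270  # same value as piexif.ImageIFD.ImageDescription

def read_embedded_prompt(image_path):
    exif = Image.open(image_path).getexif()
    raw = exif.get(IMAGE_DESCRIPTION_TAG)
    if raw is None:
        return None
    if isinstance(raw, bytes):
        raw = raw.decode("utf-8", errors="replace")
    return json.loads(raw).get("prompt")

# Hypothetical file name; real files are named after the Unix timestamp.
print(read_embedded_prompt("output_images/1700000000.png"))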
file_server.py
ADDED
@@ -0,0 +1,50 @@
import http.server
import socketserver
import threading
import socket
import time

OUTPUT_HTML_DIR = "output_html"


def get_local_ip():
    """Return this machine's IP address on the local network."""
    try:
        # Open a throwaway UDP socket towards a public host; the socket's
        # own address then reveals which local interface would be used.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("www.baidu.com", 80))
        local_ip = s.getsockname()[0]
        s.close()
        return local_ip
    except Exception as e:
        print("Failed to determine the local IP address:", e)
        return None


class MyHttpRequestHandler(http.server.SimpleHTTPRequestHandler):
    def do_GET(self):
        # Serve HTML files from the output_html directory: rewrite
        # "/<name>.html" to "output_html/<name>.html" unless the path
        # already points there.
        if self.path.find(OUTPUT_HTML_DIR) == -1 and self.path.endswith(".html"):
            self.path = OUTPUT_HTML_DIR + self.path
        print(f"Requested file path: {self.path}")
        return http.server.SimpleHTTPRequestHandler.do_GET(self)


def _start_server(server_port):
    with socketserver.TCPServer(("", server_port), MyHttpRequestHandler) as httpd:
        print(f"HTTP file server listening on port {server_port}")
        httpd.serve_forever()


def start_server(server_port):
    # Run the server in a background daemon thread so it exits together
    # with the main program.
    server_thread = threading.Thread(target=_start_server, args=(server_port,))
    server_thread.daemon = True
    server_thread.start()


if __name__ == "__main__":
    start_server(8001)
    while True:
        time.sleep(1000000)
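For context, a small usage sketch of this module, assuming app.py has already written a history page into output_html/ and that port 8001 is free; the date in the URL is hypothetical.

import time
import urllib.request

from file_server import start_server, get_local_ip

start_server(8001)
time.sleep(1)  # give the daemon thread a moment to bind the port

# MyHttpRequestHandler rewrites "/<name>.html" to "output_html/<name>.html",
# so a page saved by app.py is reachable directly at the server root.
url = f"http://{get_local_ip()}:8001/2024-01-01.html"  # hypothetical date
with urllib.request.urlopen(url) as resp:
    print(resp.status, len(resp.read()))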
requirements.txt
ADDED
@@ -0,0 +1,11 @@
transformers==4.32.0
accelerate
tiktoken
einops
transformers_stream_generator==0.0.4
scipy
diffusers
mdtex2html
piexif
gradio<3.42
httpx[socks]