michaelj committed
Commit
e2221d9
1 Parent(s): 1fc08d2

Upload folder using huggingface_hub

Files changed (2)
  1. Dockerfile +1 -1
  2. app.py +274 -0
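The commit message above indicates the folder was pushed with the huggingface_hub client. For reference, a minimal sketch of that kind of upload, assuming a hypothetical Space repo id and an already-configured access token (both placeholders, not taken from this commit):

from huggingface_hub import HfApi

api = HfApi()
# Push the local working directory to the Space; repo_id is a placeholder.
api.upload_folder(
    folder_path=".",
    repo_id="michaelj/fastsd-space",  # hypothetical repo id
    repo_type="space",
    commit_message="Upload folder using huggingface_hub",
)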
Dockerfile CHANGED
@@ -17,4 +17,4 @@ WORKDIR $HOME/app
 
  COPY --chown=user . $HOME/app
 
- CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860", "--reload"]
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860", "--reload"]
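The only Dockerfile change repoints uvicorn at `app:app`, presumably because the FastAPI instance added below lives in app.py as `app` rather than in a main.py. As a rough local-testing equivalent (a sketch, not part of the commit), the CMD corresponds to this programmatic launch:

import uvicorn

if __name__ == "__main__":
    # Equivalent of: uvicorn app:app --host 0.0.0.0 --port 7860 --reload
    uvicorn.run("app:app", host="0.0.0.0", port=7860, reload=True)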
app.py ADDED
@@ -0,0 +1,274 @@
+ from app_settings import AppSettings
+ from utils import show_system_info
+ import constants
+ from argparse import ArgumentParser
+ from context import Context
+ from constants import APP_VERSION, LCM_DEFAULT_MODEL_OPENVINO
+ from models.interface_types import InterfaceType
+ from constants import DEVICE
+ from state import get_settings
+ import traceback
+
+
+ from fastapi import FastAPI, Body
+
+ import uvicorn
+ import json
+ import logging
+ from PIL import Image
+ import time
+
+ from diffusers.utils import load_image
+ import base64
+ import io
+ from datetime import datetime
+
+ from typing import Any
+ from backend.models.lcmdiffusion_setting import DiffusionTask
+
+ from frontend.utils import is_reshape_required
+ from concurrent.futures import ThreadPoolExecutor
+
+
+ context = Context(InterfaceType.WEBUI)
+ previous_width = 0
+ previous_height = 0
+ previous_model_id = ""
+ previous_num_of_images = 0
+
+ # parser = ArgumentParser(description=f"FAST SD CPU {constants.APP_VERSION}")
+ # parser.add_argument(
+ #     "-s",
+ #     "--share",
+ #     action="store_true",
+ #     help="Create sharable link(Web UI)",
+ #     required=False,
+ # )
+ # group = parser.add_mutually_exclusive_group(required=False)
+ # group.add_argument(
+ #     "-g",
+ #     "--gui",
+ #     action="store_true",
+ #     help="Start desktop GUI",
+ # )
+ # group.add_argument(
+ #     "-w",
+ #     "--webui",
+ #     action="store_true",
+ #     help="Start Web UI",
+ # )
+ # group.add_argument(
+ #     "-r",
+ #     "--realtime",
+ #     action="store_true",
+ #     help="Start realtime inference UI(experimental)",
+ # )
+ # group.add_argument(
+ #     "-v",
+ #     "--version",
+ #     action="store_true",
+ #     help="Version",
+ # )
+ # parser.add_argument(
+ #     "--lcm_model_id",
+ #     type=str,
+ #     help="Model ID or path,Default SimianLuo/LCM_Dreamshaper_v7",
+ #     default="SimianLuo/LCM_Dreamshaper_v7",
+ # )
+ # parser.add_argument(
+ #     "--prompt",
+ #     type=str,
+ #     help="Describe the image you want to generate",
+ # )
+ # parser.add_argument(
+ #     "--image_height",
+ #     type=int,
+ #     help="Height of the image",
+ #     default=512,
+ # )
+ # parser.add_argument(
+ #     "--image_width",
+ #     type=int,
+ #     help="Width of the image",
+ #     default=512,
+ # )
+ # parser.add_argument(
+ #     "--inference_steps",
+ #     type=int,
+ #     help="Number of steps,default : 4",
+ #     default=4,
+ # )
+ # parser.add_argument(
+ #     "--guidance_scale",
+ #     type=int,
+ #     help="Guidance scale,default : 1.0",
+ #     default=1.0,
+ # )
+
+ # parser.add_argument(
+ #     "--number_of_images",
+ #     type=int,
+ #     help="Number of images to generate ,default : 1",
+ #     default=1,
+ # )
+ # parser.add_argument(
+ #     "--seed",
+ #     type=int,
+ #     help="Seed,default : -1 (disabled) ",
+ #     default=-1,
+ # )
+ # parser.add_argument(
+ #     "--use_openvino",
+ #     action="store_true",
+ #     help="Use OpenVINO model",
+ # )
+
+ # parser.add_argument(
+ #     "--use_offline_model",
+ #     action="store_true",
+ #     help="Use offline model",
+ # )
+ # parser.add_argument(
+ #     "--use_safety_checker",
+ #     action="store_false",
+ #     help="Use safety checker",
+ # )
+ # parser.add_argument(
+ #     "--use_lcm_lora",
+ #     action="store_true",
+ #     help="Use LCM-LoRA",
+ # )
+ # parser.add_argument(
+ #     "--base_model_id",
+ #     type=str,
+ #     help="LCM LoRA base model ID,Default Lykon/dreamshaper-8",
+ #     default="Lykon/dreamshaper-8",
+ # )
+ # parser.add_argument(
+ #     "--lcm_lora_id",
+ #     type=str,
+ #     help="LCM LoRA model ID,Default latent-consistency/lcm-lora-sdv1-5",
+ #     default="latent-consistency/lcm-lora-sdv1-5",
+ # )
+ # parser.add_argument(
+ #     "-i",
+ #     "--interactive",
+ #     action="store_true",
+ #     help="Interactive CLI mode",
+ # )
+ # parser.add_argument(
+ #     "--use_tiny_auto_encoder",
+ #     action="store_true",
+ #     help="Use tiny auto encoder for SD (TAESD)",
+ # )
+ # args = parser.parse_args()
+
+ # if args.version:
+ #     print(APP_VERSION)
+ #     exit()
+
+ # parser.print_help()
+ show_system_info()
+ print(f"Using device : {constants.DEVICE}")
+ app_settings = get_settings()
+
+ print(f"Found {len(app_settings.lcm_models)} LCM models in config/lcm-models.txt")
+ print(
+     f"Found {len(app_settings.stable_diffsuion_models)} stable diffusion models in config/stable-diffusion-models.txt"
+ )
+ print(
+     f"Found {len(app_settings.lcm_lora_models)} LCM-LoRA models in config/lcm-lora-models.txt"
+ )
+ print(
+     f"Found {len(app_settings.openvino_lcm_models)} OpenVINO LCM models in config/openvino-lcm-models.txt"
+ )
+ app_settings.settings.lcm_diffusion_setting.use_openvino = True
+ # from frontend.webui.ui import start_webui
+
+ # print("Starting web UI mode")
+ # start_webui(
+ #     args.share,
+ # )
+
+ app = FastAPI(name="mutilParam")
+ print("App started")
+
+
+ @app.get("/")
+ def root():
+     return {"API": "hello"}
+
+
+ @app.post("/img2img")
+ async def predict(prompt=Body(...), imgbase64data=Body(...), negative_prompt=Body(None), userId=Body(None)):
+     MAX_QUEUE_SIZE = 4
+     start = time.time()
+     print("Parameters:", imgbase64data, prompt)
+     # Decode the base64 payload into a PIL image.
+     image_data = base64.b64decode(imgbase64data)
+     image1 = Image.open(io.BytesIO(image_data))
+     w, h = image1.size
+     newW = 512
+     newH = int(h * newW / w)
+     img = image1.resize((newW, newH))
+     end1 = time.time()
+     now = datetime.now()
+     print(now)
+     print("Image size:", img.size)
+     print("Load pipeline:", end1 - start)
+     global previous_height, previous_width, previous_model_id, previous_num_of_images, app_settings
+
+     app_settings.settings.lcm_diffusion_setting.prompt = prompt
+     app_settings.settings.lcm_diffusion_setting.negative_prompt = negative_prompt
+     app_settings.settings.lcm_diffusion_setting.init_image = image1
+     app_settings.settings.lcm_diffusion_setting.strength = 0.6
+
+     app_settings.settings.lcm_diffusion_setting.diffusion_task = (
+         DiffusionTask.image_to_image.value
+     )
+     model_id = app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id
+     reshape = False
+     app_settings.settings.lcm_diffusion_setting.image_height = newH
+     image_width = app_settings.settings.lcm_diffusion_setting.image_width
+     image_height = app_settings.settings.lcm_diffusion_setting.image_height
+     num_images = app_settings.settings.lcm_diffusion_setting.number_of_images
+     # Reshape only when resolution, model id or batch size changed since the last call.
+     reshape = is_reshape_required(
+         previous_width,
+         image_width,
+         previous_height,
+         image_height,
+         previous_model_id,
+         model_id,
+         previous_num_of_images,
+         num_images,
+     )
+
+     with ThreadPoolExecutor(max_workers=1) as executor:
+         future = executor.submit(
+             context.generate_text_to_image,
+             app_settings.settings,
+             reshape,
+             DEVICE,
+         )
+         images = future.result()
+     previous_width = image_width
+     previous_height = image_height
+     previous_model_id = model_id
+     previous_num_of_images = num_images
+     output_image = images[0]
+     end2 = time.time()
+     print("Test:", output_image)
+     print("Generation finished:", end2 - end1)
+     # Convert the image object to bytes
+     image_data = io.BytesIO()
+
+     # Save the image into the BytesIO object as JPEG
+     output_image.save(image_data, format='JPEG')
+
+     # Convert the BytesIO contents to a byte string
+     image_data_bytes = image_data.getvalue()
+     output_image_base64 = base64.b64encode(image_data_bytes).decode('utf-8')
+     print("Finished image:", output_image_base64)
+     return output_image_base64
+
+
+ @app.post("/predict")
+ async def predict(prompt=Body(...)):
+     return f"Hello, {prompt}"
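For reference, a hedged client-side sketch of calling the new /img2img endpoint. Because the handler declares several Body(...) parameters, FastAPI expects a single JSON object whose keys match the parameter names, and the endpoint returns the generated image as a base64-encoded JPEG string. The host URL, file names, and prompt below are placeholders:

import base64
import requests

# Encode a local image as base64 (placeholder file name).
with open("input.jpg", "rb") as f:
    img_b64 = base64.b64encode(f.read()).decode("utf-8")

resp = requests.post(
    "http://localhost:7860/img2img",  # assumes the container is reachable locally
    json={
        "prompt": "a watercolor painting of a cat",
        "imgbase64data": img_b64,
        "negative_prompt": "blurry, low quality",
        "userId": "demo-user",
    },
    timeout=300,
)
resp.raise_for_status()

# The response body is a JSON string containing the base64-encoded JPEG.
out_b64 = resp.json()
with open("output.jpg", "wb") as f:
    f.write(base64.b64decode(out_b64))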