chywang committed on
Commit
0ee72aa
1 Parent(s): 8983e5a

Upload 10 files

Files changed (10)
  1. 1.jpg +0 -0
  2. 3.jpg +0 -0
  3. 5.jpg +0 -0
  4. 6.jpg +0 -0
  5. 7.jpg +0 -0
  6. 8.jpg +0 -0
  7. 9.jpg +0 -0
  8. ai_service_python_sdk-1.1.1-py3-none-any.whl +0 -0
  9. app.py +343 -0
  10. requirements.txt +1 -0
1.jpg ADDED
3.jpg ADDED
5.jpg ADDED
6.jpg ADDED
7.jpg ADDED
8.jpg ADDED
9.jpg ADDED
ai_service_python_sdk-1.1.1-py3-none-any.whl ADDED
Binary file (48.3 kB).
 
app.py ADDED
@@ -0,0 +1,343 @@
+ import os
+ import glob
+ import gradio as gr
+ import base64
+ import cv2
+ import numpy as np
+ import oss2
+ import time
+
+ from ai_service_python_sdk.client.api.ai_service_aigc_images_api import AIGCImagesApi
+ from ai_service_python_sdk.client.api.ai_service_job_api import AiServiceJobApi
+ from ai_service_python_sdk.client.api_client import ApiClient
+ from ai_service_python_sdk.test import appId, host, token  # default values; overridden by the environment variables below
+
+
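+ # Endpoint and credentials for the PAI AIGC service and the OSS bucket, read from environment variables.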
+ host = os.getenv("PAI_REC_HOST")
+ appId = os.getenv("PAI_REC_APP_ID")
+ token = os.getenv("PAI_REC_TOKEN")
+ access_key_id = os.getenv('OSS_ACCESS_KEY_ID')
+ access_key_secret = os.getenv('OSS_ACCESS_KEY_SECRET')
+ bucket_name = os.getenv('OSS_BUCKET')
+ endpoint = os.getenv('OSS_ENDPOINT')
+
+
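+ # Gradio upload callback: append newly uploaded photos to the current gallery.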
+ def upload_file(files, current_files):
+     file_paths = [file_d['name'] for file_d in current_files] + [file.name for file in files]
+     return file_paths
+
+
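+ # Decode a base64-encoded JPEG returned by the service into an RGB numpy array.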
+ def decode_image_from_base64jpeg(base64_image):
+     image_bytes = base64.b64decode(base64_image)
+     np_arr = np.frombuffer(image_bytes, np.uint8)
+     image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
+     image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+     return image
+
+
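+ # Upload one training photo to OSS under a timestamped folder and return its URL.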
+ def upload(image_path, number):
+     bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
+     file_name = image_path.split('/')[-1]
+     ext = file_name.split('.')[-1]
+     file_name = str(number) + '.' + ext
+     timestamp = str(time.time()).split('.')[0]
+     bucket_folder = 'aigc-data/easyphoto_demo_data/' + timestamp + '_user_lora/'
+     oss_file_path = bucket_folder + file_name
+     bucket.put_object_from_file(oss_file_path, image_path)
+     file_url = 'https://' + bucket_name + '.' + endpoint + '/' + bucket_folder + file_name
+     return file_url
+
+
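+ # Upload a selected template image to OSS and return its URL.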
+ def upload_template(image_path):
+     bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
+     file_name = image_path.split('/')[-1]
+     timestamp = str(time.time()).split('.')[0]
+     bucket_folder = 'aigc-data/easyphoto_demo_data/' + timestamp + '_user_template/'
+     oss_file_path = bucket_folder + file_name
+     bucket.put_object_from_file(oss_file_path, image_path)
+     file_url = 'https://' + bucket_name + '.' + endpoint + '/' + bucket_folder + file_name
+     return file_url
+
+
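+ # Submit a training job for the uploaded photos through the AIGC images API; returns status text, job id and model id.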
+ def easyphoto_train(instance_images):
+     images = []
+     if instance_images is None or len(instance_images) == 0:
+         output = 'Status: no image uploaded! 没有上传照片'
+         return output, [], []
+     for number, image in enumerate(instance_images):
+         image_path = image['name']
+         image_url = upload(image_path, number)
+         images.append(image_url)
+     client = ApiClient(host, appId, token)
+     api = AIGCImagesApi(client)
+     response = api.aigc_images_train(images, '', None)
+     message = response.message
+     if message == 'success':
+         model_id = response.data['model_id']
+         job_id = response.data['job_id']
+         state = 'training job submitted. 提交训练任务成功'
+         output = 'Status: ' + state
+         print("job id: " + str(job_id))
+         print("model id: " + str(model_id))
+         return output, job_id, model_id
+     else:
+         output = 'Status: submitting training job failed! 提交训练任务失败'
+         return output, [], []
+
+
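+ # Query the status of a submitted training job by its job id.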
+ def easyphoto_check(job_id):
+     client = ApiClient(host, appId, token)
+     api = AiServiceJobApi(client)
+     if job_id is None:
+         output = 'Status: checking training status failed! No job id. 状态检查失败'
+     else:
+         try:
+             job_id = int(str(job_id).strip())
+             response = api.get_async_job_with_id(job_id)
+             message = response.data['job']['message']
+             output = 'Status: ' + message
+         except Exception:
+             output = 'Status: checking training status failed! 状态检查失败'
+     return output
+
+
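+ # Generate photos: upload the selected templates to OSS, call the AIGC images API with the trained model, and decode the returned images.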
+ def easyphoto_infer(model_id, selected_template_images, additional_prompt, seed, before_face_fusion_ratio, after_face_fusion_ratio, first_diffusion_steps, first_denoising_strength, second_diffusion_steps, second_denoising_strength, crop_face_preprocess, apply_face_fusion_before, apply_face_fusion_after, color_shift_middle, color_shift_last, background_restore):
+     image_urls = []
+     if len(selected_template_images) == 0:
+         output_info = 'Status: no template selected! 需要选择模版'
+         return output_info, []
+     selected_template_images = eval(selected_template_images)  # the textbox holds the string repr of the selected template list
+     for image in selected_template_images:
+         image_url = upload_template(image)
+         image_urls.append(image_url)
+
+     client = ApiClient(host, appId, token)
+     api = AIGCImagesApi(client)
+     outputs = []
+     output_info = None
+
+     if model_id is None:
+         output_info = 'Status: no model id provided! 需要提供模型id'
+         return output_info, []
+
+     model_id = str(model_id).strip()
+     print('model id: ' + model_id)
+
+     for image_url in image_urls:
+         try:
+             params = {
+                 "additional_prompt": additional_prompt,
+                 "seed": seed,
+                 "before_face_fusion_ratio": before_face_fusion_ratio,
+                 "after_face_fusion_ratio": after_face_fusion_ratio,
+                 "first_diffusion_steps": first_diffusion_steps,
+                 "first_denoising_strength": first_denoising_strength,
+                 "second_diffusion_steps": second_diffusion_steps,
+                 "second_denoising_strength": second_denoising_strength,
+                 "crop_face_preprocess": crop_face_preprocess,
+                 "apply_face_fusion_before": apply_face_fusion_before,
+                 "apply_face_fusion_after": apply_face_fusion_after,
+                 "color_shift_middle": color_shift_middle,
+                 "color_shift_last": color_shift_last,
+                 "background_restore": background_restore
+             }
+             response = api.aigc_images_create(model_id, image_url, 'photog_infer_with_webui_pmml', params)
+         except Exception:
+             output_info = 'Status: calling EAS service failed!'
+             return output_info, []
+
+         data = response.data
+         message = response.message
+         if message == 'success':
+             image = data['image']
+             image = decode_image_from_base64jpeg(image)
+             outputs.append(image)
+             output_info = 'Status: image generated successfully! 图像生成成功'
+         else:
+             output_info = 'Status: generating image failed! 图像生成失败'
+             return output_info, []
+     return output_info, outputs
+
+
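+ # Gradio UI: a Training tab to upload photos and submit/check training jobs, and an Inference tab to pick a template and generate photos.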
+ with gr.Blocks() as easyphoto_demo:
+     model_id = gr.Textbox(visible=False)
+     with gr.TabItem('Training 训练'):
+         with gr.Blocks():
+             with gr.Row():
+                 with gr.Column():
+                     instance_images = gr.Gallery().style(columns=[4], rows=[2], object_fit="contain", height="auto")
+                     with gr.Row():
+                         upload_button = gr.UploadButton(
+                             "Upload Photos 上传照片", file_types=["image"], file_count="multiple"
+                         )
+                         clear_button = gr.Button("Clear Photos 清除照片")
+                         clear_button.click(fn=lambda: [], inputs=None, outputs=instance_images)
+                         upload_button.upload(upload_file, inputs=[upload_button, instance_images], outputs=instance_images, queue=False)
+                     gr.Markdown(
+                         '''
+                         Training steps:
+
+                         1. Upload 5-20 half-body or head-and-shoulders photos, and make sure the face does not take up too small a portion of the image.
+
+                         2. Click the training button below to submit a training job. Training takes about 15 minutes, and you can check its status at any time. Please do not click the submit button repeatedly!
+
+                         3. When training finishes, the job status shows success. Switch to the inference tab and generate photos from a template.
+
+                         4. If uploading is slow, resize your photos so that each image is no larger than about 1.5 MB.
+
+                         5. Do not refresh or close the window during training or inference.
+                         '''
+                     )
+
+         job_id = gr.Textbox(visible=False)
+         with gr.Row():
+             run_button = gr.Button('Submit My Training Job 提交训练任务')
+             check_button = gr.Button('Check My Training Job Status 检查我的训练任务状态')
+         output_message = gr.Textbox(value="", label="Status 状态", interactive=False)
+
+         run_button.click(fn=easyphoto_train,
+                          inputs=[instance_images],
+                          outputs=[output_message, job_id, model_id])
+
+         check_button.click(fn=easyphoto_check,
+                            inputs=[job_id],
+                            outputs=[output_message])
+
+
+     with gr.TabItem('Inference 推理'):
+         templates = glob.glob(r'./*.jpg')
+         preset_template = list(templates)
+
+         with gr.Blocks() as demo:
+             with gr.Row():
+                 with gr.Column():
+                     template_gallery_list = [(i, i) for i in preset_template]
+                     gallery = gr.Gallery(template_gallery_list).style(columns=[4], rows=[2], object_fit="contain", height="auto")
+
+                     def select_function(evt: gr.SelectData):
+                         return [preset_template[evt.index]]
+
+                     selected_template_images = gr.Text(show_label=False, visible=False, placeholder="Selected")
+                     gallery.select(select_function, None, selected_template_images)
+
+                     with gr.Accordion("Advanced Options 参数设置", open=False):
+                         additional_prompt = gr.Textbox(
+                             label="Additional Prompt",
+                             lines=3,
+                             value='masterpiece, beauty',
+                             interactive=True
+                         )
+                         seed = gr.Textbox(
+                             label="Seed",
+                             value=12345,
+                         )
+                         with gr.Row():
+                             before_face_fusion_ratio = gr.Slider(
+                                 minimum=0.2, maximum=0.8, value=0.50,
+                                 step=0.05, label='Face Fusion Ratio Before'
+                             )
+                             after_face_fusion_ratio = gr.Slider(
+                                 minimum=0.2, maximum=0.8, value=0.50,
+                                 step=0.05, label='Face Fusion Ratio After'
+                             )
+
+                         with gr.Row():
+                             first_diffusion_steps = gr.Slider(
+                                 minimum=15, maximum=50, value=50,
+                                 step=1, label='First Diffusion steps'
+                             )
+                             first_denoising_strength = gr.Slider(
+                                 minimum=0.30, maximum=0.60, value=0.45,
+                                 step=0.05, label='First Diffusion denoising strength'
+                             )
+                         with gr.Row():
+                             second_diffusion_steps = gr.Slider(
+                                 minimum=15, maximum=50, value=20,
+                                 step=1, label='Second Diffusion steps'
+                             )
+                             second_denoising_strength = gr.Slider(
+                                 minimum=0.20, maximum=0.40, value=0.30,
+                                 step=0.05, label='Second Diffusion denoising strength'
+                             )
+                         with gr.Row():
+                             crop_face_preprocess = gr.Checkbox(
+                                 label="Crop Face Preprocess",
+                                 value=True
+                             )
+                             apply_face_fusion_before = gr.Checkbox(
+                                 label="Apply Face Fusion Before",
+                                 value=True
+                             )
+                             apply_face_fusion_after = gr.Checkbox(
+                                 label="Apply Face Fusion After",
+                                 value=True
+                             )
+                         with gr.Row():
+                             color_shift_middle = gr.Checkbox(
+                                 label="Apply color shift first",
+                                 value=True
+                             )
+                             color_shift_last = gr.Checkbox(
+                                 label="Apply color shift last",
+                                 value=True
+                             )
+                             background_restore = gr.Checkbox(
+                                 label="Background Restore",
+                                 value=False
+                             )
+
+                     with gr.Box():
+                         gr.Markdown(
+                             '''
+                             Parameters:
+                             1. **Face Fusion Ratio Before** is the strength of the first face fusion; the higher the value, the more similar the result is to the training subject.
+                             2. **Face Fusion Ratio After** is the strength of the second face fusion; the higher the value, the more similar the result is to the training subject.
+                             3. **Crop Face Preprocess** controls whether the image is cropped before generation, which helps with images in which the face is small.
+                             4. **Apply Face Fusion Before** controls whether the first face fusion is performed.
+                             5. **Apply Face Fusion After** controls whether the second face fusion is performed.
+                             '''
+                         )
+
+                 with gr.Column():
+                     gr.Markdown('Generated Results 生成结果')
+                     output_images = gr.Gallery(
+                         label='Output',
+                         show_label=False
+                     ).style(columns=[4], rows=[2], object_fit="contain", height="auto")
+                     display_button = gr.Button('Start Generation 开始生成')
+                     infer_progress = gr.Textbox(
+                         label="Generation Progress 生成进度",
+                         value="",
+                         interactive=False
+                     )
+
+             display_button.click(
+                 fn=easyphoto_infer,
+                 inputs=[model_id, selected_template_images, additional_prompt, seed, before_face_fusion_ratio, after_face_fusion_ratio, first_diffusion_steps, first_denoising_strength, second_diffusion_steps, second_denoising_strength, crop_face_preprocess, apply_face_fusion_before, apply_face_fusion_after, color_shift_middle, color_shift_last, background_restore],
+                 outputs=[infer_progress, output_images]
+             )
+
+     gr.Markdown(
+         """
+         References
+
+         EasyPhoto GitHub: https://github.com/aigc-apps/sd-webui-EasyPhoto
+
+         Alibaba Cloud Free Tier: https://help.aliyun.com/document_detail/2567864.html
+
+         智码实验室: https://gallery.pai-ml.com/#/preview/deepLearning/cv/stable_diffusion_easyphoto
+         """)
+
+ easyphoto_demo.queue().launch(share=True)  # enable the request queue before launching
requirements.txt ADDED
@@ -0,0 +1 @@
+ ai_service_python_sdk-1.1.1-py3-none-any.whl