RunningYou committed on
Commit
73e49ee
1 Parent(s): ffab2fd
Files changed (1)
  1. app.py +27 -13
app.py CHANGED
@@ -64,31 +64,45 @@ def mediapipe_segmentation(image_file, mask_file):
     cv2.imwrite(mask_file, output_image)
 
 
-def image_inpainting(prompt, image_path, mask_image_path, num_samples=4):
+def image_inpainting(prompt, image_path, mask_image_path, num_samples=4, is_origin=False):
     image = PIL.Image.open(image_path).convert("RGB").resize((512, 512))
     mask_image = PIL.Image.open(mask_image_path).convert("RGB").resize((512, 512))
+    if not is_origin:
+        guidance_scale = 7.5
+        generator = torch.Generator(device=device).manual_seed(0)  # change the seed to get different results
 
-    guidance_scale = 7.5
-    generator = torch.Generator(device=device).manual_seed(0)  # change the seed to get different results
-
-    images = pipe(prompt=prompt, image=image, mask_image=mask_image, guidance_scale=guidance_scale, generator=generator,
-                  num_images_per_prompt=num_samples).images
+        images = pipe(prompt=prompt, image=image, mask_image=mask_image, guidance_scale=guidance_scale, generator=generator,
+                      num_images_per_prompt=num_samples).images
+    else:
+        images = pipe(prompt=prompt, image=image, mask_image=mask_image, num_images_per_prompt=num_samples).images
 
     # insert initial image in the list so we can compare side by side
     # images.insert(0, image)
     return image_grid(images, 2, math.ceil(num_samples/2))
 
 
+title = "Person Matting & Stable Diffusion In-Painting"
+description = "Inpainting Stable Diffusion <br/><b>mediapipe + Stable Diffusion<b/><br/>"
+
+
 def predict1(dict, prompt):
     dict['image'].save('image.png')
-    dict['mask'].save('mask.png')
-    mediapipe_segmentation('image.png', 'm_mask.png')
+    # dict['mask'].save('mask.png')
+    mediapipe_segmentation('image.png')
+    image = image_inpainting(prompt, image_path='image.png', mask_image_path='m_mask.png', is_origin=False)
+    return image
 
-    image = image_inpainting(prompt, image_path='image.png', mask_image_path='m_mask.png')
+
+def predict2(dict, prompt):
+    dict['image'].save('image.png')
+    dict['mask'].save('mask.png')
+    image = image_inpainting(prompt, image_path='image.png', mask_image_path='mask.png', is_origin=True)
     return image
 
 
-title = "Person Matting & Stable Diffusion In-Painting"
-description = "Inpainting Stable Diffusion <br/><b>mediapipe + Stable Diffusion<b/><br/>"
-gr.Interface(predict1, inputs=[gr.Image(source='upload', tool='sketch', type='pil'), gr.Textbox(label='prompt')],
-             outputs='image', title=title, description=description).launch(max_threads=True)
+image_input = gr.Image(source='upload', tool='sketch', type='pil')
+prompt = gr.Textbox(label='prompt')
+
+greeter_1 = gr.Interface(predict1, inputs=[image_input, prompt], outputs=gr.Image(label='auto'))
+greeter_2 = gr.Interface(predict2, inputs=[image_input, prompt], outputs=gr.Image(label='paint'))
+demo = gr.Parallel(greeter_1, greeter_2).launch(max_threads=True)
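
Note on surrounding context: the hunk header shows this change sits below mediapipe_segmentation(image_file, mask_file), whose body is outside the diff; only its final cv2.imwrite(mask_file, output_image) line is visible. A minimal sketch of what such a function typically looks like with MediaPipe's selfie-segmentation solution, assuming a hard black-and-white mask and a 'm_mask.png' default for mask_file (both assumptions, matching the single-argument call in predict1):

# Hypothetical sketch -- the real mediapipe_segmentation() body is not part of this hunk.
import cv2
import mediapipe as mp
import numpy as np


def mediapipe_segmentation(image_file, mask_file='m_mask.png'):  # default path is an assumption
    """Write a binary person mask for image_file to mask_file."""
    image = cv2.imread(image_file)
    with mp.solutions.selfie_segmentation.SelfieSegmentation(model_selection=1) as segmenter:
        # MediaPipe expects RGB input; OpenCV loads images as BGR.
        results = segmenter.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    # segmentation_mask is a float map in [0, 1]; threshold it into a hard 0/255 mask.
    condition = results.segmentation_mask > 0.5
    output_image = np.where(condition, 255, 0).astype(np.uint8)
    cv2.imwrite(mask_file, output_image)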
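
image_inpainting also relies on pipe, device, and image_grid, all defined earlier in app.py and untouched by this commit. A minimal sketch of that setup, assuming a diffusers StableDiffusionInpaintPipeline loaded from the commonly used runwayml/stable-diffusion-inpainting checkpoint and the grid helper from the diffusers docs (checkpoint name and helper body are assumptions):

# Hypothetical sketch of the setup the hunk depends on; not part of the diff.
import PIL.Image
import torch
from diffusers import StableDiffusionInpaintPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Checkpoint name is an assumption; any Stable Diffusion inpainting checkpoint
# with the same pipeline interface would work.
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)


def image_grid(imgs, rows, cols):
    # Paste the generated samples into a single rows x cols contact sheet.
    w, h = imgs[0].size
    grid = PIL.Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid

With this setup, gr.Parallel(greeter_1, greeter_2) feeds the same uploaded image and prompt to both predict1 (automatic MediaPipe person mask) and predict2 (hand-drawn sketch mask) and displays the two inpainting results side by side.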