adirik committed
Commit 19cb3e9 · Parent: 1d20a5b

merge CLIPSeg demo

Files changed (1): app.py (+24 -11)
app.py CHANGED
@@ -1,12 +1,17 @@
-import gradio as gr
-
-from PIL import Image
 import os
 import torch
+import gradio as gr
+from PIL import Image
+import matplotlib.pyplot as plt
 from diffusers import DiffusionPipeline
+from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
 
 from share_btn import community_icon_html, loading_icon_html, share_js
 
+
+processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
+model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
+
 pipe = DiffusionPipeline.from_pretrained(
     "Fantasy-Studio/Paint-by-Example",
     torch_dtype=torch.float16,
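For context, the CLIPSeg processor and model added above can be exercised on their own. A minimal sketch, assuming a hypothetical local test image and prompt (not part of this commit):

```python
import torch
from PIL import Image
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

# Same checkpoint as in app.py above.
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.open("example.jpg").convert("RGB")  # hypothetical test image
inputs = processor(text="a dog", images=image, padding="max_length", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # mask logits at the model's fixed low resolution

print(logits.shape)  # e.g. 352x352 for this checkpoint, independent of the input size
```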
@@ -14,14 +19,25 @@ pipe = DiffusionPipeline.from_pretrained(
 pipe = pipe.to("cuda")
 
 
-def read_content(file_path: str) -> str:
-    """read the content of target file
-    """
+def process_image(image, prompt):
+    inputs = processor(text=prompt, images=image, padding="max_length", return_tensors="pt")
+
+    # predict
+    with torch.no_grad():
+        outputs = model(**inputs)
+        preds = outputs.logits
+
+    filename = f"mask.png"
+    plt.imsave(filename, torch.sigmoid(preds))
+    return Image.open("mask.png").convert("RGB")
+
+
+def read_content(file_path):
     with open(file_path, 'r', encoding='utf-8') as f:
         content = f.read()
-
     return content
 
+
 def predict(dict, reference, scale, seed, step):
     width, height = dict["image"].size
     if width < height:
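The new process_image helper writes torch.sigmoid(preds) to mask.png with matplotlib (which applies a colormap) and reloads it as RGB. Below is a hedged alternative sketch that skips the file round-trip and returns a binary mask resized to the source resolution, relying on the module-level processor and model from the diff above; the helper name, the threshold, and the resize step are assumptions, not part of the commit:

```python
import numpy as np
import torch
from PIL import Image

def clipseg_mask(image, prompt, threshold=0.5):
    """Hypothetical helper: text prompt -> binary PIL mask at the source resolution."""
    inputs = processor(text=prompt, images=image, padding="max_length", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.sigmoid(logits).squeeze().cpu().numpy()   # soft mask at low resolution
    mask = Image.fromarray((probs * 255).astype(np.uint8))  # grayscale PIL image
    mask = mask.resize(image.size)                          # back to the source image size
    return mask.point(lambda p: 255 if p >= threshold * 255 else 0).convert("RGB")
```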
@@ -123,8 +139,7 @@ with image_blocks as demo:
     community_icon = gr.HTML(community_icon_html, visible=True)
     loading_icon = gr.HTML(loading_icon_html, visible=True)
     share_button = gr.Button("Share to community", elem_id="share-btn", visible=True)
-
-
+
     with gr.Row():
         with gr.Column():
             gr.Examples(image_list, inputs=[image],label="Examples - Source Image",examples_per_page=12)
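For reference, the gr.Examples pattern used above in isolation; a minimal sketch with hypothetical example paths standing in for image_list:

```python
import gradio as gr

with gr.Blocks() as demo:
    image = gr.Image(label="Source Image", type="pil")
    # Hypothetical example files; image_list in app.py plays this role.
    gr.Examples(
        ["examples/house.jpg", "examples/dog.jpg"],
        inputs=[image],
        label="Examples - Source Image",
        examples_per_page=12,
    )

demo.launch()
```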
@@ -134,8 +149,6 @@ with image_blocks as demo:
     btn.click(fn=predict, inputs=[image, reference, guidance, seed, steps], outputs=[image_out, community_icon, loading_icon, share_button])
     share_button.click(None, [], [], _js=share_js)
 
-
-
     gr.HTML(
         """
         <div class="footer">
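Putting the pieces together, a hedged sketch of how a text prompt could drive the whole flow, combining the clipseg_mask helper sketched earlier with the documented Paint-by-Example pipeline call and the module-level pipe from app.py. This is illustrative only, not the predict function in app.py (whose body lies outside the hunks shown); the 512x512 working resolution and the function name are assumptions:

```python
import torch
from PIL import Image

def text_guided_paint(init_image, reference, prompt, scale=5.0, seed=0, steps=50):
    """Hypothetical end-to-end helper: prompt -> CLIPSeg mask -> Paint-by-Example."""
    mask = clipseg_mask(init_image, prompt)            # hypothetical helper sketched above
    generator = torch.Generator("cuda").manual_seed(seed)
    result = pipe(                                     # module-level pipeline from app.py
        image=init_image.resize((512, 512)),
        mask_image=mask.resize((512, 512)),
        example_image=reference.resize((512, 512)),
        guidance_scale=scale,
        num_inference_steps=steps,
        generator=generator,
    ).images[0]
    return result
```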