oshita-n committed on
Commit
4b08e6e
1 Parent(s): fcffa98
Files changed (1)
  1. app.py +35 -0
app.py ADDED
@@ -0,0 +1,35 @@
+ import gradio as gr
+ from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
+ import torch
+ from PIL import Image
+ import numpy as np
+
+
+ def process(input_image, prompt):
+     # Preprocess the text prompt and image into CLIPSeg model inputs
+     inputs = processor(text=prompt, images=input_image, padding="max_length", return_tensors="pt")
+     # Predict the segmentation logits
+     with torch.no_grad():
+         outputs = model(**inputs)
+     preds = torch.sigmoid(outputs.logits).squeeze().cpu().numpy()
+     # Threshold the probabilities into a binary (0/255) mask
+     preds = np.where(preds > 0.5, 255, 0).astype(np.uint8)
+     # Resize the mask back to the original image size
+     preds = Image.fromarray(preds)
+     preds = np.array(preds.resize((input_image.width, input_image.height)))
+     return preds
+
+
+ if __name__ == '__main__':
+     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
+     model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
+     input_image = gr.Image(label='image', type='pil')
+     prompt = gr.Textbox(label='Prompt')
+     ips = [
+         input_image, prompt
+     ]
+     outputs = "image"
+     iface = gr.Interface(fn=process,
+                          inputs=ips,
+                          outputs=outputs)
+     iface.launch()
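
For reference, a minimal sketch (not part of the commit) of calling process() outside the Gradio UI, e.g. for a quick local test. The file names photo.jpg and mask.png and the prompt "a cat" are hypothetical placeholders; app refers to the app.py added above.

import app
from PIL import Image
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

# process() looks up processor/model as module-level globals of app.py,
# so set them on the imported module before calling it
app.processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
app.model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.open("photo.jpg").convert("RGB")
mask = app.process(image, "a cat")  # uint8 array of 0/255 values at the input image size
Image.fromarray(mask).save("mask.png")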