nightfury committed
Commit 14f47c2
1 Parent(s): 6770691

Update app.py

Files changed (1): app.py (+5 -4)

app.py CHANGED
@@ -53,13 +53,13 @@ def download_image(url):
     response = requests.get(url)
     return PIL.Image.open(BytesIO(response.content)).convert("RGB")
 
-device = "cuda" if torch.cuda.is_available() else "cpu"
+device = "cpu" #"cuda" if torch.cuda.is_available() else "cpu"
 
 model_id_or_path = "CompVis/stable-diffusion-v1-4"
 pipe = StableDiffusionInpaintingPipeline.from_pretrained(
     model_id_or_path,
     revision="fp16",
-    torch_dtype=torch.float,
+    torch_dtype=torch.half, #float16
     use_auth_token=auth_token
 )
 
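Note on this hunk: it pins the pipeline to the CPU while still loading the fp16 branch (revision="fp16", torch_dtype=torch.half), and half precision is generally only well supported on CUDA devices. A device-aware variant of the same setup, as a minimal sketch (not part of this commit; it assumes the StableDiffusionInpaintingPipeline class and auth_token already defined earlier in app.py):

import torch

# Pick the device first, then match the dtype and weight revision to it
# (fp16 kernels are limited on CPU, so fall back to fp32 there).
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

pipe = StableDiffusionInpaintingPipeline.from_pretrained(  # class imported elsewhere in app.py
    "CompVis/stable-diffusion-v1-4",
    revision="fp16" if device == "cuda" else "main",
    torch_dtype=dtype,
    use_auth_token=auth_token,  # auth_token is defined earlier in app.py
)
pipe = pipe.to(device)
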
@@ -258,6 +258,7 @@ with image_blocks as demo:
         with gr.Box(elem_id="mask_radio").style(border=False):
             radio = gr.Radio(["draw a mask above", "type what to mask below", "type what to keep"], value="draw a mask above", show_label=False, interactive=True).style(container=False)
             word_mask = gr.Textbox(label = "What to find in your image", interactive=False, elem_id="word_mask", placeholder="Disabled").style(container=False)
+            img_res = gr.inputs.Dropdown("512*512", "256*256")
         prompt = gr.Textbox(label = 'Your prompt (what you want to add in place of what you are removing)')
         radio.change(fn=swap_word_mask, inputs=radio, outputs=word_mask,show_progress=False)
         radio.change(None, inputs=[], outputs=image_blocks, _js = """
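
Note on the added img_res line: it uses the legacy gr.inputs API, and gr.inputs.Dropdown normally takes its choices as a single list, so the two positional strings here would likely bind "256*256" to the next positional parameter (type) rather than add a second option; the selected value also still has to be parsed and passed into the inference call before it affects the output size. A minimal sketch of that wiring (an assumption, not part of this commit; parse_resolution is a hypothetical helper):

import gradio as gr

# Choices belong in one list; default and label are keyword arguments.
img_res = gr.inputs.Dropdown(["512*512", "256*256"], default="512*512", label="Output resolution")

def parse_resolution(res):
    # Hypothetical helper: turn a "512*512"-style choice into an integer (width, height) pair.
    w, h = res.split("*")
    return int(w), int(h)
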
@@ -278,10 +279,10 @@ with image_blocks as demo:
     </div>
 
 
-<div id="readme" class="Box-body readme blob js-code-block-container p-5 p-xl-6 gist-border-0">
+<div id="readme" >
 <article class="markdown-body entry-content container-lg" itemprop="text"><h1 dir="auto"><a id="user-content-image-segmentation-using-text-and-image-prompts" class="anchor" aria-hidden="true" href="#image-segmentation-using-text-and-image-prompts"><svg class="octicon octicon-link" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M7.775 3.275a.75.75 0 001.06 1.06l1.25-1.25a2 2 0 112.83 2.83l-2.5 2.5a2 2 0 01-2.83 0 .75.75 0 00-1.06 1.06 3.5 3.5 0 004.95 0l2.5-2.5a3.5 3.5 0 00-4.95-4.95l-1.25 1.25zm-4.69 9.64a2 2 0 010-2.83l2.5-2.5a2 2 0 012.83 0 .75.75 0 001.06-1.06 3.5 3.5 0 00-4.95 0l-2.5 2.5a3.5 3.5 0 004.95 4.95l1.25-1.25a.75.75 0 00-1.06-1.06l-1.25 1.25a2 2 0 01-2.83 0z"></path></svg></a>Image Segmentation Using Text and Image Prompts</h1>
 <p dir="auto">This repository contains the code used in the paper <a href="https://arxiv.org/abs/2112.10003" rel="nofollow">"Image Segmentation Using Text and Image Prompts"</a>.</p>
-<p dir="auto"><strong>The Paper has been accepted to CVPR 2022!</strong></p>
+
 <p dir="auto"><a target="_blank" rel="noopener noreferrer" href="/ThereforeGames/txt2mask/blob/main/repositories/clipseg/overview.png"><img src="/ThereforeGames/txt2mask/raw/main/repositories/clipseg/overview.png" alt="drawing" style="max-width: 100%;" height="200em"></a></p>
 <p dir="auto">The systems allows to create segmentation models without training based on:</p>
 <ul dir="auto">