abhishek (HF staff) committed
Commit d786888
1 Parent(s): 949e2ae

clear cache

__pycache__/app.cpython-38.pyc ADDED
Binary file (4.24 kB)
 
__pycache__/controlnet_inpaint.cpython-38.pyc ADDED
Binary file (35.9 kB)
 
app.py CHANGED
@@ -58,7 +58,6 @@ with gr.Blocks() as demo:
         seg_img = gr.Image(label="Segmentation", interactive=False)
         output_img = gr.Image(label="Output", interactive=False)

-
     with gr.Row():
         prompt_text = gr.Textbox(lines=1, label="Prompt")
         negative_prompt_text = gr.Textbox(lines=1, label="Negative Prompt")
@@ -78,6 +77,8 @@ with gr.Blocks() as demo:
             point_labels=input_label,
             multimask_output=False,
         )
+        # clear torch cache
+        torch.cuda.empty_cache()
         if bg:
             mask = np.logical_not(mask)
             mask = Image.fromarray(mask[0, :, :])
@@ -94,6 +95,8 @@ with gr.Blocks() as demo:
         rgb_mask[:, :, 2] = boolean_mask * rgb[2]
         finseg += rgb_mask

+        torch.cuda.empty_cache()
+
         return mask, finseg

     def inpaint(image, mask, seg_img, prompt, negative_prompt):
@@ -106,6 +109,7 @@ with gr.Blocks() as demo:
         seg_img = seg_img.resize((512, 512))

         output = pipe(prompt, image, mask, seg_img, negative_prompt=negative_prompt).images[0]
+        torch.cuda.empty_cache()
         return output

     def _clear(sel_pix, img, mask, seg, out, prompt, neg_prompt, bg):
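
For context on what the added calls do: torch.cuda.empty_cache() returns GPU memory blocks that PyTorch's caching allocator has already freed back to the driver, so the Space's GPU does not stay fully reserved between requests. It does not release memory still held by live tensors. The snippet below is an illustrative sketch only, not part of app.py (the report helper and the tensor size are made up); it assumes a CUDA device is available.

    import torch

    def report(tag):
        # memory_allocated: bytes currently held by live tensors
        # memory_reserved: bytes held by the caching allocator, including freed blocks
        print(f"{tag}: allocated={torch.cuda.memory_allocated() / 1e6:.1f} MB, "
              f"reserved={torch.cuda.memory_reserved() / 1e6:.1f} MB")

    if torch.cuda.is_available():
        report("start")
        x = torch.randn(4096, 4096, device="cuda")  # ~64 MB fp32 tensor
        report("after allocation")
        del x                     # the tensor is freed, but its blocks stay cached
        report("after del")
        torch.cuda.empty_cache()  # hand the cached blocks back to the driver
        report("after empty_cache")

Placing the calls after the segmentation mask and the inpainting output have been produced, as in the hunks above, means only the allocator's already-freed intermediate buffers are returned; the results themselves remain available for Gradio to display.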