orpatashnik committed on
Commit
f65b8d3
1 Parent(s): b88450d

fix inversion

Browse files
Files changed (2) hide show
  1. gradio_app.py +6 -6
  2. main.py +2 -2
gradio_app.py CHANGED
@@ -13,10 +13,12 @@ from main import LPMConfig, main, setup
13
  DESCRIPTION = '''# Localizing Object-level Shape Variations with Text-to-Image Diffusion Models
14
  This is a demo for our "Localizing Object-level Shape Variations with Text-to-Image Diffusion Models" [paper](https://arxiv.org/abs/2303.11306).
15
  We introduce a method that generates object-level shape variation for a given image.
16
- This demo allows using a real image as well as a generated image. For a real image, a matching prompt is required.
 
17
  '''
18
 
19
  stable, stable_config = setup(LPMConfig())
 
20
 
21
  def main_pipeline(
22
  prompt: str,
@@ -30,8 +32,6 @@ def main_pipeline(
30
  seed: int,
31
  input_image: str):
32
  prompt = prompt.replace(object_of_interest, '{word}')
33
- print(number_of_variations)
34
- print(proxy_words)
35
  proxy_words = proxy_words.split(',') if proxy_words != '' else []
36
  objects_to_preserve = objects_to_preserve.split(',') if objects_to_preserve != '' else []
37
  background_nouns = background_nouns.split(',') if background_nouns != '' else []
@@ -48,7 +48,7 @@ def main_pipeline(
48
  real_image_path="" if input_image is None else input_image
49
  )
50
 
51
- result_images, result_proxy_words = main(stable, stable_config, args)
52
  result_images = [im.permute(1, 2, 0).cpu().numpy() for im in result_images]
53
  result_images = [(im * 255).astype(np.uint8) for im in result_images]
54
  result_images = [Image.fromarray(im) for im in result_images]
@@ -60,8 +60,8 @@ with gr.Blocks(css='style.css') as demo:
60
  gr.Markdown(DESCRIPTION)
61
 
62
  gr.HTML(
63
- '''<center><a href="https://huggingface.co/spaces/orpatashnik/local-prompt-mixing?duplicate=true">
64
- <img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
65
 
66
  with gr.Row():
67
  with gr.Column():
 
13
  DESCRIPTION = '''# Localizing Object-level Shape Variations with Text-to-Image Diffusion Models
14
  This is a demo for our "Localizing Object-level Shape Variations with Text-to-Image Diffusion Models" [paper](https://arxiv.org/abs/2303.11306).
15
  We introduce a method that generates object-level shape variation for a given image.
16
+ This demo supports both generated images and real images. To modify a real image, please upload it to the input image block and provide a prompt that describes its contents.
17
+
18
  '''
19
 
20
  stable, stable_config = setup(LPMConfig())
21
+ stable_for_inversion, _ = setup(LPMConfig())
22
 
23
  def main_pipeline(
24
  prompt: str,
 
32
  seed: int,
33
  input_image: str):
34
  prompt = prompt.replace(object_of_interest, '{word}')
 
 
35
  proxy_words = proxy_words.split(',') if proxy_words != '' else []
36
  objects_to_preserve = objects_to_preserve.split(',') if objects_to_preserve != '' else []
37
  background_nouns = background_nouns.split(',') if background_nouns != '' else []
 
48
  real_image_path="" if input_image is None else input_image
49
  )
50
 
51
+ result_images, result_proxy_words = main(stable, stable_config, stable_for_inversion, args)
52
  result_images = [im.permute(1, 2, 0).cpu().numpy() for im in result_images]
53
  result_images = [(im * 255).astype(np.uint8) for im in result_images]
54
  result_images = [Image.fromarray(im) for im in result_images]
 
60
  gr.Markdown(DESCRIPTION)
61
 
62
  gr.HTML(
63
+ '''<a href="https://huggingface.co/spaces/orpatashnik/local-prompt-mixing?duplicate=true">
64
+ <img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key''')
65
 
66
  with gr.Row():
67
  with gr.Column():
main.py CHANGED
@@ -34,7 +34,7 @@ def setup(args):
34
  return ldm_stable, ldm_stable_config
35
 
36
 
37
- def main(ldm_stable, ldm_stable_config, args):
38
 
39
  similar_words, prompts, another_prompts = get_proxy_prompts(args, ldm_stable)
40
  exp_path = save_args_dict(args, similar_words)
@@ -44,7 +44,7 @@ def main(ldm_stable, ldm_stable_config, args):
44
  uncond_embeddings = None
45
 
46
  if args.real_image_path != "":
47
- x_t, uncond_embeddings = invert_image(args, ldm_stable, ldm_stable_config, prompts, exp_path)
48
 
49
  image, x_t, orig_all_latents, orig_mask, average_attention = generate_original_image(args, ldm_stable, ldm_stable_config, prompts, x_t, uncond_embeddings)
50
  save_image(ToTensor()(image[0]), f"{exp_path}/{similar_words[0]}.jpg")
 
34
  return ldm_stable, ldm_stable_config
35
 
36
 
37
+ def main(ldm_stable, ldm_stable_config, ldm_stable_inversion, args):
38
 
39
  similar_words, prompts, another_prompts = get_proxy_prompts(args, ldm_stable)
40
  exp_path = save_args_dict(args, similar_words)
 
44
  uncond_embeddings = None
45
 
46
  if args.real_image_path != "":
47
+ x_t, uncond_embeddings = invert_image(args, ldm_stable_inversion, ldm_stable_config, prompts, exp_path)
48
 
49
  image, x_t, orig_all_latents, orig_mask, average_attention = generate_original_image(args, ldm_stable, ldm_stable_config, prompts, x_t, uncond_embeddings)
50
  save_image(ToTensor()(image[0]), f"{exp_path}/{similar_words[0]}.jpg")