nigeljw committed on
Commit
671a051
1 Parent(s): 27ee5cf

Modified prompts

Files changed (1)
  1. app.py +13 -11
app.py CHANGED
@@ -81,7 +81,7 @@ def Diffuse(latentWalk, generatorSeed, inputImage, mask, prompt, negativePrompt,
     if lastSeed != generatorSeed:
         generator = torch.Generator(device).manual_seed(generatorSeed)
         lastSeed = generatorSeed
-
+
     newImage = outpaintPipeline(prompt=prompt,
                                 negative_prompt=negativePrompt,
                                 image=inputImage,
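
The hunk above re-seeds the generator only when the seed slider actually changes, so consecutive webcam frames keep drawing from one deterministic RNG stream. A minimal sketch of that pattern, assuming the diffusers StableDiffusionInpaintPipeline and the runwayml/stable-diffusion-inpainting checkpoint named in a commented-out dropdown later in this diff; the wiring around the call is illustrative, since the rest of app.py is not shown:

import torch
from diffusers import StableDiffusionInpaintPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
outpaintPipeline = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)

lastSeed = 1234  # illustrative default; app.py defines its own
generator = torch.Generator(device).manual_seed(lastSeed)

def diffuse(inputImage, mask, prompt, negativePrompt, guidanceScale, numInferenceSteps, generatorSeed):
    global generator, lastSeed
    # Re-seed only when the slider moves, so an unchanged seed reuses the
    # same generator state from frame to frame.
    if lastSeed != generatorSeed:
        generator = torch.Generator(device).manual_seed(generatorSeed)
        lastSeed = generatorSeed
    result = outpaintPipeline(prompt=prompt,
                              negative_prompt=negativePrompt,
                              image=inputImage,
                              mask_image=mask,
                              guidance_scale=guidanceScale,
                              num_inference_steps=numInferenceSteps,
                              generator=generator)
    return result.images[0]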
@@ -118,10 +118,10 @@ latentWalkDesc = "This allows you to walk short spans across the latent space wi
 examplePrompt1 = "A person in a room" #A person in a room with colored hair"
 examplePrompt2 = "A person with colored hair" #"People in a room with colored hair"
 examplePrompt3 = "A person on a beach with long hair" #"A man on a beach with long hair"
-examplePrompt4 = "A person in a field under a night sky" #"A woman on a beach with long hair"
-examplePrompt5 = "A panda eating bamboo" #"A panda eating bamboo"
+examplePrompt4 = "A person outside in a field under a starry night sky" #"A woman on a beach with long hair"
+examplePrompt5 = "A person in a forest" #"A panda eating bamboo" #"A panda eating bamboo"
 examplePrompt6 = "A bird flying in the sky" #"A family together in a room"
-examplePrompt7 = "A Koala bear" #"A family together outside with colored hair"
+examplePrompt7 = "A person in a room" #"A family together outside with colored hair"

 with gradio.Blocks(live=True) as ux:
     gradio.Markdown("This generative machine learning demonstration streams stable diffusion outpainting inference live from your camera on your computer or phone to expand your local reality and create an alternate world. High quality frame to frame determinism is a hard problem to solve for latent diffusion models as the generation is inherently relative to input noise distributions for the latents, and many factors such as the inherent Bayer noise from the camera images as well as anything that is altered between camera images (such as focus, white balance, etc) causes non-determinism between frames. Some methods apply spationtemporal attention, but this demonstration focuses on the control over the input latents to navigate the latent space. **Increase the lighting of your physical scene from your camera's perspective, and avoid self shadows of scene content, to improve the quality and consistency of the scene generation.**")
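
The description above frames frame-to-frame coherence as control over the input latents. One common way to realize such a latent walk is to interpolate between two fixed noise tensors and hand the result to the pipeline's latents argument, which the diffusers pipelines accept; GenerateNewLatentsForInference is not shown in this diff, so the slerp and shapes below are an illustrative sketch, not the app's actual implementation:

import torch

def slerp(t, v0, v1, eps=1e-7):
    # Spherical interpolation keeps the blend on (approximately) the same
    # Gaussian shell the model expects, unlike a straight lerp.
    a, b = v0.flatten(), v1.flatten()
    dot = torch.dot(a / a.norm(), b / b.norm()).clamp(-1 + eps, 1 - eps)
    theta = torch.acos(dot)
    return (torch.sin((1 - t) * theta) * v0 + torch.sin(t * theta) * v1) / torch.sin(theta)

torch.manual_seed(1234)
shape = (1, 4, 64, 64)  # batch, UNet latent channels, 512/8, 512/8
startLatents = torch.randn(shape)
endLatents = torch.randn(shape)

latentWalk = 0.5  # the slider value: how far to walk between the endpoints
latents = slerp(latentWalk, startLatents, endLatents)
# newImage = outpaintPipeline(..., latents=latents)  # fixed latents keep frames coherent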
@@ -129,12 +129,14 @@ with gradio.Blocks(live=True) as ux:
     with gradio.Column():
         #staticLatents = gradio.Checkbox(label="Static Latents", info=staticLatentsDesc, value=True, interactive=True)
         inputImage = gradio.Image(label="Input Feed", source="webcam", shape=[512,512], streaming=True)
+        #inputImage2 = gradio.Image(label="Input Feed 2", source="webcam", shape=[512,512], streaming=True)
         mask = gradio.Image(label="Mask", type="pil", value=defaultMask)
         prompt = gradio.Textbox(label="Prompt", info=promptDesc, placeholder=examplePrompt1, value=examplePrompt1, lines=3)
         negativePrompt = gradio.Textbox(label="Negative Prompt", info=negPromptDesc, placeholder="Facial hair", value="Text, words", lines=3)
         guidanceScale = gradio.Slider(label="Guidance Scale", info="A higher value causes the generation to be more relative to the text prompt conditioning.", maximum=100, minimum=1, value=7.5, step=0.1)
         numInferenceSteps = gradio.Slider(label="Number of Inference Steps", info=numInfStepsDesc, maximum=100, minimum=1, value=20, step=1)
         generatorSeed = gradio.Slider(label="Generator Seed", info=generatorSeedDesc, maximum=10000, minimum=1, value=lastSeed, step=1)
+        #numViews = gradio.Slider(label="Number of Views", info="The number of discrete view perspectives to merge together in the view expansion.", maximum=100, minimum=1, value=1, step=1)
         #modelIndex = gradio.Dropdown(modelNames, label="Model", value="runwayml/stable-diffusion-inpainting")
         #inputImage.style(full_width=True)

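The Guidance Scale slider's info text is describing classifier-free guidance: at every denoising step the pipeline blends a text-conditioned and an unconditioned noise prediction, and a larger scale pushes the result harder toward the prompt. A toy illustration of that blend, with dummy tensors standing in for real UNet outputs:

import torch

torch.manual_seed(0)
uncond = torch.randn(1, 4, 64, 64)  # noise prediction without the prompt
cond = torch.randn(1, 4, 64, 64)    # noise prediction with the prompt
guidance_scale = 7.5                # the slider's default above
noise_pred = uncond + guidance_scale * (cond - uncond)
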
@@ -149,13 +151,13 @@
     generateLatents.click(GenerateNewLatentsForInference, outputs=latentWalk)
     inputImage.change(fn=Diffuse, inputs=inferenceInputs, outputs=outputImage, show_progress=False)

-    examples = [[1.0, 1234, "assets/input/man.png", "assets/masks/diamond.png", examplePrompt1, "", 7.5, 20],
-                [0.5, 2048, "assets/input/people.jpg", "assets/masks/star.png", examplePrompt2, "", 7.5, 15],
-                [0.3, 8192, "assets/input/man.png", "assets/masks/sphere.png", examplePrompt3, "", 7.5, 25],
-                [0.7, 1024, "assets/input/woman.jpg", "assets/masks/spiral.png", examplePrompt4, "", 7.5, 15],
-                [1.0, 512, "assets/input/man.png", "assets/masks/square.png", examplePrompt5, "", 7.5, 10],
-                [0.1, 256, "assets/input/family.jpg", "assets/masks/wave.png", examplePrompt6, "", 11.5, 30],
-                [0.9, 9999, "assets/input/family.jpg", "assets/masks/maze.png", examplePrompt7, "", 17.5, 35],]
+    examples = [[1.0, 1234, "assets/input/man.png", "assets/masks/diamond.png", examplePrompt1, "", 7.5, 20, 1],
+                [0.5, 2048, "assets/input/woman.jpg", "assets/masks/star.png", examplePrompt2, "", 7.5, 15, 1],
+                [0.3, 8192, "assets/input/man.png", "assets/masks/sphere.png", examplePrompt3, "", 7.5, 25, 1],
+                [0.7, 1024, "assets/input/woman.jpg", "assets/masks/spiral.png", examplePrompt4, "", 7.5, 15, 1],
+                [1.0, 512, "assets/input/man.png", "assets/masks/square.png", examplePrompt5, "", 7.5, 10, 1],
+                [0.1, 256, "assets/input/woman.jpg", "assets/masks/wave.png", examplePrompt6, "", 11.5, 30, 1],
+                [0.9, 9999, "assets/input/man.png", "assets/masks/maze.png", examplePrompt7, "", 17.5, 35, 1],]

     inputExamples = gradio.Examples(examples, inputs=inferenceInputs, outputs=outputImage, fn=Diffuse)
 
81
  if lastSeed != generatorSeed:
82
  generator = torch.Generator(device).manual_seed(generatorSeed)
83
  lastSeed = generatorSeed
84
+
85
  newImage = outpaintPipeline(prompt=prompt,
86
  negative_prompt=negativePrompt,
87
  image=inputImage,
 
118
  examplePrompt1 = "A person in a room" #A person in a room with colored hair"
119
  examplePrompt2 = "A person with colored hair" #"People in a room with colored hair"
120
  examplePrompt3 = "A person on a beach with long hair" #"A man on a beach with long hair"
121
+ examplePrompt4 = "A person outside in a field under a starry night sky" #"A woman on a beach with long hair"
122
+ examplePrompt5 = "A person in a forest" #"A panda eating bamboo" #"A panda eating bamboo"
123
  examplePrompt6 = "A bird flying in the sky" #"A family together in a room"
124
+ examplePrompt7 = "A person in a room" #"A family together outside with colored hair"
125
 
126
  with gradio.Blocks(live=True) as ux:
127
  gradio.Markdown("This generative machine learning demonstration streams stable diffusion outpainting inference live from your camera on your computer or phone to expand your local reality and create an alternate world. High quality frame to frame determinism is a hard problem to solve for latent diffusion models as the generation is inherently relative to input noise distributions for the latents, and many factors such as the inherent Bayer noise from the camera images as well as anything that is altered between camera images (such as focus, white balance, etc) causes non-determinism between frames. Some methods apply spationtemporal attention, but this demonstration focuses on the control over the input latents to navigate the latent space. **Increase the lighting of your physical scene from your camera's perspective, and avoid self shadows of scene content, to improve the quality and consistency of the scene generation.**")
 
129
  with gradio.Column():
130
  #staticLatents = gradio.Checkbox(label="Static Latents", info=staticLatentsDesc, value=True, interactive=True)
131
  inputImage = gradio.Image(label="Input Feed", source="webcam", shape=[512,512], streaming=True)
132
+ #inputImage2 = gradio.Image(label="Input Feed 2", source="webcam", shape=[512,512], streaming=True)
133
  mask = gradio.Image(label="Mask", type="pil", value=defaultMask)
134
  prompt = gradio.Textbox(label="Prompt", info=promptDesc, placeholder=examplePrompt1, value=examplePrompt1, lines=3)
135
  negativePrompt = gradio.Textbox(label="Negative Prompt", info=negPromptDesc, placeholder="Facial hair", value="Text, words", lines=3)
136
  guidanceScale = gradio.Slider(label="Guidance Scale", info="A higher value causes the generation to be more relative to the text prompt conditioning.", maximum=100, minimum=1, value=7.5, step= 0.1)
137
  numInferenceSteps = gradio.Slider(label="Number of Inference Steps", info=numInfStepsDesc, maximum=100, minimum=1, value=20, step=1)
138
  generatorSeed = gradio.Slider(label="Generator Seed", info=generatorSeedDesc, maximum=10000, minimum=1, value=lastSeed, step=1)
139
+ #numViews = gradio.Slider(label="Number of Views", info="The number of discrete view perspectives to merge together in the view expansion.", maximum=100, minimum=1, value=1, step=1)
140
  #modelIndex = gradio.Dropdown(modelNames, label="Model", value="runwayml/stable-diffusion-inpainting")
141
  #inputImage.style(full_width=True)
142
 
 
151
  generateLatents.click(GenerateNewLatentsForInference, outputs=latentWalk)
152
  inputImage.change(fn=Diffuse, inputs=inferenceInputs, outputs=outputImage, show_progress=False)
153
 
154
+ examples = [[1.0, 1234, "assets/input/man.png","assets/masks/diamond.png", examplePrompt1, "", 7.5, 20, 1],
155
+ [0.5, 2048, "assets/input/woman.jpg", "assets/masks/star.png", examplePrompt2, "", 7.5, 15, 1],
156
+ [0.3, 8192, "assets/input/man.png", "assets/masks/sphere.png", examplePrompt3, "", 7.5, 25, 1],
157
+ [0.7, 1024, "assets/input/woman.jpg", "assets/masks/spiral.png", examplePrompt4, "", 7.5, 15, 1],
158
+ [1.0, 512, "assets/input/man.png", "assets/masks/square.png", examplePrompt5, "", 7.5, 10, 1],
159
+ [0.1, 256, "assets/input/woman.jpg", "assets/masks/wave.png", examplePrompt6, "", 11.5, 30, 1],
160
+ [0.9, 9999, "assets/input/man.png", "assets/masks/maze.png", examplePrompt7, "", 17.5, 35, 1],]
161
 
162
  inputExamples = gradio.Examples(examples, inputs=inferenceInputs, outputs=outputImage, fn=Diffuse)
163
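gradio.Examples expects each row to supply one value per component in inputs, in the same order, which is why every example row in this commit gains a trailing value (presumably matching a ninth entry in inferenceInputs, which is defined outside this hunk). A self-contained sketch of that contract, with hypothetical components in place of the app's:

import gradio

def scale(value, factor):
    # Hypothetical stand-in for Diffuse: any callable matching the inputs.
    return value * factor

with gradio.Blocks() as demo:
    value = gradio.Number(label="Value")
    factor = gradio.Slider(minimum=1, maximum=10, value=2, step=1, label="Factor")
    result = gradio.Number(label="Result")
    rows = [[3, 2], [5, 4]]  # one value per input component, per row
    gradio.Examples(rows, inputs=[value, factor], outputs=result, fn=scale)

# demo.launch()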