aimersion committed on
Commit
8f879f7
1 Parent(s): 819389a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -13
app.py CHANGED
@@ -10,12 +10,13 @@ dtype = torch.float16
10
  device = "cuda" if torch.cuda.is_available() else "cpu"
11
 
12
  # Load the diffusion pipeline
13
- pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype).to(device)
 
14
 
15
  MAX_SEED = np.iinfo(np.int32).max
16
  MAX_IMAGE_SIZE = 2048
17
 
18
- def infer(prompt, negative_prompt="", seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, guidance_scale=7.5, progress=gr.Progress(track_tqdm=True)):
19
  start_time = time.time()
20
 
21
  if width > MAX_IMAGE_SIZE or height > MAX_IMAGE_SIZE:
@@ -28,7 +29,6 @@ def infer(prompt, negative_prompt="", seed=42, randomize_seed=False, width=1024,
28
  try:
29
  image = pipe(
30
  prompt=prompt,
31
- negative_prompt=negative_prompt,
32
  width=width,
33
  height=height,
34
  num_inference_steps=num_inference_steps,
@@ -45,9 +45,9 @@ def infer(prompt, negative_prompt="", seed=42, randomize_seed=False, width=1024,
45
  return image, seed, None
46
 
47
  examples = [
48
- ["a tiny astronaut hatching from an egg on the moon", "blurry, low quality"],
49
- ["a cat holding a sign that says hello world", "dog, text, writing"],
50
- ["an anime illustration of a wiener schnitzel", "realistic, photograph"],
51
  ]
52
 
53
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
@@ -64,11 +64,6 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
64
  placeholder="Enter your prompt",
65
  lines=3
66
  )
67
- negative_prompt = gr.Textbox(
68
- label="Negative Prompt",
69
- placeholder="Enter things to avoid in the image",
70
- lines=2
71
- )
72
  run_button = gr.Button("Generate Image", variant="primary")
73
 
74
  with gr.Column(scale=2):
@@ -89,7 +84,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
89
 
90
  gr.Examples(
91
  examples=examples,
92
- inputs=[prompt, negative_prompt],
93
  outputs=[result, seed_output],
94
  fn=infer,
95
  cache_examples=True
@@ -97,7 +92,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
97
 
98
  run_button.click(
99
  fn=infer,
100
- inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, num_inference_steps, guidance_scale],
101
  outputs=[result, seed_output]
102
  )
103
 
 
10
  device = "cuda" if torch.cuda.is_available() else "cpu"
11
 
12
  # Load the diffusion pipeline
13
+ pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype)
14
+ pipe = pipe.to(device)
15
 
16
  MAX_SEED = np.iinfo(np.int32).max
17
  MAX_IMAGE_SIZE = 2048
18
 
19
+ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, guidance_scale=7.5, progress=gr.Progress(track_tqdm=True)):
20
  start_time = time.time()
21
 
22
  if width > MAX_IMAGE_SIZE or height > MAX_IMAGE_SIZE:
 
29
  try:
30
  image = pipe(
31
  prompt=prompt,
 
32
  width=width,
33
  height=height,
34
  num_inference_steps=num_inference_steps,
 
45
  return image, seed, None
46
 
47
  examples = [
48
+ ["a tiny astronaut hatching from an egg on the moon"],
49
+ ["a cat holding a sign that says hello world"],
50
+ ["an anime illustration of a wiener schnitzel"],
51
  ]
52
 
53
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
 
64
  placeholder="Enter your prompt",
65
  lines=3
66
  )
 
 
 
 
 
67
  run_button = gr.Button("Generate Image", variant="primary")
68
 
69
  with gr.Column(scale=2):
 
84
 
85
  gr.Examples(
86
  examples=examples,
87
+ inputs=[prompt],
88
  outputs=[result, seed_output],
89
  fn=infer,
90
  cache_examples=True
 
92
 
93
  run_button.click(
94
  fn=infer,
95
+ inputs=[prompt, seed, randomize_seed, width, height, num_inference_steps, guidance_scale],
96
  outputs=[result, seed_output]
97
  )
98