rafaaa2105 committed
Commit 56c7207
1 Parent(s): e4a91d8

Update app.py

Files changed (1)
  1. app.py +20 -19
app.py CHANGED
@@ -2,9 +2,9 @@ import gradio as gr
 import numpy as np
 import random
 import os
-
-import spaces #[uncomment to use ZeroGPU]
-from diffusers import AutoPipelineForText2Image, AutoencoderTiny
+from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
+import spaces
+from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL
 import torch

 device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -17,7 +17,7 @@ else:
     dtype = torch.float32

 taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
-pipe = pipeline = AutoPipelineForText2Image.from_pretrained("black-forest-labs/FLUX.1-dev", token=hf_token, torch_dtype=torch.bfloat16)
+pipe = pipeline = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", token=hf_token, torch_dtype=torch.bfloat16)
 pipe.load_lora_weights('aleksa-codes/flux-ghibsky-illustration', weight_name='lora.safetensors')
 pipe = pipe.to(device)

@@ -25,7 +25,7 @@ MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024


-@spaces.GPU #[uncomment to use ZeroGPU]
+@spaces.GPU
 def infer(
     prompt,
     seed=42,
@@ -41,16 +41,17 @@ def infer(

     generator = torch.Generator().manual_seed(seed)

-    image = pipe(
-        prompt=prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator,
-    ).images[0]
-
-    return image, seed
+    for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
+        prompt=prompt,
+        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps,
+        width=width,
+        height=height,
+        generator=generator,
+        output_type="pil",
+        good_vae=good_vae,
+    ):
+        yield img, seed


 examples = [
@@ -100,7 +101,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=1024, # Replace with defaults that work for your model
+                    value=1024,
                 )

                 height = gr.Slider(
@@ -108,7 +109,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=1024, # Replace with defaults that work for your model
+                    value=1024,
                 )

             with gr.Row():
@@ -117,7 +118,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=0.0,
                     maximum=10.0,
                     step=0.1,
-                    value=3.5, # Replace with defaults that work for your model
+                    value=3.5,
                 )

                 num_inference_steps = gr.Slider(
@@ -125,7 +126,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=1,
                     maximum=50,
                     step=1,
-                    value=28, # Replace with defaults that work for your model
+                    value=28,
                 )

         gr.Examples(examples=examples, inputs=[prompt])
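
Note that the new infer() body calls pipe.flux_pipe_call_that_returns_an_iterable_of_images(...) and passes good_vae, but the hunks above do not show where either is defined. A minimal sketch of the usual wiring, placed after the pipeline setup in app.py; the subfolder="vae" checkpoint location and the method-binding step are assumptions, and pipe, device, and dtype refer to names already defined earlier in the file:

import torch
from diffusers import AutoencoderKL
from live_preview_helpers import flux_pipe_call_that_returns_an_iterable_of_images

# Assumption: a full-quality VAE decodes the final frame, while taef1
# (AutoencoderTiny) is used by the helper for fast intermediate previews.
good_vae = AutoencoderKL.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype
).to(device)

# Assumption: the imported helper is bound onto the pipeline instance so that
# pipe.flux_pipe_call_that_returns_an_iterable_of_images(...) resolves as a method.
pipe.flux_pipe_call_that_returns_an_iterable_of_images = (
    flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
)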
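
Because infer() now yields (image, seed) pairs instead of returning once, Gradio treats it as a generator and pushes each yielded frame to the output components, which is what produces the live preview in the UI. A sketch of the event wiring that consumes it; the handle and component names below (run_button, result, and the inputs list) are hypothetical stand-ins for whatever the rest of app.py defines:

run_button.click(
    fn=infer,  # generator function: each yield updates the outputs in place
    inputs=[prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
    outputs=[result, seed],
)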