Fabrice-TIERCELIN committed
Commit 484cdbe
Parent: cb809e6

5 min duration
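In short: this commit splits GPU inference into two ZeroGPU tiers. Jobs up to 1024×1024 pixels at 8 steps' worth of work run under a 60-second @spaces.GPU allocation, while anything larger requests a 5-minute one. It also prunes the diffusers/transformers imports down to FluxPipeline, moves the inference-steps slider into Advanced Settings, surfaces the output-format radio next to the prompt, and moves the run button below the settings, relabeled "🚀 Generate".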

Files changed (1)
app.py +29 -17
app.py CHANGED
@@ -3,8 +3,7 @@ import numpy as np
 import random
 import spaces
 import torch
-from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, FluxTransformer2DModel, FluxPipeline
-from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
+from diffusers import FluxPipeline
 
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -15,10 +14,23 @@ MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048
 
 
-@spaces.GPU()
-def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=8, output_format="png", progress=gr.Progress(track_tqdm=True)):
+def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=8, output_format="png"):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
+    if width*height*num_inference_steps <= 1024*1024*8:
+        return infer_in_1min(prompt=prompt, seed=seed, randomize_seed=randomize_seed, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, output_format=output_format)
+    else:
+        return infer_in_5min(prompt=prompt, seed=seed, randomize_seed=randomize_seed, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, output_format=output_format)
+
+@spaces.GPU(duration=60)
+def infer_in_1min(prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, output_format):
+    return infer_on_gpu(prompt=prompt, seed=seed, randomize_seed=randomize_seed, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, output_format=output_format)
+
+@spaces.GPU(duration=300)
+def infer_in_5min(prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, output_format):
+    return infer_on_gpu(prompt=prompt, seed=seed, randomize_seed=randomize_seed, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, output_format=output_format)
+
+def infer_on_gpu(prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, output_format, progress=gr.Progress(track_tqdm=True)):
     generator = torch.Generator().manual_seed(seed)
     image = pipe(
         prompt = prompt,
@@ -51,17 +63,9 @@ Merge by [Sayak Paul](https://huggingface.co/sayakpaul) of 2 of the 12B param re
         placeholder = "Enter your prompt",
         container = False
     )
-
-    run_button = gr.Button(value = "Run", variant="primary")
-
-    num_inference_steps = gr.Slider(
-        label="Number of inference steps",
-        minimum=1,
-        maximum=50,
-        step=1,
-        value=4,
-    )
-
+
+    output_format = gr.Radio([["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="Image format for result", info="File extention", value="png", interactive=True)
+
     with gr.Accordion("Advanced Settings", open=False):
 
         with gr.Row():
@@ -81,6 +85,14 @@ Merge by [Sayak Paul](https://huggingface.co/sayakpaul) of 2 of the 12B param re
                 step=32,
                 value=1024,
             )
+
+        num_inference_steps = gr.Slider(
+            label="Number of inference steps",
+            minimum=1,
+            maximum=50,
+            step=1,
+            value=4,
+        )
 
         guidance_scale = gr.Slider(
             label="Guidance Scale",
@@ -90,8 +102,6 @@ Merge by [Sayak Paul](https://huggingface.co/sayakpaul) of 2 of the 12B param re
             value=3.5,
         )
 
-        output_format = gr.Radio([["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="Image format for result", info="File extention", value="png", interactive=True)
-
         seed = gr.Slider(
             label="Seed",
             minimum=0,
@@ -102,6 +112,8 @@ Merge by [Sayak Paul](https://huggingface.co/sayakpaul) of 2 of the 12B param re
 
         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
 
+    run_button = gr.Button(value = "🚀 Generate", variant="primary")
+
     result = gr.Image(label="Result", show_label=False, format="png")
 
     gr.Examples(
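The heart of the commit is the dispatch pattern in the second hunk: infer() stays on the CPU, estimates the workload as width × height × steps, and forwards to whichever wrapper carries the matching @spaces.GPU duration. With the threshold of 1024*1024*8 ≈ 8.4M pixel-steps, the new 4-step default at 1024×1024 (≈4.2M) lands in the 60-second tier, while 2048×2048 at 8 steps (≈33.6M) routes to the 5-minute tier. Below is a minimal sketch of that pattern detached from the Gradio plumbing; @spaces.GPU(duration=...) is the real ZeroGPU decorator and the sketch assumes it runs on a Space where the spaces package is available, while render() is a hypothetical stand-in for the pipe(...) call.

import spaces

PIXEL_STEP_BUDGET = 1024 * 1024 * 8  # largest width*height*steps for the short tier

def infer(prompt, width=1024, height=1024, num_inference_steps=4):
    # Runs on CPU: pick a GPU tier from the estimated amount of work.
    if width * height * num_inference_steps <= PIXEL_STEP_BUDGET:
        return infer_in_1min(prompt, width, height, num_inference_steps)
    return infer_in_5min(prompt, width, height, num_inference_steps)

@spaces.GPU(duration=60)    # request a 60-second ZeroGPU allocation
def infer_in_1min(prompt, width, height, num_inference_steps):
    return render(prompt, width, height, num_inference_steps)

@spaces.GPU(duration=300)   # request a 300-second allocation for heavy jobs
def infer_in_5min(prompt, width, height, num_inference_steps):
    return render(prompt, width, height, num_inference_steps)

def render(prompt, width, height, num_inference_steps):
    # Hypothetical stand-in for the pipe(...) call in app.py; it executes
    # on the GPU slot acquired by whichever decorated wrapper called it.
    raise NotImplementedError

Keeping infer() itself undecorated appears to be the point of the indirection: decorating it directly would reserve the long slot for every request, small or large.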