airplane194 committed
Commit 89e0ec3 · 1 Parent(s): 0e674ea

Cleaning up the code

Files changed (2)
  1. app.py +2 -6
  2. python.py +26 -28
app.py CHANGED
@@ -25,15 +25,13 @@ MAX_IMAGE_SIZE = 2048 # not used anymore
 
 # Bind the custom method
 # pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
-python.model_loading()
+# python.model_loading()
 
 
 @spaces.GPU()
 def infer(prompt, seed=42, randomize_seed=False, aspect_ratio="4:3 landscape 1152x896", lora_weight="lora_weight_rank_32_alpha_32.safetensors",
           guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
-    # Default height + width
-    width, height = 1024, 1024
-
+
     # Randomize seed if requested
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
@@ -46,8 +44,6 @@ def infer(prompt, seed=42, randomize_seed=False, aspect_ratio="4:3 landscape 115
     torch.cuda.empty_cache()
     image, seed = python.generate_image(
         prompt,
-        height,
-        width,
         guidance_scale,
         aspect_ratio,
         seed,
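
For context, a minimal sketch of the infer path after this commit, assuming the new generate_image signature in python.py and the modules app.py already imports (random, torch, and the repo's python module). The function name infer_sketch and the MAX_SEED value are placeholders, not the author's exact code; the point is that the fixed 1024x1024 default is gone and only the aspect-ratio preset string is passed down, with python.py resolving the actual dimensions.

import random

import torch

import python  # the repo's python.py module (builds the ComfyUI graph on import)

MAX_SEED = 2**32 - 1  # assumption: app.py defines MAX_SEED, but its value is not shown in this hunk


def infer_sketch(prompt,
                 seed=42,
                 randomize_seed=False,
                 aspect_ratio="4:3 landscape 1152x896",
                 lora_weight="lora_weight_rank_32_alpha_32.safetensors",
                 guidance_scale=3.5,
                 num_inference_steps=28):
    # Randomize the seed if requested, as in app.py
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # Free cached GPU memory before generation (no-op if CUDA is not initialized)
    torch.cuda.empty_cache()

    # Width/height are no longer passed explicitly; python.py derives them
    # from the aspect_ratio preset via the "CR SDXL Aspect Ratio" node.
    image, seed = python.generate_image(
        prompt,
        guidance_scale,
        aspect_ratio,
        seed,
        num_inference_steps,
        lora_weight,
    )
    return image, seed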
python.py CHANGED
@@ -131,16 +131,16 @@ import_custom_nodes()
 # add_extra_model_paths()
 dualcliploader = NODE_CLASS_MAPPINGS["DualCLIPLoader"]()
 dualcliploader_11 = dualcliploader.load_clip(
-    clip_name1="t5xxl_fp16.safetensors",
-    clip_name2="clip_l.safetensors",
-    type="flux",
-    device="default",
+    clip_name1="t5xxl_fp16.safetensors",
+    clip_name2="clip_l.safetensors",
+    type="flux",
+    device="default",
 )
 
 cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
 cliptextencode_6 = cliptextencode.encode(
-    text="Photo on a small glass panel. Color. Photo of trees with a body of water in the front and moutain in the background.",
-    clip=get_value_at_index(dualcliploader_11, 0),
+    text="Photo on a small glass panel. Color. Photo of trees with a body of water in the front and moutain in the background.",
+    clip=get_value_at_index(dualcliploader_11, 0),
 )
 
 vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
@@ -148,7 +148,7 @@ vaeloader_10 = vaeloader.load_vae(vae_name="ae.safetensors")
 
 unetloader = NODE_CLASS_MAPPINGS["UNETLoader"]()
 unetloader_12 = unetloader.load_unet(
-    unet_name="flux1-dev.safetensors", weight_dtype="default"
+    unet_name="flux1-dev.safetensors", weight_dtype="default"
 )
 
 ksamplerselect = NODE_CLASS_MAPPINGS["KSamplerSelect"]()
@@ -159,19 +159,19 @@ randomnoise_25 = randomnoise.get_noise(noise_seed='42')
 
 loraloadermodelonly = NODE_CLASS_MAPPINGS["LoraLoaderModelOnly"]()
 loraloadermodelonly_72 = loraloadermodelonly.load_lora_model_only(
-    lora_name='lora_weight_rank_32_alpha_32.safetensors',
-    strength_model=1,
-    model=get_value_at_index(unetloader_12, 0),
+    lora_name='lora_weight_rank_32_alpha_32.safetensors',
+    strength_model=1,
+    model=get_value_at_index(unetloader_12, 0),
 )
 
 cr_sdxl_aspect_ratio = NODE_CLASS_MAPPINGS["CR SDXL Aspect Ratio"]()
 cr_sdxl_aspect_ratio_85 = cr_sdxl_aspect_ratio.Aspect_Ratio(
-    width=1024,
-    height=1024,
-    aspect_ratio="4:3 landscape 1152x896",
-    swap_dimensions="Off",
-    upscale_factor=1.5,
-    batch_size=1,
+    width=1024,
+    height=1024,
+    aspect_ratio="4:3 landscape 1152x896",
+    swap_dimensions="Off",
+    upscale_factor=1.5,
+    batch_size=1,
 )
 
 modelsamplingflux = NODE_CLASS_MAPPINGS["ModelSamplingFlux"]()
@@ -182,27 +182,25 @@ samplercustomadvanced = NODE_CLASS_MAPPINGS["SamplerCustomAdvanced"]()
 vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
 saveimage = NODE_CLASS_MAPPINGS["SaveImage"]()
 
-def model_loading():
-    model_loaders = [dualcliploader_11, vaeloader_10, unetloader_12, loraloadermodelonly_72]
-    valid_models = [
-        getattr(loader[0], 'patcher', loader[0])
-        for loader in model_loaders
-        if not isinstance(loader[0], dict) and not isinstance(getattr(loader[0], 'patcher', None), dict)
-    ]
-    #Load the models
-    # model_management.load_models_gpu(valid_models)
+# def model_loading():
+#     model_loaders = [dualcliploader_11, vaeloader_10, unetloader_12, loraloadermodelonly_72]
+#     valid_models = [
+#         getattr(loader[0], 'patcher', loader[0])
+#         for loader in model_loaders
+#         if not isinstance(loader[0], dict) and not isinstance(getattr(loader[0], 'patcher', None), dict)
+#     ]
+#     #Load the models
+#     # model_management.load_models_gpu(valid_models)
 
 
 def generate_image(prompt,
-                   height,
-                   width,
                    guidance_scale,
                    aspect_ratio,
                    seed,
                    num_inference_steps,
                    lora_weight,
                    ):
-    print(seed)
+    # print(seed)
    cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
    cliptextencode_6 = cliptextencode.encode(
        text=prompt,
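
With height and width dropped from generate_image, the output resolution is driven entirely by the aspect_ratio preset string handled by the "CR SDXL Aspect Ratio" node (for example "4:3 landscape 1152x896"). The helper below is not part of python.py; it is a hypothetical sketch of how such a preset label could be parsed into an explicit (width, height) pair if a caller ever needed the numbers outside the ComfyUI graph.

import re
from typing import Tuple


def parse_aspect_ratio_label(label: str, default: Tuple[int, int] = (1024, 1024)) -> Tuple[int, int]:
    """Return (width, height) parsed from the trailing "WIDTHxHEIGHT" token of a preset label."""
    match = re.search(r"(\d+)\s*x\s*(\d+)\s*$", label)
    if match is None:
        # Fall back to a default when the label carries no explicit dimensions
        return default
    return int(match.group(1)), int(match.group(2))


# Example with the default preset used in this commit:
print(parse_aspect_ratio_label("4:3 landscape 1152x896"))  # -> (1152, 896)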