Duskfallcrew commited on
Commit
47605f9
·
verified ·
1 Parent(s): e16c891

Update app.py

Browse files

Key changes:

Ko-fi link added: modified the Markdown to include the Ko-fi support information.

Temporary working directory: tempfile.TemporaryDirectory() is now used, removing all Colab associations.

No output path required: the output_path text box has been removed; temporary allocation now occurs within the Space instead.

Files changed (1) hide show
  1. app.py +32 -36
app.py CHANGED
@@ -4,6 +4,7 @@ import torch
4
  from diffusers import StableDiffusionXLPipeline
5
  from huggingface_hub import HfApi, login
6
  from huggingface_hub.utils import validate_repo_id, HfHubHTTPError
 
7
  import re
8
  import json
9
  import glob
@@ -13,17 +14,6 @@ import subprocess
13
  from urllib.parse import urlparse, unquote
14
  from pathlib import Path
15
 
16
- # ---------------------- DEPENDENCIES ----------------------
17
-
18
- #No longer needed
19
- #def install_dependencies_gradio():
20
- # """Installs the necessary dependencies for the Gradio app. Run this ONCE."""
21
- # try:
22
- # !pip install -U torch diffusers transformers accelerate safetensors huggingface_hub xformers
23
- # print("Dependencies installed successfully.")
24
- # except Exception as e:
25
- # print(f"Error installing dependencies: {e}")
26
-
27
  # ---------------------- UTILITY FUNCTIONS ----------------------
28
 
29
  def get_save_dtype(save_precision_as):
@@ -168,7 +158,7 @@ def save_sdxl_as_diffusers(args, text_encoder1, text_encoder2, vae, unet, save_d
168
  with output_widget:
169
  print(f"Model saved as {save_dtype}.")
170
 
171
- def convert_model(model_to_load, save_precision_as, epoch, global_step, reference_model, output_path, fp16, output_widget):
172
  """Main conversion function."""
173
  class Args: # Defining Args locally within convert_model
174
  def __init__(self, model_to_load, save_precision_as, epoch, global_step, reference_model, output_path, fp16):
@@ -177,28 +167,30 @@ def convert_model(model_to_load, save_precision_as, epoch, global_step, referenc
177
  self.epoch = epoch
178
  self.global_step = global_step
179
  self.reference_model = reference_model
180
- self.output_path = output_path
181
  self.fp16 = fp16
182
 
183
- args = Args(model_to_load, save_precision_as, epoch, global_step, reference_model, output_path, fp16)
184
- args.model_to_save = increment_filename(os.path.splitext(args.model_to_load)[0] + ".safetensors")
 
 
185
 
186
- try:
187
- load_dtype = torch.float16 if fp16 else None
188
- save_dtype = get_save_dtype(save_precision_as)
189
 
190
- is_load_checkpoint = determine_load_checkpoint(model_to_load)
191
- is_save_checkpoint = not is_load_checkpoint # reverse of load model
192
 
193
- loaded_model_data = load_sdxl_model(args, is_load_checkpoint, load_dtype, output_widget)
194
- convert_and_save_sdxl_model(args, is_save_checkpoint, loaded_model_data, save_dtype, output_widget)
195
 
196
- with output_widget:
197
- return f"Conversion complete. Model saved to {args.model_to_save}"
198
 
199
- except Exception as e:
200
- with output_widget:
201
- return f"Conversion failed: {e}"
202
 
203
  def upload_to_huggingface(model_path, hf_token, orgs_name, model_name, make_private, output_widget):
204
  """Uploads a model to the Hugging Face Hub."""
@@ -250,23 +242,27 @@ def upload_to_huggingface(model_path, hf_token, orgs_name, model_name, make_priv
250
 
251
  # ---------------------- GRADIO INTERFACE ----------------------
252
 
253
- def main(model_to_load, save_precision_as, epoch, global_step, reference_model, output_path, fp16, hf_token, orgs_name, model_name, make_private):
254
  """Main function orchestrating the entire process."""
255
  output = gr.Markdown()
256
 
257
- conversion_output = convert_model(model_to_load, save_precision_as, epoch, global_step, reference_model, output_path, fp16, output)
 
 
 
 
258
 
259
- upload_output = upload_to_huggingface(output_path, hf_token, orgs_name, model_name, make_private, output)
260
 
261
- # Return a combined output
262
- return f"{conversion_output}\n\n{upload_output}"
263
 
264
  with gr.Blocks() as demo:
265
 
266
  # Add initial warnings (only once)
267
- gr.Markdown("""
268
  ## **⚠️ IMPORTANT WARNINGS ⚠️**
269
- This App is Coded by an LLM partially, and for more information please go here: [Ktiseos Nyx](https://github.com/Ktiseos-Nyx/Sdxl-to-diffusers). The colab edition of this may indeed break AUP. This space is running on CPU and in theory SHOULD work, but may be slow. Earth and Dusk/ Ktiseos Nyx does not have the enterprise budget for ZERO GPU or any gpu sadly! Thank you to the community, John6666 especially for coming to aid when gemini would NOT fix the requirements.
270
  """)
271
 
272
  model_to_load = gr.Textbox(label="Model to Load (Checkpoint or Diffusers)", placeholder="Path to model")
@@ -281,7 +277,7 @@ with gr.Blocks() as demo:
281
 
282
  reference_model = gr.Textbox(label="Reference Diffusers Model",
283
  placeholder="e.g., stabilityai/stable-diffusion-xl-base-1.0")
284
- output_path = gr.Textbox(label="Output Path", value="/content/output")
285
 
286
  gr.Markdown("## Hugging Face Hub Configuration")
287
  hf_token = gr.Textbox(label="Hugging Face Token", placeholder="Your Hugging Face write token")
@@ -295,7 +291,7 @@ with gr.Blocks() as demo:
295
 
296
  convert_button.click(fn=main,
297
  inputs=[model_to_load, save_precision_as, epoch, global_step, reference_model,
298
- output_path, fp16, hf_token, orgs_name, model_name, make_private],
299
  outputs=output)
300
 
301
  demo.launch()
 
4
  from diffusers import StableDiffusionXLPipeline
5
  from huggingface_hub import HfApi, login
6
  from huggingface_hub.utils import validate_repo_id, HfHubHTTPError
7
+ import tempfile
8
  import re
9
  import json
10
  import glob
 
14
  from urllib.parse import urlparse, unquote
15
  from pathlib import Path
16
 
 
 
 
 
 
 
 
 
 
 
 
17
  # ---------------------- UTILITY FUNCTIONS ----------------------
18
 
19
  def get_save_dtype(save_precision_as):
 
158
  with output_widget:
159
  print(f"Model saved as {save_dtype}.")
160
 
161
+ def convert_model(model_to_load, save_precision_as, epoch, global_step, reference_model, fp16, output_widget):
162
  """Main conversion function."""
163
  class Args: # Defining Args locally within convert_model
164
  def __init__(self, model_to_load, save_precision_as, epoch, global_step, reference_model, output_path, fp16):
 
167
  self.epoch = epoch
168
  self.global_step = global_step
169
  self.reference_model = reference_model
170
+ self.output_path = output_path #Using output_path even if hardcoded
171
  self.fp16 = fp16
172
 
173
+ # Create a temporary directory for output
174
+ with tempfile.TemporaryDirectory() as tmpdirname:
175
+ args = Args(model_to_load, save_precision_as, epoch, global_step, reference_model, tmpdirname, fp16)
176
+ args.model_to_save = increment_filename(os.path.splitext(args.model_to_load)[0] + ".safetensors")
177
 
178
+ try:
179
+ load_dtype = torch.float16 if fp16 else None
180
+ save_dtype = get_save_dtype(save_precision_as)
181
 
182
+ is_load_checkpoint = determine_load_checkpoint(model_to_load)
183
+ is_save_checkpoint = not is_load_checkpoint # reverse of load model
184
 
185
+ loaded_model_data = load_sdxl_model(args, is_load_checkpoint, load_dtype, output_widget)
186
+ convert_and_save_sdxl_model(args, is_save_checkpoint, loaded_model_data, save_dtype, output_widget)
187
 
188
+ with output_widget:
189
+ return f"Conversion complete. Model saved to {args.model_to_save}"
190
 
191
+ except Exception as e:
192
+ with output_widget:
193
+ return f"Conversion failed: {e}"
194
 
195
  def upload_to_huggingface(model_path, hf_token, orgs_name, model_name, make_private, output_widget):
196
  """Uploads a model to the Hugging Face Hub."""
 
242
 
243
  # ---------------------- GRADIO INTERFACE ----------------------
244
 
245
+ def main(model_to_load, save_precision_as, epoch, global_step, reference_model, fp16, hf_token, orgs_name, model_name, make_private):
246
  """Main function orchestrating the entire process."""
247
  output = gr.Markdown()
248
 
249
+ # Hardcode output_path
250
+ #output_path = "./converted_model" ##This is incorrect! This will save to current working directory, which isnt ideal.
251
+ # Create tempdir, will only be there for the function
252
+ with tempfile.TemporaryDirectory() as output_path:
253
+ conversion_output = convert_model(model_to_load, save_precision_as, epoch, global_step, reference_model, fp16, output)
254
 
255
+ upload_output = upload_to_huggingface(output_path, hf_token, orgs_name, model_name, make_private, output)
256
 
257
+ # Return a combined output
258
+ return f"{conversion_output}\n\n{upload_output}"
259
 
260
  with gr.Blocks() as demo:
261
 
262
  # Add initial warnings (only once)
263
+ gr.Markdown(f"""
264
  ## **⚠️ IMPORTANT WARNINGS ⚠️**
265
+ This App is Coded by an LLM partially, and for more information please go here: [Ktiseos Nyx](https://github.com/Ktiseos-Nyx/Sdxl-to-diffusers). The colab edition of this may indeed break AUP. This space is running on CPU and in theory SHOULD work, but may be slow. Earth and Dusk/ Ktiseos Nyx does not have the enterprise budget for ZERO GPU or any gpu sadly! Thank you to the community, John6666 especially for coming to aid when gemini would NOT fix the requirements. Support Ktiseos Nyx & Myself on Ko-fi: [![Ko-fi](https://img.shields.io/badge/Support%20me%20on%20Ko--fi-F16061?logo=ko-fi&logoColor=white&style=flat)](https://ko-fi.com/Z8Z8L4EO)
266
  """)
267
 
268
  model_to_load = gr.Textbox(label="Model to Load (Checkpoint or Diffusers)", placeholder="Path to model")
 
277
 
278
  reference_model = gr.Textbox(label="Reference Diffusers Model",
279
  placeholder="e.g., stabilityai/stable-diffusion-xl-base-1.0")
280
+ #output_path = gr.Textbox(label="Output Path", value="./converted_model") #Remove text box - using temp file approach
281
 
282
  gr.Markdown("## Hugging Face Hub Configuration")
283
  hf_token = gr.Textbox(label="Hugging Face Token", placeholder="Your Hugging Face write token")
 
291
 
292
  convert_button.click(fn=main,
293
  inputs=[model_to_load, save_precision_as, epoch, global_step, reference_model,
294
+ fp16, hf_token, orgs_name, model_name, make_private],
295
  outputs=output)
296
 
297
  demo.launch()