DmitrMakeev committed on
Commit
7abc78a
1 Parent(s): 06246a6

Delete app.py

Files changed (1)
  1. app.py +0 -384
app.py DELETED
@@ -1,384 +0,0 @@
- import gradio as gr
- import torch
- import os
- import shutil
- import requests
- import subprocess
- from subprocess import getoutput
- from huggingface_hub import snapshot_download, HfApi, create_repo
- api = HfApi()
-
- hf_token = os.environ.get("HF_TOKEN_WITH_WRITE_PERMISSION")
-
- is_shared_ui = "fffiloni/train-dreambooth-lora-sdxl" in os.environ['SPACE_ID']
-
- is_gpu_associated = torch.cuda.is_available()
-
- if is_gpu_associated:
-     gpu_info = getoutput('nvidia-smi')
-     if "A10G" in gpu_info:
-         which_gpu = "A10G"
-     elif "T4" in gpu_info:
-         which_gpu = "T4"
-     else:
-         which_gpu = "CPU"
-
- def check_upload_or_no(value):
-     # Show the dataset-upload group only when the checkbox is ticked
-     if value is True:
-         return gr.update(visible=True)
-     else:
-         return gr.update(visible=False)
-
- def load_images_to_dataset(images, dataset_name):
-
-     if is_shared_ui:
-         raise gr.Error("This Space only works in duplicated instances")
-
-     if dataset_name == "":
-         raise gr.Error("You forgot to name your new dataset.")
-
-     # Create the working directory if it doesn't exist
-     my_working_directory = f"my_working_directory_for_{dataset_name}"
-     if not os.path.exists(my_working_directory):
-         os.makedirs(my_working_directory)
-
-     # 'images' is a list of files uploaded through the UI; each exposes its temp path via .name
-     for idx, image in enumerate(images):
-         # Get the base file name (without path) from the original location
-         image_name = os.path.basename(image.name)
-
-         # Construct the destination path in the working directory
-         destination_path = os.path.join(my_working_directory, image_name)
-
-         # Copy the image from the original location to the working directory
-         shutil.copy(image.name, destination_path)
-
-         # Print the image name and its corresponding save path
-         print(f"Image {idx + 1}: {image_name} copied to {destination_path}")
-
-     path_to_folder = my_working_directory
-     your_username = api.whoami(token=hf_token)["name"]
-     repo_id = f"{your_username}/{dataset_name}"
-     create_repo(repo_id=repo_id, repo_type="dataset", private=True, token=hf_token)
-
-     api.upload_folder(
-         folder_path=path_to_folder,
-         repo_id=repo_id,
-         repo_type="dataset",
-         token=hf_token
-     )
-
-     return "Done, your dataset is ready and loaded for the training step!", repo_id
-
- def swap_hardware(hf_token, hardware="cpu-basic"):
-     # Request a hardware flavor change for this Space via the HF API
-     hardware_url = f"https://huggingface.co/spaces/{os.environ['SPACE_ID']}/hardware"
-     headers = {"authorization": f"Bearer {hf_token}"}
-     body = {'flavor': hardware}
-     requests.post(hardware_url, json=body, headers=headers)
-
- def swap_sleep_time(hf_token, sleep_time):
-     # Update the Space's sleep timeout (a value of -1 disables sleeping)
-     sleep_time_url = f"https://huggingface.co/api/spaces/{os.environ['SPACE_ID']}/sleeptime"
-     headers = {"authorization": f"Bearer {hf_token}"}
-     body = {'seconds': sleep_time}
-     requests.post(sleep_time_url, json=body, headers=headers)
-
- def get_sleep_time(hf_token):
-     # Read the Space's current sleep timeout; returns None if none is configured
-     sleep_time_url = f"https://huggingface.co/api/spaces/{os.environ['SPACE_ID']}"
-     headers = {"authorization": f"Bearer {hf_token}"}
-     response = requests.get(sleep_time_url, headers=headers)
-     try:
-         gcTimeout = response.json()['runtime']['gcTimeout']
-     except (KeyError, TypeError, ValueError):
-         # No runtime info in the response, or the response was not JSON
-         gcTimeout = None
-     return gcTimeout
-
- def write_to_community(title, description, hf_token):
-     # Open a discussion thread on this Space's community tab
-     api.create_discussion(repo_id=os.environ['SPACE_ID'], title=title, description=description, repo_type="space", token=hf_token)
-
- def set_accelerate_default_config():
-     try:
-         subprocess.run(["accelerate", "config", "default"], check=True)
-         print("Accelerate default config set successfully!")
-     except subprocess.CalledProcessError as e:
-         print(f"An error occurred: {e}")
-
- def train_dreambooth_lora_sdxl(dataset_id, instance_data_dir, lora_trained_xl_folder, instance_prompt, max_train_steps, checkpoint_steps, remove_gpu):
-
-     script_filename = "train_dreambooth_lora_sdxl.py"  # Assuming it's in the same folder
-
-     command = [
-         "accelerate",
-         "launch",
-         script_filename,  # Use the local script
-         "--pretrained_model_name_or_path=stabilityai/stable-diffusion-xl-base-1.0",
-         "--pretrained_vae_model_name_or_path=madebyollin/sdxl-vae-fp16-fix",
-         f"--dataset_id={dataset_id}",
-         f"--instance_data_dir={instance_data_dir}",
-         f"--output_dir={lora_trained_xl_folder}",
-         "--mixed_precision=fp16",
-         f"--instance_prompt={instance_prompt}",
-         "--resolution=1024",
-         "--train_batch_size=2",
-         "--gradient_accumulation_steps=2",
-         "--gradient_checkpointing",
-         "--learning_rate=1e-4",
-         "--lr_scheduler=constant",
-         "--lr_warmup_steps=0",
-         "--enable_xformers_memory_efficient_attention",
-         "--use_8bit_adam",
-         f"--max_train_steps={max_train_steps}",
-         f"--checkpointing_steps={checkpoint_steps}",
-         "--seed=0",
-         "--push_to_hub",
-         f"--hub_token={hf_token}"
-     ]
-
-     try:
-         subprocess.run(command, check=True)
-         print("Training is finished!")
-         if remove_gpu:
-             swap_hardware(hf_token, "cpu-basic")
-     except subprocess.CalledProcessError as e:
-         print(f"An error occurred: {e}")
-
-         title = "There was an error during your training"
-         description = f'''
- Unfortunately there was an error while training your {lora_trained_xl_folder} model.
- Please check it out below. Feel free to report this issue to [SD-XL Dreambooth LoRA Training](https://huggingface.co/spaces/fffiloni/train-dreambooth-lora-sdxl):
- ```
- {str(e)}
- ```
- '''
-         if remove_gpu:
-             swap_hardware(hf_token, "cpu-basic")
-         #write_to_community(title, description, hf_token)
-
- def main(dataset_id,
-          lora_trained_xl_folder,
-          instance_prompt,
-          max_train_steps,
-          checkpoint_steps,
-          remove_gpu):
-
-     if is_shared_ui:
-         raise gr.Error("This Space only works in duplicated instances")
-
-     if not is_gpu_associated:
-         raise gr.Error("Please associate a T4 or A10G GPU for this Space")
-
-     if dataset_id == "":
-         raise gr.Error("You forgot to specify an image dataset")
-
-     if instance_prompt == "":
-         raise gr.Error("You forgot to specify a concept prompt")
-
-     if lora_trained_xl_folder == "":
-         raise gr.Error("You forgot to name the output folder for your model")
-
-     # Keep the Space awake for the whole training run
-     sleep_time = get_sleep_time(hf_token)
-     if sleep_time:
-         swap_sleep_time(hf_token, -1)
-
-     gr.Warning("If you did not check the `Remove GPU After Training` box, don't forget to remove the GPU attribution after you are done.")
-
-     dataset_repo = dataset_id
-
-     # Automatically set local_dir based on the last part of dataset_repo
-     repo_parts = dataset_repo.split("/")
-     local_dir = f"./{repo_parts[-1]}"  # Use the last part of the split
-
-     # Check if the directory exists and create it if necessary
-     if not os.path.exists(local_dir):
-         os.makedirs(local_dir)
-
-     gr.Info("Downloading dataset ...")
-
-     snapshot_download(
-         dataset_repo,
-         local_dir=local_dir,
-         repo_type="dataset",
-         ignore_patterns=".gitattributes",
-         token=hf_token
-     )
-
-     set_accelerate_default_config()
-
-     gr.Info("Training begins ...")
-
-     instance_data_dir = repo_parts[-1]
-     train_dreambooth_lora_sdxl(dataset_id, instance_data_dir, lora_trained_xl_folder, instance_prompt, max_train_steps, checkpoint_steps, remove_gpu)
-
-     your_username = api.whoami(token=hf_token)["name"]
-     return f"Done, your trained model has been stored in your models library: {your_username}/{lora_trained_xl_folder}"
-
- css = """
- #col-container {max-width: 780px; margin-left: auto; margin-right: auto;}
- #upl-dataset-group {background-color: transparent!important;}
-
- div#warning-ready {
-     background-color: #ecfdf5;
-     padding: 0 10px 5px;
-     margin: 20px 0;
- }
- div#warning-ready > .gr-prose > h2, div#warning-ready > .gr-prose > p {
-     color: #057857!important;
- }
-
- div#warning-duplicate {
-     background-color: #ebf5ff;
-     padding: 0 10px 5px;
-     margin: 20px 0;
- }
-
- div#warning-duplicate > .gr-prose > h2, div#warning-duplicate > .gr-prose > p {
-     color: #0f4592!important;
- }
-
- div#warning-duplicate strong {
-     color: #0f4592;
- }
-
- p.actions {
-     display: flex;
-     align-items: center;
-     margin: 20px 0;
- }
-
- div#warning-duplicate .actions a {
-     display: inline-block;
-     margin-right: 10px;
- }
-
- div#warning-setgpu {
-     background-color: #fff4eb;
-     padding: 0 10px 5px;
-     margin: 20px 0;
- }
-
- div#warning-setgpu > .gr-prose > h2, div#warning-setgpu > .gr-prose > p {
-     color: #92220f!important;
- }
-
- div#warning-setgpu a, div#warning-setgpu b {
-     color: #91230f;
- }
-
- div#warning-setgpu p.actions > a {
-     display: inline-block;
-     background: #1f1f23;
-     border-radius: 40px;
-     padding: 6px 24px;
-     color: antiquewhite;
-     text-decoration: none;
-     font-weight: 600;
-     font-size: 1.2em;
- }
-
- button#load-dataset-btn {
-     min-height: 60px;
- }
- """
-
- with gr.Blocks(css=css) as demo:
-     with gr.Column(elem_id="col-container"):
-         if is_shared_ui:
-             top_description = gr.HTML(f'''
-             <div class="gr-prose">
-                 <h2><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;" fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
-                 Attention: this Space needs to be duplicated to work</h2>
-                 <p class="main-message">
-                     To make it work, <strong>duplicate the Space</strong> and run it on your own profile using a <strong>private</strong> GPU (T4-small or A10G-small).<br />
-                     A T4 costs <strong>US$0.60/h</strong>, so it should cost < US$1 to train most models.
-                 </p>
-                 <p class="actions">
-                     <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true">
-                         <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg-dark.svg" alt="Duplicate this Space" />
-                     </a>
-                     to start training your own image model
-                 </p>
-             </div>
-             ''', elem_id="warning-duplicate")
-         else:
-             if is_gpu_associated:
-                 top_description = gr.HTML(f'''
-                 <div class="gr-prose">
-                     <h2><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;" fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
-                     You have successfully associated a {which_gpu} GPU to the SD-XL Training Space 🎉</h2>
-                     <p>
-                         You can now train your model! You will be billed by the minute from when you activated the GPU until it is turned off.
-                     </p>
-                 </div>
-                 ''', elem_id="warning-ready")
-             else:
-                 top_description = gr.HTML(f'''
-                 <div class="gr-prose">
-                     <h2><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;" fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
-                     You have successfully duplicated the SD-XL Training Space 🎉</h2>
-                     <p>There's only one step left before you can train your model: <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings" style="text-decoration: underline" target="_blank">attribute a <b>T4-small or A10G-small GPU</b> to it (via the Settings tab)</a> and run the training below.
-                     You will be billed by the minute from when you activate the GPU until it is turned off.</p>
-                     <p class="actions">
-                         <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings">🔥 &nbsp; Set recommended GPU</a>
-                     </p>
-                 </div>
-                 ''', elem_id="warning-setgpu")
-
-         gr.Markdown("# SD-XL Dreambooth LoRA Training UI 💭")
-
-         upload_my_images = gr.Checkbox(label="Drop your training images? (optional)", value=False)
-         gr.Markdown("Use this step to upload your training images and create a new dataset. If you already have a dataset stored on your HF profile, you can skip this step and provide your dataset ID in the `Dataset ID` input below.")
-
-         with gr.Group(visible=False, elem_id="upl-dataset-group") as upload_group:
-             with gr.Row():
-                 images = gr.File(file_types=["image"], label="Upload your images", file_count="multiple", interactive=True, visible=True)
-                 with gr.Column():
-                     new_dataset_name = gr.Textbox(label="Set new dataset name", placeholder="e.g.: my_awesome_dataset")
-                     dataset_status = gr.Textbox(label="dataset status")
-                     load_btn = gr.Button("Load images to new dataset", elem_id="load-dataset-btn")
-
-         gr.Markdown("## Training")
-         gr.Markdown("You can use an existing image dataset; find a dataset example here: [https://huggingface.co/datasets/diffusers/dog-example](https://huggingface.co/datasets/diffusers/dog-example) ;)")
-
-         with gr.Row():
-             dataset_id = gr.Textbox(label="Dataset ID", info="use one of your previously uploaded image datasets on your HF profile", placeholder="diffusers/dog-example")
-             instance_prompt = gr.Textbox(label="Concept prompt", info="concept prompt - use a unique, made-up word to avoid collisions")
-
-         with gr.Row():
-             model_output_folder = gr.Textbox(label="Output model folder name", placeholder="lora-trained-xl-folder")
-             max_train_steps = gr.Number(label="Max Training Steps", value=500, precision=0, step=10)
-             checkpoint_steps = gr.Number(label="Checkpoint Steps", value=100, precision=0, step=10)
-
-         remove_gpu = gr.Checkbox(label="Remove GPU After Training", value=True)
-         train_button = gr.Button("Train!")
-
-         train_status = gr.Textbox(label="Training status")
-
-         upload_my_images.change(
-             fn=check_upload_or_no,
-             inputs=[upload_my_images],
-             outputs=[upload_group]
-         )
-
-         load_btn.click(
-             fn=load_images_to_dataset,
-             inputs=[images, new_dataset_name],
-             outputs=[dataset_status, dataset_id]
-         )
-
-         train_button.click(
-             fn=main,
-             inputs=[
-                 dataset_id,
-                 model_output_folder,
-                 instance_prompt,
-                 max_train_steps,
-                 checkpoint_steps,
-                 remove_gpu
-             ],
-             outputs=[train_status]
-         )
-
- demo.queue(default_enabled=False).launch(debug=True)