Commit 0fd53b3 (parent: 6468939), committed by multimodalart

Release candidate fixes

Files changed (1): app.py (+103, -19)

app.py CHANGED
@@ -68,7 +68,7 @@ def count_files(*inputs):
         Training_Steps = file_counter*200*2
     else:
         Training_Steps = file_counter*200
-    return(gr.update(visible=True, value=f"You are going to train {concept_counter} {type_of_thing}(s), with {file_counter} images for {Training_Steps} steps. This should take around {round(Training_Steps/1.5, 2)} seconds, or {round((Training_Steps/1.5)/3600, 2)} hours. As a reminder, the T4 GPU costs US$0.60 for 1h. Once training is over, don't forget to swap the hardware back to CPU."))
+    return([gr.update(visible=True), gr.update(visible=True, value=f"You are going to train {concept_counter} {type_of_thing}(s), with {file_counter} images for {Training_Steps} steps. This should take around {round(Training_Steps/1.5, 2)} seconds, or {round((Training_Steps/1.1)/3600, 2)} hours. As a reminder, the T4 GPU costs US$0.60 for 1h. Once training is over, don't forget to swap the hardware back to CPU.")])

def train(*inputs):
    torch.cuda.empty_cache()
@@ -83,6 +83,7 @@ def train(*inputs):
    if os.path.exists("instance_images"): shutil.rmtree('instance_images')
    if os.path.exists("diffusers_model.zip"): os.remove("diffusers_model.zip")
    if os.path.exists("model.ckpt"): os.remove("model.ckpt")
+    if os.path.exists("hastrained.success"): os.remove("hastrained.success")
    file_counter = 0
    for i, input in enumerate(inputs):
        if(i < maximum_concepts-1):
@@ -110,6 +111,9 @@ def train(*inputs):
    os.makedirs('output_model',exist_ok=True)
    uses_custom = inputs[-1]
    type_of_thing = inputs[-4]
+    model_name = inputs[-7]
+    remove_attribution_after = inputs[-6]
+    hf_token = inputs[-5]
    if(uses_custom):
        Training_Steps = int(inputs[-3])
        Train_text_encoder_for = int(inputs[-2])
@@ -146,6 +150,8 @@ def train(*inputs):
        max_train_steps=Training_Steps,
    )
    print("Starting training...")
+    lock_file = open("intraining.lock", "w")
+    lock_file.close()
    run_training(args_general)
    gc.collect()
    torch.cuda.empty_cache()
@@ -157,7 +163,23 @@ def train(*inputs):
    with zipfile.ZipFile('diffusers_model.zip', 'w', zipfile.ZIP_DEFLATED) as zipf:
        zipdir('output_model/', zipf)
    print("Training completed!")
-    return [gr.update(visible=True, value=["diffusers_model.zip"]), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)]
+    if os.path.exists("intraining.lock"): os.remove("intraining.lock")
+    trained_file = open("hastrained.success", "w")
+    trained_file.close()
+    if(remove_attribution_after):
+        push(model_name, "My personal profile", hf_token, True)
+        hardware_url = f"https://huggingface.co/spaces/{os.environ['SPACE_ID']}/hardware"
+        headers = { "authorization" : f"Bearer {hf_token}"}
+        body = {'flavor': 'cpu-basic'}
+        requests.post(hardware_url, json = body, headers=headers)
+    return [
+        gr.update(visible=True, value=["diffusers_model.zip"]), #result
+        gr.update(visible=True), #try_your_model
+        gr.update(visible=True), #push_to_hub
+        gr.update(visible=True), #convert_button
+        gr.update(visible=False), #training_ongoing
+        gr.update(visible=True) #completed_training
+    ]

def generate(prompt):
    torch.cuda.empty_cache()
@@ -168,7 +190,7 @@ def generate(prompt):
    image = pipe(prompt).images[0]
    return(image)

-def push(model_name, where_to_upload, hf_token):
+def push(model_name, where_to_upload, hf_token, comes_from_automated=False):
    if(not os.path.exists("model.ckpt")):
        convert("output_model", "model.ckpt")
    from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
@@ -240,16 +262,51 @@ Sample pictures of:
        repo_id=model_id,
        token=hf_token
    )
+    if(not comes_from_automated):
+        extra_message = "Don't forget to remove the GPU attribution after you play with it."
+    else:
+        extra_message = "The GPU has been removed automatically as requested, and you can try the model via the model page"
+    api.create_discussion(repo_id=os.environ['SPACE_ID'], title=f"Your model {model_name} has finished trained from the Dreambooth Train Spaces!", description=f"Your model has been successfully uploaded to: https://huggingface.co/{model_id}. {extra_message}")
+
    return [gr.update(visible=True, value=f"Successfully uploaded your model. Access it [here](https://huggingface.co/{model_id})"), gr.update(visible=True, value=["diffusers_model.zip", "model.ckpt"])]

def convert_to_ckpt():
    convert("output_model", "model.ckpt")
    return gr.update(visible=True, value=["diffusers_model.zip", "model.ckpt"])

+def check_status():
+    if os.path.exists("hastrained.success"):
+        update_top_tag = gr.Update(value=f'''
+        <div class="gr-prose" style="max-width: 80%">
+        <h2>Your model has finished training ✅</h2>
+        <p>Yay, congratulations on training your model. Scroll down to play with with it, save it (either downloading it or on the Hugging Face Hub). Once you are done, your model is safe, and you don't want to train a new one, go to the <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}">settings page</a> and downgrade your Space to a CPU Basic</p>
+        </div>
+        ''')
+        show_outputs = True
+    elif os.path.exists("intraining.lock"):
+        update_top_tag = gr.Update(value='''
+        <div class="gr-prose" style="max-width: 80%">
+        <h2>Don't worry, your model is still training! ⌛</h2>
+        <p>You closed the tab while your model was training, but it's all good! It is still training right now. You can click the "Open logs" button above here to check the training status. Once training is done, reload this tab to interact with your model</p>
+        </div>
+        ''')
+        show_outputs = False
+    return [
+        update_top_tag, #top_description
+        gr.update(visible=show_outputs), #try_your_model
+        gr.update(visible=show_outputs), #push_to_hub
+        gr.update(visible=show_outputs, value=["diffusers_model.zip"]), #result
+        gr.update(visible=show_outputs), #convert_button
+    ]
+
+def checkbox_swap(checkbox):
+    reverse_bool = not checkbox
+    return [gr.update(visible=reverse_bool), gr.update(visible=reverse_bool), gr.update(visible=reverse_bool)]
+
with gr.Blocks(css=css) as demo:
    with gr.Box():
        if "IS_SHARED_UI" in os.environ:
-            gr.HTML(f'''
+            top_description = gr.HTML(f'''
            <div class="gr-prose" style="max-width: 80%">
            <h2>Attention - This Space doesn't work in this shared UI</h2>
            <p>For it to work, you have to duplicate the Space and run it on your own profile using a (paid) private T4 GPU for training. As each T4 costs US$0.60/h, it should cost < US$1 to train a model with less than 100 images using default settings!&nbsp;&nbsp;<a class="duplicate-button" style="display:inline-block" href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></p>
@@ -258,18 +315,20 @@ with gr.Blocks(css=css) as demo:
            </div>
            ''')
        else:
-            gr.HTML(f'''
+            top_description = gr.HTML(f'''
            <div class="gr-prose" style="max-width: 80%">
-            <h2>You have successfully duplicated the Dreambooth Training Space</h2>
+            <h2>You have successfully duplicated the Dreambooth Training Space 🎉</h2>
            <p>If you haven't already, <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings">attribute a T4 GPU to it (via the Settings tab)</a> and run the training below. You will be billed by the minute from when you activate the GPU until when you turn it off.</p>
            </div>
            ''')
    gr.Markdown("# Dreambooth training")
    gr.Markdown("Customize Stable Diffusion by giving it a few examples. You can train up to three concepts by providing examples for each. This Space is based on TheLastBen's [fast-DreamBooth Colab](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) with [🧨 diffusers](https://github.com/huggingface/diffusers)")
-    with gr.Row():
+
+    with gr.Row() as what_are_you_training:
        type_of_thing = gr.Dropdown(label="What would you like to train?", choices=["object", "person", "style"], value="object", interactive=True)
-
-    with gr.Row():
+
+    #Very hacky approach to emulate dynamically created Gradio components
+    with gr.Row() as upload_your_concept:
        with gr.Column():
            thing_description = gr.Markdown("You are going to train an `object`, please upload 5-10 images of the object you are planning on training on from different angles/perspectives. You must have the right to do so and you are liable for the images you use, example:")
            thing_image_example = gr.HTML('''<img src="file/cat-toy.png" />''')
@@ -323,16 +382,20 @@ with gr.Blocks(css=css) as demo:
            steps = gr.Number(label="How many steps", value=800)
            perc_txt_encoder = gr.Number(label="Percentage of the training steps the text-encoder should be trained as well", value=30)

-    type_of_thing.change(fn=swap_text, inputs=[type_of_thing], outputs=[thing_description, thing_image_example, things_naming, perc_txt_encoder], queue=False)
-    training_summary = gr.Textbox("", visible=False, label="Training Summary")
-    steps.change(fn=count_files, inputs=file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary], queue=False)
-    perc_txt_encoder.change(fn=count_files, inputs=file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary], queue=False)
+    with gr.Box(visible=False) as training_summary:
+        training_summary_text = gr.Textbox("", visible=False, label="Training Summary")
+        training_summary_checkbox = gr.Checkbox("Remove GPU After - automatically remove paid GPU attribution and upload model to the Hugging Face Hub after training")
+        training_summary_model_name = gr.Textbox(label="Name of your model", visible=False)
+        training_summary_token_message = gr.Markdown("[A Hugging Face write access token](https://huggingface.co/settings/tokens), go to \"New token\" -> Role : Write. A regular read token won't work here.", visible=False)
+        training_summary_token = gr.Textbox(label="Hugging Face Write Token", type="type", visible=False)

-    for file in file_collection:
-        file.change(fn=count_files, inputs=file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary], queue=False)
    train_btn = gr.Button("Start Training")

-    completed_training = gr.Markdown("# Training completed", visible=False)
+    training_ongoing = gr.Markdown("## Training is ongoing ⌛... You can close this tab if you like or just wait. If you did not check `Remove GPU After`, you can come back here to try your model and upload it after training. Don't forget to remove the GPU attribution after you are done. ", visible=False)
+
+    #Post-training UI
+    completed_training = gr.Markdown('''# ✅ Training completed.
+    ### Don't forget to remove the GPU attribution after you are done trying and uploading your model''', visible=False)

    with gr.Row():
        with gr.Box(visible=False) as try_your_model:
@@ -346,15 +409,36 @@ with gr.Blocks(css=css) as demo:
            model_name = gr.Textbox(label="Name of your model", placeholder="Tarsila do Amaral Style")
            where_to_upload = gr.Dropdown(["My personal profile", "Public Library"], label="Upload to")
            gr.Markdown("[A Hugging Face write access token](https://huggingface.co/settings/tokens), go to \"New token\" -> Role : Write. A regular read token won't work here.")
-            hf_token = gr.Textbox(label="Hugging Face Write Token")
+            hf_token = gr.Textbox(label="Hugging Face Write Token", type="password")
            push_button = gr.Button("Push to the Hub")

    result = gr.File(label="Download the uploaded models in the diffusers format", visible=True)
    success_message_upload = gr.Markdown(visible=False)
    convert_button = gr.Button("Convert to CKPT", visible=False)
-
-    train_btn.click(fn=train, inputs=is_visible+concept_collection+file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[result, try_your_model, push_to_hub, convert_button, completed_training])
+
+    #Swap the examples and the % of text encoder trained depending if it is an object, person or style
+    type_of_thing.change(fn=swap_text, inputs=[type_of_thing], outputs=[thing_description, thing_image_example, things_naming, perc_txt_encoder], queue=False)
+
+    #Update the summary box below the UI according to how many images are uploaded and whether users are using custom settings or not
+    for file in file_collection:
+        file.change(fn=count_files, inputs=file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
+    steps.change(fn=count_files, inputs=file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
+    perc_txt_encoder.change(fn=count_files, inputs=file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
+
+    #Give more options if the user wants to finish everything after training
+    training_summary_checkbox.change(fn=checkbox_swap, inputs=training_summary_checkbox, outputs=[training_summary_token_message, training_summary_token, training_summary_model_name])
+    #Add a message for while it is in training
+    train_btn.click(lambda:gr.Update(visible=True), inputs=None, outputs=training_ongoing)
+
+    #The main train function
+    train_btn.click(fn=train, inputs=is_visible+concept_collection+file_collection+[training_summary_model_name]+[training_summary_checkbox]+[training_summary_token]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[result, try_your_model, push_to_hub, convert_button, training_ongoing, completed_training])
+
+    #Button to generate an image from your trained model after training
    generate_button.click(fn=generate, inputs=prompt, outputs=result_image)
+    #Button to push the model to the Hugging Face Hub
    push_button.click(fn=push, inputs=[model_name, where_to_upload, hf_token], outputs=[success_message_upload, result])
+    #Button to convert the model to ckpt format
    convert_button.click(fn=convert_to_ckpt, inputs=[], outputs=result)
+
+    demo.load(fn=check_status,inputs=None, outputs=[top_description, try_your_model, push_to_hub, result, convert_button])
demo.launch(debug=True)
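
For reference, the GPU-removal step this commit adds at the end of train() amounts to a single authenticated POST against the Space's hardware endpoint. Below is a minimal standalone sketch of that call; the endpoint, payload, and headers mirror the diff, while the helper name and the response check are illustrative additions, not part of the commit.

import os
import requests

def downgrade_space_to_cpu(hf_token: str) -> bool:
    # Ask the Hub to switch this Space back to the free "cpu-basic" flavor,
    # mirroring the request issued in train() when "Remove GPU After" is checked.
    hardware_url = f"https://huggingface.co/spaces/{os.environ['SPACE_ID']}/hardware"
    headers = {"authorization": f"Bearer {hf_token}"}
    body = {"flavor": "cpu-basic"}
    response = requests.post(hardware_url, json=body, headers=headers)
    # Illustrative success check; the commit fires the request without inspecting the response.
    return response.ok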
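
The marker-file flow introduced here (intraining.lock while training runs, hastrained.success once it finishes, both read back by check_status() via demo.load) can be exercised in isolation. The following is a minimal sketch assuming standard Gradio Blocks behaviour; the component and function names are illustrative and are not the ones used in app.py.

import os
import gradio as gr

def long_job():
    # Marker files let a reloaded browser tab discover what happened while it was closed.
    open("intraining.lock", "w").close()
    # ... long-running work would happen here ...
    os.remove("intraining.lock")
    open("hastrained.success", "w").close()
    return gr.update(value="Done ✅", visible=True)

def report_status():
    # Runs on page load and picks the banner that matches the marker files on disk.
    if os.path.exists("hastrained.success"):
        return gr.update(value="A previous run already finished ✅", visible=True)
    if os.path.exists("intraining.lock"):
        return gr.update(value="A run is still in progress ⌛", visible=True)
    return gr.update(visible=False)

with gr.Blocks() as status_demo:
    status = gr.Markdown(visible=False)
    run_btn = gr.Button("Run")
    # Two handlers on one button, as with train_btn in the diff:
    # the first flips the banner on immediately, the second does the actual work.
    run_btn.click(lambda: gr.update(value="Working ⌛", visible=True), inputs=None, outputs=status)
    run_btn.click(fn=long_job, inputs=None, outputs=status)
    # On load, recover the state from the marker files.
    status_demo.load(fn=report_status, inputs=None, outputs=status)

status_demo.launch()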