multimodalart (HF staff) committed
Commit 6c25e6b
1 Parent(s): cab8b2c

Swap textbox to html

Files changed (1):
  1. app.py +5 -3
app.py CHANGED
@@ -68,7 +68,9 @@ def count_files(*inputs):
             Training_Steps = file_counter*200*2
         else:
             Training_Steps = file_counter*200
-    return([gr.update(visible=True), gr.update(visible=True, value=f"You are going to train {concept_counter} {type_of_thing}(s), with {file_counter} images for {Training_Steps} steps. The training should take around {round(Training_Steps/1.1, 2)} seconds, or {round((Training_Steps/1.1)/60, 2)} minutes. The setup, compression and uploading the model can take up to 20 minutes. As the T4-Small GPU costs US$0.60 for 1h, the estimated cost for this training is below US${round((((Training_Steps/1.1)/3600)+0.3+0.1)*0.60, 2)}. If you check the box below the GPU attribution will automatically removed after training is done and the model is uploaded. If not don't forget to come back here and swap the hardware back to CPU.")])
+    return([gr.update(visible=True), gr.update(visible=True, value=f'''You are going to train {concept_counter} {type_of_thing}(s), with {file_counter} images for {Training_Steps} steps. The training should take around {round(Training_Steps/1.1, 2)} seconds, or {round((Training_Steps/1.1)/60, 2)} minutes.<br>
+    The setup, compression and uploading the model can take up to 20 minutes. As the T4-Small GPU costs US$0.60 for 1h, <b>the estimated cost for this training is <US${round((((Training_Steps/1.1)/3600)+0.3+0.1)*0.60, 2)}.</b><br>
+    If you check the box below the GPU attribution will automatically removed after training is done and the model is uploaded. If not, don't forget to come back here and swap the hardware back to CPU.''')])
 
 def train(*inputs):
     torch.cuda.empty_cache()
@@ -386,8 +388,8 @@ with gr.Blocks(css=css) as demo:
         perc_txt_encoder = gr.Number(label="Percentage of the training steps the text-encoder should be trained as well", value=30)
 
     with gr.Box(visible=False) as training_summary:
-        training_summary_text = gr.Textbox("", visible=False, label="Training Summary")
-        training_summary_checkbox = gr.Checkbox("Remove GPU After - automatically remove paid GPU attribution and upload model to the Hugging Face Hub after training")
+        training_summary_text = gr.HTML("", visible=False, label="Training Summary")
+        training_summary_checkbox = gr.Checkbox(label="Automatically remove paid GPU attribution and upload model to the Hugging Face Hub after training", value=False)
         training_summary_model_name = gr.Textbox(label="Name of your model", visible=False)
         training_summary_token_message = gr.Markdown("[A Hugging Face write access token](https://huggingface.co/settings/tokens), go to \"New token\" -> Role : Write. A regular read token won't work here.", visible=False)
         training_summary_token = gr.Textbox(label="Hugging Face Write Token", type="password", visible=False)
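For context on the swap: the new summary string returned by count_files contains <br> and <b> markup. gr.Textbox displays its value as plain text, so those tags would show up literally, while gr.HTML renders them. The checkbox change also moves the descriptive text from the first positional argument (which gr.Checkbox treats as its initial value) into label= and sets value=False explicitly. Below is a minimal standalone sketch of the Textbox-vs-HTML difference, using an illustrative summary string rather than the real one from app.py and assuming the Gradio 3.x API this Space uses:

import gradio as gr

# Illustrative string with the same kind of markup the commit adds (not the real summary)
summary = "Training 1 person with 10 images for 4000 steps.<br><b>Estimated cost: below US$0.85.</b>"

with gr.Blocks() as demo:
    # Before: a Textbox shows the raw string, tags and all
    gr.Textbox(value=summary, label="gr.Textbox output")
    # After: gr.HTML renders the markup, so the line break and bold cost appear as intended
    gr.HTML(value=summary)

demo.launch()

Because gr.HTML renders whatever markup its value contains, the summary built in count_files can now use <br> for line breaks and <b> to highlight the cost estimate instead of one long unformatted sentence.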