Sreerama committed on
Commit
a00d107
1 Parent(s): dc54f82

remove swap_text function

Browse files
Files changed (1) hide show
  1. app.py +0 -23
app.py CHANGED
@@ -40,28 +40,6 @@ if(is_gpu_associated):
40
  with zipfile.ZipFile("mix.zip", 'r') as zip_ref:
41
  zip_ref.extractall(".")
42
 
43
- def swap_text(option, base):
44
- resize_width = 768 if base == "v2-768" else 512
45
- mandatory_liability = "You must have the right to do so and you are liable for the images you use, example:"
46
- if(option == "object"):
47
- instance_prompt_example = "cttoy"
48
- freeze_for = 30
49
- return [f"You are going to train `object`(s), upload 5-10 images of each object you are planning on training on from different angles/perspectives. You can use services like <a style='text-decoration: underline' target='_blank' href='https://www.birme.net/?target_width={resize_width}&target_height={resize_width}'>birme</a> for smart cropping. {mandatory_liability}:", '''<img src="file/cat-toy.png" />''', f"You should name your concept with a unique made up word that has low chance of the model already knowing it (e.g.: `{instance_prompt_example}` here). Images will be automatically cropped to {resize_width}x{resize_width}.", freeze_for, gr.update(visible=False)]
50
- elif(option == "person"):
51
- instance_prompt_example = "julcto"
52
- freeze_for = 70
53
- #show_prior_preservation = True if base != "v2-768" else False
54
- show_prior_preservation=False
55
- if(show_prior_preservation):
56
- prior_preservation_box_update = gr.update(visible=show_prior_preservation)
57
- else:
58
- prior_preservation_box_update = gr.update(visible=show_prior_preservation, value=False)
59
- return [f"You are going to train a `person`(s), upload 10-20 images of each person you are planning on training on from different angles/perspectives. You can use services like <a style='text-decoration: underline' target='_blank' href='https://www.birme.net/?target_width={resize_width}&target_height={resize_width}'>birme</a> for smart cropping. {mandatory_liability}:", '''<img src="file/person.png" />''', f"You should name your concept with a unique made up word that has low chance of the model already knowing it (e.g.: `{instance_prompt_example}` here). Images will be automatically cropped to {resize_width}x{resize_width}.", freeze_for, prior_preservation_box_update]
60
- elif(option == "style"):
61
- instance_prompt_example = "trsldamrl"
62
- freeze_for = 10
63
- return [f"You are going to train a `style`, upload 10-20 images of the style you are planning on training on. You can use services like <a style='text-decoration: underline' target='_blank' href='https://www.birme.net/?target_width={resize_width}&target_height={resize_width}'>birme</a> for smart cropping. Name the files with the words you would like {mandatory_liability}:", '''<img src="file/trsl_style.png" />''', f"You should name your concept with a unique made up word that has low chance of the model already knowing it (e.g.: `{instance_prompt_example}` here). Images will be automatically cropped to {resize_width}x{resize_width}", freeze_for, gr.update(visible=False)]
64
-
65
  def swap_base_model(selected_model):
66
  if(is_gpu_associated):
67
  global model_to_load
@@ -606,7 +584,6 @@ with gr.Blocks(css=css) as demo:
606
  #Swap the examples and the % of text encoder trained depending if it is an object, person or style
607
 
608
  #Swap the base model
609
- base_model_to_use.change(fn=swap_text, inputs=[base_model_to_use], outputs=[thing_description, thing_image_example, things_naming, perc_txt_encoder, thing_experimental], queue=False, show_progress=False)
610
  base_model_to_use.change(fn=swap_base_model, inputs=base_model_to_use, outputs=[])
611
 
612
  #Update the summary box below the UI according to how many images are uploaded and whether users are using custom settings or not
 
40
  with zipfile.ZipFile("mix.zip", 'r') as zip_ref:
41
  zip_ref.extractall(".")
42
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
  def swap_base_model(selected_model):
44
  if(is_gpu_associated):
45
  global model_to_load
 
584
  #Swap the examples and the % of text encoder trained depending if it is an object, person or style
585
 
586
  #Swap the base model
 
587
  base_model_to_use.change(fn=swap_base_model, inputs=base_model_to_use, outputs=[])
588
 
589
  #Update the summary box below the UI according to how many images are uploaded and whether users are using custom settings or not