Commit b3d3b2f by multimodalart (HF staff)
Parent(s): dbfd73e

Add resize and output

Files changed (1): app.py (+23 -5)
app.py CHANGED
@@ -4,6 +4,7 @@ from pathlib import Path
 import argparse
 import shutil
 from train_dreambooth import run_training
+from PIL import Image

 css = '''
 .instruction{position: absolute; top: 0;right: 0;margin-top: 0px !important}
@@ -11,7 +12,7 @@ css = '''
 #component-4, #component-3, #component-10{min-height: 0}
 '''
 shutil.unpack_archive("mix.zip", "mix")
-model_to_load = "stable-diffusion-v1-5"
+model_to_load = "multimodalart/sd-fine-tunable"
 maximum_concepts = 3
 def swap_values_files(*total_files):
     file_counter = 0
@@ -52,8 +53,22 @@ def train(*inputs):
             os.makedirs('instance_images',exist_ok=True)
             files = inputs[i+(maximum_concepts*2)]
             prompt = inputs[i+maximum_concepts]
-            for j, file in enumerate(files):
-                shutil.copy(file.name, f'instance_images/{prompt} ({j+1}).jpg')
+            for j, file_temp in enumerate(files):
+                file = Image.open(file_temp.name)
+                width, height = file.size
+                side_length = min(width, height)
+                left = (width - side_length)/2
+                top = (height - side_length)/2
+                right = (width + side_length)/2
+                bottom = (height + side_length)/2
+                image = file.crop((left, top, right, bottom))
+                image = image.resize((512, 512))
+                extension = file_temp.name.split(".")[1]
+                if (extension.upper() == "JPG"):
+                    image.save(f'instance_images/{prompt}_({j+1}).jpg', format="JPEG", quality = 100)
+                else:
+                    image.save(f'instance_images/{prompt}_({j+1}).jpg', format=extension.upper())
+                #shutil.copy(file.name, )
                 file_counter += 1

     uses_custom = inputs[-1]
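
The hunk above replaces the plain shutil.copy with preprocessing: each uploaded image is center-cropped to its largest square and resized to 512x512 before being written into instance_images/. A minimal standalone sketch of that preprocessing using only PIL; the helper name center_crop_resize and the file names in the usage comment are illustrative, not part of the Space's code:

from PIL import Image

def center_crop_resize(path, size=512):
    # Open the upload, crop the largest centered square, then scale it to size x size.
    image = Image.open(path)
    width, height = image.size
    side = min(width, height)
    left = (width - side) // 2
    top = (height - side) // 2
    square = image.crop((left, top, left + side, top + side))
    return square.resize((size, size))

# Illustrative usage (paths are placeholders); converting to RGB lets RGBA uploads
# such as transparent PNGs be saved as JPEG without an error:
#   center_crop_resize("example_upload.png").convert("RGB").save("concept_(1).jpg", format="JPEG", quality=100)

PIL's ImageOps.fit performs the same center crop and resize in a single call; either way, converting to RGB before saving as JPEG avoids failures on images that carry an alpha channel.
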
@@ -145,6 +160,8 @@ def train(*inputs):

     run_training(args_general)
     os.rmdir('instance_images')
+    shutil.make_archive("output_model.zip", 'zip', "output_model")
+    return gr.update(visible=True, value="output_model.zip")
 with gr.Blocks(css=css) as demo:
     with gr.Box():
         # You can remove this part here for your local clone
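
One standard-library detail about the new archiving step: shutil.make_archive takes the archive name without its format extension and appends the extension itself, so passing "output_model.zip" as the base name writes a file called output_model.zip.zip to disk. A sketch of the adjusted call if the goal is a file literally named output_model.zip, reusing the folder name from the hunk above:

import shutil

# make_archive(base_name, format, root_dir): the ".zip" suffix is appended by the function,
# and the resulting path is returned.
archive_path = shutil.make_archive("output_model", "zip", "output_model")
# archive_path ends in "output_model.zip", matching the value the handler returns via gr.update.
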
@@ -219,11 +236,12 @@ with gr.Blocks(css=css) as demo:
         gr.Markdown("If not checked, the number of steps and % of frozen encoder will be tuned automatically according to the amount of images you upload and whether you are training an `object`, `person` or `style`.")
         steps = gr.Number(label="How many steps", value=800)
         perc_txt_encoder = gr.Number(label="Percentage of the training steps the text-encoder should be trained as well", value=30)
-
+
         #for file in file_collection:
         #    file.change(fn=swap_values_files, inputs=file_collection, outputs=[steps])

    type_of_thing.change(fn=swap_text, inputs=[type_of_thing], outputs=[thing_description, thing_image_example, things_naming, perc_txt_encoder])
    train_btn = gr.Button("Start Training")
-   train_btn.click(fn=train, inputs=is_visible+concept_collection+file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[])
+   result = gr.File(label="Uploaded model")
+   train_btn.click(fn=train, inputs=is_visible+concept_collection+file_collection+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[result])
 demo.launch()
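
The last hunk adds a gr.File component and routes the train handler's return value into it, so the zipped model becomes downloadable once training finishes. A stripped-down sketch of that wiring, assuming only the Gradio Blocks API the diff itself uses (gr.File, gr.update); package_model, its folder name, and the label are placeholders rather than the Space's actual handler:

import shutil
import gradio as gr

def package_model():
    # Placeholder for the real training handler: archive the output folder,
    # then reveal the File component with the zip as its value.
    path = shutil.make_archive("output_model", "zip", "output_model")
    return gr.update(visible=True, value=path)

with gr.Blocks() as demo:
    train_btn = gr.Button("Start Training")
    result = gr.File(label="Trained model", visible=False)
    train_btn.click(fn=package_model, inputs=[], outputs=[result])

demo.launch()

Returning gr.update(...) from the click handler updates the component's properties in place, which is how the diff makes the download appear only after run_training completes.
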
 