multimodalart (HF staff) committed
Commit dbfd73e
1 Parent(s): 3777984

Updated version

Files changed (2):
  1. app.py +10 -5
  2. train_dreambooth.py +42 -12
app.py CHANGED
@@ -11,6 +11,7 @@ css = '''
 #component-4, #component-3, #component-10{min-height: 0}
 '''
 shutil.unpack_archive("mix.zip", "mix")
+model_to_load = "stable-diffusion-v1-5"
 maximum_concepts = 3
 def swap_values_files(*total_files):
     file_counter = 0
@@ -59,15 +60,15 @@ def train(*inputs):
     if(uses_custom):
         Training_Steps = int(inputs[-3])
         Train_text_encoder_for = int(inputs[-2])
-        stptxt = int((Training_Steps*Train_text_encoder_for)/100)
     else:
         Training_Steps = file_counter*200
     if(inputs[-4] == "person"):
         class_data_dir = "mix"
+        Train_text_encoder_for=100
         args_txt_encoder = argparse.Namespace(
             image_captions_filename = True,
             train_text_encoder = True,
-            pretrained_model_name_or_path="./stable-diffusion-v1-5",
+            pretrained_model_name_or_path=model_to_load,
             instance_data_dir="instance_images",
             class_data_dir=class_data_dir,
             output_dir="output_model",
@@ -93,7 +94,7 @@ def train(*inputs):
             Session_dir="output_model",
             save_starting_step=0,
             save_n_steps=0,
-            pretrained_model_name_or_path="./stable-diffusion-v1-5",
+            pretrained_model_name_or_path=model_to_load,
             instance_data_dir="instance_images",
             output_dir="output_model",
             instance_prompt="",
@@ -112,17 +113,20 @@ def train(*inputs):
         run_training(args_txt_encoder)
         run_training(args_unet)
     elif(inputs[-4] == "object"):
+        Train_text_encoder_for=30
         class_data_dir = None
     elif(inputs[-4] == "style"):
+        Train_text_encoder_for=15
         class_data_dir = None
-
+
+    stptxt = int((Training_Steps*Train_text_encoder_for)/100)
     args_general = argparse.Namespace(
         image_captions_filename = True,
         train_text_encoder = True,
         stop_text_encoder_training = stptxt,
         save_n_steps = 0,
         dump_only_text_encoder = True,
-        pretrained_model_name_or_path = "./stable-diffusion-v1-5",
+        pretrained_model_name_or_path = model_to_load,
         instance_data_dir="instance_images",
         class_data_dir=class_data_dir,
         output_dir="output_model",
@@ -138,6 +142,7 @@ def train(*inputs):
         lr_warmup_steps = 0,
         max_train_steps=Training_Steps,
     )
+
     run_training(args_general)
     os.rmdir('instance_images')
 with gr.Blocks(css=css) as demo:
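
Net effect of the app.py changes: the hard-coded "./stable-diffusion-v1-5" path is replaced by the shared model_to_load variable, and Train_text_encoder_for now gets a per-concept default (100 for "person", 30 for "object", 15 for "style") before the text-encoder stop step is computed once, after the branch. A minimal sketch of that stop-step calculation; the total step count below is illustrative, not taken from the commit:

# Illustrative values only; the Space derives file_counter*200 when no custom step count is given
Training_Steps = 1500
Train_text_encoder_for = 30   # the "object" default introduced in this commit
stptxt = int((Training_Steps * Train_text_encoder_for) / 100)
print(stptxt)                 # 450, the step at which text-encoder training stops
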
train_dreambooth.py CHANGED
@@ -33,7 +33,7 @@ def parse_args():
         "--pretrained_model_name_or_path",
         type=str,
         default=None,
-        required=True,
+        #required=True,
         help="Path to pretrained model or model identifier from huggingface.co/models.",
     )
     parser.add_argument(
@@ -46,7 +46,7 @@ def parse_args():
         "--instance_data_dir",
         type=str,
         default=None,
-        required=True,
+        #required=True,
         help="A folder containing the training data of instance images.",
     )
     parser.add_argument(
@@ -250,14 +250,14 @@ def parse_args():
     if env_local_rank != -1 and env_local_rank != args.local_rank:
         args.local_rank = env_local_rank

-    if args.instance_data_dir is None:
-        raise ValueError("You must specify a train data directory.")
+    #if args.instance_data_dir is None:
+    #    raise ValueError("You must specify a train data directory.")

-    if args.with_prior_preservation:
-        if args.class_data_dir is None:
-            raise ValueError("You must specify a data directory for class images.")
-        if args.class_prompt is None:
-            raise ValueError("You must specify prompt for class images.")
+    #if args.with_prior_preservation:
+    #    if args.class_data_dir is None:
+    #        raise ValueError("You must specify a data directory for class images.")
+    #    if args.class_prompt is None:
+    #        raise ValueError("You must specify prompt for class images.")

     return args

@@ -388,9 +388,39 @@ def get_full_repo_name(model_id: str, organization: Optional[str] = None, token:
     else:
         return f"{organization}/{model_id}"

+def merge_two_dicts(starting_dict: dict, updater_dict: dict) -> dict:
+    """
+    Starts from base starting dict and then adds the remaining key values from updater replacing the values from
+    the first starting/base dict with the second updater dict.
+
+    For later: how does d = {**d1, **d2} replace collision?
+
+    :param starting_dict:
+    :param updater_dict:
+    :return:
+    """
+    new_dict: dict = starting_dict.copy()  # start with keys and values of starting_dict
+    new_dict.update(updater_dict)  # modifies starting_dict with keys and values of updater_dict
+    return new_dict
+
+def merge_args(args1: argparse.Namespace, args2: argparse.Namespace) -> argparse.Namespace:
+    """
+
+    ref: https://stackoverflow.com/questions/56136549/how-can-i-merge-two-argparse-namespaces-in-python-2-x
+    :param args1:
+    :param args2:
+    :return:
+    """
+    # - the merged args
+    # The vars() function returns the __dict__ attribute to values of the given object e.g {field:value}.
+    merged_key_values_for_namespace: dict = merge_two_dicts(vars(args1), vars(args2))
+    args = argparse.Namespace(**merged_key_values_for_namespace)
+    return args

-def run_training(args):
-    #args = parse_args()
+def run_training(args_imported):
+    args_default = parse_args()
+    args = merge_args(args_default, args_imported)
+    print(args)
     logging_dir = Path(args.output_dir, args.logging_dir)
     i=args.save_starting_step
     accelerator = Accelerator(
@@ -468,7 +498,7 @@ def run_training(args):
     if args.tokenizer_name:
         tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
     elif args.pretrained_model_name_or_path:
-        tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
+        tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer", use_auth_token=True)

     # Load models and create wrapper for stable diffusion
     if args.train_only_unet:
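
The train_dreambooth.py side makes the script importable: --pretrained_model_name_or_path and --instance_data_dir are no longer required on the command line (which is presumably why the matching ValueError checks are commented out, so parse_args() can run with no CLI arguments), and run_training() now merges the Namespace built in app.py over the parse_args() defaults, with the imported values winning on collision. A small self-contained illustration of that merge behaviour; the field names here are made up for the example:

import argparse

defaults = argparse.Namespace(learning_rate=5e-6, output_dir="output_model")   # stand-in for parse_args()
overrides = argparse.Namespace(learning_rate=1e-6)                             # stand-in for args_imported

# Same semantics as merge_two_dicts/merge_args: later values replace earlier ones on collision
merged = argparse.Namespace(**{**vars(defaults), **vars(overrides)})
print(merged.learning_rate)   # 1e-6, the imported value wins
print(merged.output_dir)      # output_model, untouched defaults are kept
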