{
  "shuffle_per_epoch": false,
  "attention": "xformers",
  "model_variant": "base",
  "aspect_mode": "dynamic",
  "aspect_mode_action_preference": "add",
  "use_ema": true,
  "clip_penultimate": false,
  "conditional_dropout": null,
  "disable_cudnn_benchmark": true,
  "use_text_files_as_captions": false,
  "sample_from_batch": 0,
  "stop_text_encoder_training": 999999999999999,
  "use_bucketing": true,
  "regenerate_latent_cache": true,
  "sample_on_training_start": true,
  "add_class_images_to_dataset": false,
  "auto_balance_concept_datasets": false,
  "sample_aspect_ratios": false,
  "dataset_repeats": 1,
  "save_every_n_epoch": 5,
  "pretrained_model_name_or_path": "mekrod/vmp1",
  "pretrained_vae_name_or_path": "input_vae_model/VAE",
  "tokenizer_name": null,
  "instance_data_dir": null,
  "class_data_dir": null,
  "instance_prompt": null,
  "class_prompt": null,
  "save_sample_prompt": null,
  "n_save_sample": 1,
  "sample_height": 512,
  "sample_width": 512,
  "save_guidance_scale": 7.5,
  "save_infer_steps": 30,
  "with_prior_preservation": false,
  "prior_loss_weight": 1.0,
  "num_class_images": 206,
  "output_dir": "output/vmp1e",
  "seed": 3534554,
  "resolution": 512,
  "center_crop": false,
  "train_text_encoder": false,
  "train_batch_size": 1,
  "sample_batch_size": 4,
  "num_train_epochs": 20,
  "max_train_steps": 1140,
  "gradient_accumulation_steps": 1,
  "gradient_checkpointing": true,
  "learning_rate": 1e-06,
  "scale_lr": false,
  "lr_scheduler": "constant",
  "lr_warmup_steps": 0,
  "use_8bit_adam": true,
  "adam_beta1": 0.9,
  "adam_beta2": 0.999,
  "adam_weight_decay": 0.01,
  "adam_epsilon": 1e-08,
  "max_grad_norm": 1.0,
  "push_to_hub": false,
  "hub_token": null,
  "hub_model_id": null,
  "logging_dir": "logs",
  "log_interval": 10,
  "sample_step_interval": 500,
  "mixed_precision": "fp16",
  "local_rank": -1,
  "concepts_list": [
    {
      "instance_prompt": "Magali Villeneuve",
      "instance_data_dir": "datasets/Magali Villeneuve",
      "class_prompt": "",
      "class_data_dir": "",
      "do_not_balance": 0,
      "use_sub_dirs": 0
    },
    {
      "instance_prompt": "Bo Chen",
      "instance_data_dir": "datasets/Bo Chen",
      "class_prompt": "",
      "class_data_dir": "",
      "do_not_balance": 0,
      "use_sub_dirs": 0
    },
    {
      "instance_prompt": "Chengwei Pan",
      "instance_data_dir": "datasets/Chengwei Pan",
      "class_prompt": "",
      "class_data_dir": "",
      "do_not_balance": 0,
      "use_sub_dirs": 0
    }
  ],
  "save_sample_controlled_seed": null,
  "detect_full_drive": true,
  "send_telegram_updates": false,
  "telegram_chat_id": "0",
  "telegram_token": "0",
  "use_deepspeed_adam": false,
  "append_sample_controlled_seed_action": null,
  "add_sample_prompt": null,
  "use_image_names_as_captions": true,
  "mask_prompts": null,
  "batch_tokens": null
}