TheLastBen committed
Commit 866e444
1 Parent(s): 8434af7

Update Notebooks/PPS-Dreambooth-v2.ipynb

Files changed (1)
  1. Notebooks/PPS-Dreambooth-v2.ipynb +10 -10
Notebooks/PPS-Dreambooth-v2.ipynb CHANGED
@@ -233,21 +233,21 @@
  "# If you're not satisfied with the result, Set to True, run again the cell and it will continue training the current model.\n",
  "\n",
  "\n",
- "UNet_Training_Steps=850\n",
+ "UNet_Training_Steps= 1500\n",
  "\n",
- "UNet_Learning_Rate = \"6e-6\"\n",
+ "UNet_Learning_Rate= \"2e-6\"\n",
  "\n",
  "# If you use 10 images, use 650 steps, if you're not satisfied with the result, resume training for another 200 steps with a lower learning rate (8e-6), and so on ...\n",
  "\n",
  "\n",
- "Text_Encoder_Training_Steps=300\n",
+ "Text_Encoder_Training_Steps= 350\n",
  "\n",
  "Text_Encoder_Learning_Rate= \"1e-6\"\n",
  "\n",
  "# 350-600 steps is enough for a small dataset, keep this number small to avoid overfitting, set to 0 to disable, set it to 0 before resuming training if it is already trained.\n",
  "\n",
  "\n",
- "Text_Encoder_Concept_Training_Steps=0\n",
+ "Text_Encoder_Concept_Training_Steps= 0\n",
  "\n",
  "# Suitable for training a style/concept as it acts as regularization, with a minimum of 300 steps, 1 step/image is enough to train the concept(s), set to 0 to disable, set both the settings above to 0 to fintune only the text_encoder on the concept, set it to 0 before resuming training if it is already trained.\n",
  "\n",
@@ -257,9 +257,9 @@
  "# Get the captions from a text file for each instance image.\n",
  "\n",
  "\n",
- "Style_Training=False\n",
+ "Offset_Noise= False\n",
  "\n",
- "# Further reduce overfitting, suitable when training a style or a general theme, don't check the box at the beginning, check it after training for at least 800 steps. (Has no effect when using External Captions)\n",
+ "# Always use it for style training.\n",
  "\n",
  "\n",
  "Resolution = 768\n",
@@ -269,20 +269,20 @@
  "\n",
  "#---------------------------------------------------------------\n",
  "\n",
- "Save_Checkpoint_Every_n_Steps = False\n",
+ "Save_Checkpoint_Every_n_Steps= False\n",
  "\n",
- "Save_Checkpoint_Every=500\n",
+ "Save_Checkpoint_Every= 500\n",
  "\n",
  "# Minimum 200 steps between each save.\n",
  "\n",
  "\n",
- "Start_saving_from_the_step=500\n",
+ "Start_saving_from_the_step= 500\n",
  "\n",
  "# Start saving intermediary checkpoints from this step.\n",
  "\n",
  "\n",
  "#-----------------\n",
- "resumev2=dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Style_Training, Resolution, MODEL_NAMEv2, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resumev2, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every)"
+ "resumev2=dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Offset_Noise, Resolution, MODEL_NAMEv2, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resumev2, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every)"
  ]
  },
  {
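
For readers new to the Offset_Noise flag introduced above: offset noise is a common trick in diffusion fine-tuning where a small per-channel constant is added to the training noise, which lets the model learn large brightness and color shifts and is why the notebook suggests it for style training. The snippet below is only a minimal sketch of that general technique, not the notebook's actual dbtrainv2 implementation; sample_training_noise, latents and offset_strength are hypothetical names.

import torch

def sample_training_noise(latents, offset_noise=False, offset_strength=0.1):
    # Standard noise for one diffusion training step.
    noise = torch.randn_like(latents)
    if offset_noise:
        # Hypothetical illustration: add a per-sample, per-channel constant
        # so the model can also learn overall brightness/color offsets.
        noise = noise + offset_strength * torch.randn(
            latents.shape[0], latents.shape[1], 1, 1, device=latents.device
        )
    return noise

In such a sketch the cell's value would simply be passed through, e.g. noise = sample_training_noise(latents, offset_noise=Offset_Noise).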