TheLastBen committed
Commit 4e6e523
1 Parent(s): ab232bf

Update Notebooks/PPS-Dreambooth-v1.ipynb

Files changed (1)
  1. Notebooks/PPS-Dreambooth-v1.ipynb +11 -11
Notebooks/PPS-Dreambooth-v1.ipynb CHANGED
@@ -224,21 +224,21 @@
  "# If you're not satisfied with the result, Set to True, run again the cell and it will continue training the current model.\n",
  "\n",
  "\n",
- "UNet_Training_Steps=1500\n",
+ "UNet_Training_Steps= 1500\n",
  "\n",
- "UNet_Learning_Rate = \"4e-6\"\n",
+ "UNet_Learning_Rate= \"2e-6\"\n",
  "\n",
- "# If you use 10 images, use 1500 steps, if you're not satisfied with the result, resume training for another 200 steps, and so on ...\n",
+ "# If you use 10 images, use 650 steps, if you're not satisfied with the result, resume training for another 200 steps with a lower learning rate (8e-6), and so on ...\n",
  "\n",
  "\n",
- "Text_Encoder_Training_Steps=300\n",
+ "Text_Encoder_Training_Steps= 350\n",
  "\n",
  "Text_Encoder_Learning_Rate= \"1e-6\"\n",
  "\n",
  "# 350-600 steps is enough for a small dataset, keep this number small to avoid overfitting, set to 0 to disable, set it to 0 before resuming training if it is already trained.\n",
  "\n",
  "\n",
- "Text_Encoder_Concept_Training_Steps=0\n",
+ "Text_Encoder_Concept_Training_Steps= 0\n",
  "\n",
  "# Suitable for training a style/concept as it acts as regularization, with a minimum of 300 steps, 1 step/image is enough to train the concept(s), set to 0 to disable, set both the settings above to 0 to fintune only the text_encoder on the concept, set it to 0 before resuming training if it is already trained.\n",
  "\n",
@@ -248,26 +248,26 @@
  "# Get the captions from a text file for each instance image.\n",
  "\n",
  "\n",
- "Style_Training=False\n",
+ "Offset_Noise= False\n",
  "\n",
- "# Further reduce overfitting, suitable when training a style or a general theme, don't check the box at the beginning, check it after training for at least 800 steps. (Has no effect when using External Captions)\n",
+ "# Always use it for style training.\n",
  "\n",
  "\n",
- "Resolution = 512\n",
+ "Resolution = 768\n",
  "\n",
  "# Choices : \"512\", \"576\", \"640\", \"704\", \"768\", \"832\", \"896\", \"960\", \"1024\"\n",
  "# Higher resolution = Higher quality, make sure the instance images are cropped to this selected size (or larger).\n",
  "\n",
  "#---------------------------------------------------------------\n",
  "\n",
- "Save_Checkpoint_Every_n_Steps = False\n",
+ "Save_Checkpoint_Every_n_Steps= False\n",
  "\n",
- "Save_Checkpoint_Every=500\n",
+ "Save_Checkpoint_Every= 500\n",
  "\n",
  "# Minimum 200 steps between each save.\n",
  "\n",
  "\n",
- "Start_saving_from_the_step=500\n",
+ "Start_saving_from_the_step= 500\n",
  "\n",
  "# Start saving intermediary checkpoints from this step.\n",
  "\n",