TheLastBen committed
Commit 0a7dd8f
Parent: 61c2639

Upload 2 files

Scripts/mainpaperspacev1.py CHANGED
@@ -616,7 +616,7 @@ def dbtrain(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encod
   --resolution='+str(Resolution)+' \
   --mixed_precision='+str(precision)+' \
   --train_batch_size=1 \
-  --gradient_accumulation_steps=1 \
+  --gradient_accumulation_steps=1 --gradient_checkpointing \
   --use_8bit_adam \
   --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
   --lr_scheduler="linear" \
@@ -646,7 +646,7 @@ def dbtrain(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encod
   --resolution='+str(Resolution)+' \
   --mixed_precision='+str(precision)+' \
   --train_batch_size=1 \
-  --gradient_accumulation_steps=1 \
+  --gradient_accumulation_steps=1 --gradient_checkpointing \
   --use_8bit_adam \
   --learning_rate='+str(UNet_Learning_Rate)+' \
   --lr_scheduler="linear" \
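
For context: the added --gradient_checkpointing flag is not interpreted by this launcher string itself; it is parsed by the downstream Dreambooth training script. A minimal sketch of how such a flag is typically wired to diffusers' gradient checkpointing API (the argument parser, model ID, and surrounding code here are illustrative assumptions, not code from this repo):

import argparse

from diffusers import UNet2DConditionModel

parser = argparse.ArgumentParser()
parser.add_argument("--gradient_checkpointing", action="store_true",
                    help="Recompute activations during backprop to cut activation memory.")
args = parser.parse_args()

# Model ID and subfolder are illustrative; a real trainer would load the session's checkpoint.
unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")

if args.gradient_checkpointing:
    # diffusers models expose enable_gradient_checkpointing(): activation memory drops,
    # at the cost of extra recomputation in the backward pass.
    unet.enable_gradient_checkpointing()
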
Scripts/mainpaperspacev2.py CHANGED
@@ -664,7 +664,7 @@ def dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Enc
   --resolution='+str(Resolution)+' \
   --mixed_precision='+str(precision)+' \
   --train_batch_size=1 \
-  --gradient_accumulation_steps=1 \
+  --gradient_accumulation_steps=1 --gradient_checkpointing \
   --use_8bit_adam \
   --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
   --lr_scheduler="linear" \
@@ -693,7 +693,7 @@ def dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Enc
   --resolution='+str(Resolution)+' \
   --mixed_precision='+str(precision)+' \
   --train_batch_size=1 \
-  --gradient_accumulation_steps=1 \
+  --gradient_accumulation_steps=1 --gradient_checkpointing \
   --use_8bit_adam \
   --learning_rate='+str(UNet_Learning_Rate)+' \
   --lr_scheduler="linear" \
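
The flags shown in both diffs are spliced into one long launch command by plain string concatenation inside dbtrain/dbtrainv2. A trimmed-down sketch of that pattern, with the launcher, script name, and variable values assumed purely for illustration:

Resolution = 512           # assumed value
precision = "fp16"         # assumed value
UNet_Learning_Rate = 2e-6  # assumed value

train_cmd = (
    'accelerate launch train_dreambooth.py '  # launcher and script name assumed
    '--resolution=' + str(Resolution) + ' '
    '--mixed_precision=' + str(precision) + ' '
    '--train_batch_size=1 '
    '--gradient_accumulation_steps=1 --gradient_checkpointing '
    '--use_8bit_adam '
    '--learning_rate=' + str(UNet_Learning_Rate) + ' '
    '--lr_scheduler="linear"'
)
print(train_cmd)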