rexoscare committed on
Commit
8dddd71
1 Parent(s): 223d0fd

Upload folder using huggingface_hub

Files changed (38)
  1. c0cf44c8-3532-426c-80e0-d866ffbc924f/07-08IKP_A.jpg +3 -0
  2. c0cf44c8-3532-426c-80e0-d866ffbc924f/3-kalighat-patachitra-anwar-chitrakar.jpg +3 -0
  3. c0cf44c8-3532-426c-80e0-d866ffbc924f/311PM030.jpg +3 -0
  4. c0cf44c8-3532-426c-80e0-d866ffbc924f/362BC016.jpg +3 -0
  5. c0cf44c8-3532-426c-80e0-d866ffbc924f/362BC018.jpg +3 -0
  6. c0cf44c8-3532-426c-80e0-d866ffbc924f/362BC020.jpg +3 -0
  7. c0cf44c8-3532-426c-80e0-d866ffbc924f/519H-XHERwL._AC_UF10001000_QL80_.jpg +3 -0
  8. c0cf44c8-3532-426c-80e0-d866ffbc924f/A1KFjohu4IL.jpg +3 -0
  9. c0cf44c8-3532-426c-80e0-d866ffbc924f/BS13.jpg +3 -0
  10. c0cf44c8-3532-426c-80e0-d866ffbc924f/BS17.jpg +3 -0
  11. c0cf44c8-3532-426c-80e0-d866ffbc924f/IMG-20210201-WA0038-756x1024.jpg +3 -0
  12. c0cf44c8-3532-426c-80e0-d866ffbc924f/India_Kalighat_painting_19th_century_-_Two_Aspects_of_Kali-_Kali_Enshrined_-_1980.216.b_-_Cleveland_Museum_of_Art.jpg +3 -0
  13. c0cf44c8-3532-426c-80e0-d866ffbc924f/KAL210.jpg +3 -0
  14. c0cf44c8-3532-426c-80e0-d866ffbc924f/KAL453.jpg +3 -0
  15. c0cf44c8-3532-426c-80e0-d866ffbc924f/Kalighat-133____Size-_18x11.5__Medium-_water_colour_on_paper___price-12_000.jpg +3 -0
  16. c0cf44c8-3532-426c-80e0-d866ffbc924f/MA158330.jpg +3 -0
  17. c0cf44c8-3532-426c-80e0-d866ffbc924f/MA243022.jpg +3 -0
  18. c0cf44c8-3532-426c-80e0-d866ffbc924f/MA243160.jpg +3 -0
  19. c0cf44c8-3532-426c-80e0-d866ffbc924f/MA288407.jpg +3 -0
  20. c0cf44c8-3532-426c-80e0-d866ffbc924f/MA289529.jpg +3 -0
  21. c0cf44c8-3532-426c-80e0-d866ffbc924f/Mythology-7.jpg +3 -0
  22. c0cf44c8-3532-426c-80e0-d866ffbc924f/craft1575455965Kalighat Showcase 1.jpg +3 -0
  23. c0cf44c8-3532-426c-80e0-d866ffbc924f/download.jpg +3 -0
  24. c0cf44c8-3532-426c-80e0-d866ffbc924f/il_fullxfull.2732525416_o9p2.jpg +3 -0
  25. c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-1-anwar-chitrakar.jpg +3 -0
  26. c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-10-anwar-chitrakar.jpg +3 -0
  27. c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-12-anwar-chitrakar.jpg +3 -0
  28. c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-13-anwar-chitrakar.jpg +3 -0
  29. c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-15-anwar-chitrakar.jpg +3 -0
  30. c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-17-anwar-chitrakar.jpg +3 -0
  31. c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-18-anwar-chitrakar.jpg +3 -0
  32. c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-19-anwar-chitrakar.jpg +3 -0
  33. c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-6-anwar-chitrakar.jpg +3 -0
  34. c0cf44c8-3532-426c-80e0-d866ffbc924f/lord_ganesha.jpg +3 -0
  35. c0cf44c8-3532-426c-80e0-d866ffbc924f/metadata.jsonl +34 -0
  36. requirements.txt +11 -0
  37. script.py +129 -0
  38. trainer.py +2100 -0
c0cf44c8-3532-426c-80e0-d866ffbc924f/07-08IKP_A.jpg ADDED

Git LFS Details

  • SHA256: b99a8666612a73a640cb1205a845d6d8733485b253b416770d1afbde76826519
  • Pointer size: 131 Bytes
  • Size of remote file: 136 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/3-kalighat-patachitra-anwar-chitrakar.jpg ADDED

Git LFS Details

  • SHA256: f0b8ebbda3fd352dcd30ca34eaa09ddf4823937f2c671a43f89c6bf93484bd98
  • Pointer size: 131 Bytes
  • Size of remote file: 177 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/311PM030.jpg ADDED

Git LFS Details

  • SHA256: 30060319c7907b16239192eb2753cf8a77ed77eed99329349a556d59d7f2e8d0
  • Pointer size: 131 Bytes
  • Size of remote file: 338 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/362BC016.jpg ADDED

Git LFS Details

  • SHA256: 30d51a50b595372166af81a0859fb6fb2199dccf4ae67723e6359510a5c33a12
  • Pointer size: 131 Bytes
  • Size of remote file: 136 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/362BC018.jpg ADDED

Git LFS Details

  • SHA256: dd924c3f035f98c74dfc8fd8530d93385a02665f33b7157726345d5405a2e262
  • Pointer size: 131 Bytes
  • Size of remote file: 113 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/362BC020.jpg ADDED

Git LFS Details

  • SHA256: 89ff8c41574dc87507ddf0300ec0b599562cfcd2ff99e6f2f3fae61a014fce14
  • Pointer size: 131 Bytes
  • Size of remote file: 149 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/519H-XHERwL._AC_UF10001000_QL80_.jpg ADDED

Git LFS Details

  • SHA256: 946e4d8b38f5cacd7f272b96be86b46a94df6522e8e23548c917723d4c4cd6fd
  • Pointer size: 130 Bytes
  • Size of remote file: 99.6 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/A1KFjohu4IL.jpg ADDED

Git LFS Details

  • SHA256: e5ccf9935b25cd7f168891b7f1b13984d893ea4dc52202c9de314eaaa2b2d7bc
  • Pointer size: 132 Bytes
  • Size of remote file: 1.13 MB
c0cf44c8-3532-426c-80e0-d866ffbc924f/BS13.jpg ADDED

Git LFS Details

  • SHA256: 28e5cc244c984f0ede3803b672a4d1a39373f68fd18543657d9a81348b8df327
  • Pointer size: 130 Bytes
  • Size of remote file: 88.8 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/BS17.jpg ADDED

Git LFS Details

  • SHA256: 4160d1f588eb7fcf3ea8fdf3b7c4659040fa4d5b0f23af0106f1871122e2e5e6
  • Pointer size: 131 Bytes
  • Size of remote file: 102 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/IMG-20210201-WA0038-756x1024.jpg ADDED

Git LFS Details

  • SHA256: 31cc5d1bb1131cd376a193885fd758342836d262fe8d4c577a37dbe660c7416b
  • Pointer size: 131 Bytes
  • Size of remote file: 131 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/India_Kalighat_painting_19th_century_-_Two_Aspects_of_Kali-_Kali_Enshrined_-_1980.216.b_-_Cleveland_Museum_of_Art.jpg ADDED

Git LFS Details

  • SHA256: 6b97674c2332ee3769df36fd2c9c78926dcd8418da3285a34def84523d837b34
  • Pointer size: 132 Bytes
  • Size of remote file: 2.87 MB
c0cf44c8-3532-426c-80e0-d866ffbc924f/KAL210.jpg ADDED

Git LFS Details

  • SHA256: dbc6538bc5ef710963a2bd678974ce1567fecb20e3546accb96869df0e4411ef
  • Pointer size: 130 Bytes
  • Size of remote file: 75.6 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/KAL453.jpg ADDED

Git LFS Details

  • SHA256: a998227cd07f595faf7d64f3c63ca00017889b8aab4acd59cce3ff21d9b6aaba
  • Pointer size: 130 Bytes
  • Size of remote file: 77.8 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/Kalighat-133____Size-_18x11.5__Medium-_water_colour_on_paper___price-12_000.jpg ADDED

Git LFS Details

  • SHA256: b4ffd505df30b1dea3ded5ab8ebf84ad7e0453f31532658d3fcbdea23a9a5b40
  • Pointer size: 131 Bytes
  • Size of remote file: 223 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/MA158330.jpg ADDED

Git LFS Details

  • SHA256: 8d65978c5003ab92bf578a5ae53ce051488798b45e49fdcc1c1344725304f9c9
  • Pointer size: 130 Bytes
  • Size of remote file: 37.7 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/MA243022.jpg ADDED

Git LFS Details

  • SHA256: 743e649c2a661f6449006ea4fcd51bd0e0e12db3fb94c1feded0fa7a3a1e99ce
  • Pointer size: 130 Bytes
  • Size of remote file: 42.7 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/MA243160.jpg ADDED

Git LFS Details

  • SHA256: c14a129c437f2ae524f9fe97c4aa1ca44319d6d8754aa05bbc6c98c22d796b67
  • Pointer size: 130 Bytes
  • Size of remote file: 43.4 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/MA288407.jpg ADDED

Git LFS Details

  • SHA256: e0985945b497ff47a7711639526b95f49c66651efd0b105a97332b5402680cec
  • Pointer size: 130 Bytes
  • Size of remote file: 59.9 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/MA289529.jpg ADDED

Git LFS Details

  • SHA256: 026c6be6598be0f6c8a8c23157278cfa30452d82012bf0f1fc96ddc1bf64208a
  • Pointer size: 130 Bytes
  • Size of remote file: 59 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/Mythology-7.jpg ADDED

Git LFS Details

  • SHA256: 2443dc61b84ae50e4bac12d95f99a115b882f760fa5e5c7934c60a0e943a35d1
  • Pointer size: 131 Bytes
  • Size of remote file: 176 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/craft1575455965Kalighat Showcase 1.jpg ADDED

Git LFS Details

  • SHA256: 078dc6eb42c6bd6b43fd62c236bd27f48f2b489bfdde7829081a4dbc69205d55
  • Pointer size: 131 Bytes
  • Size of remote file: 374 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/download.jpg ADDED

Git LFS Details

  • SHA256: d7e84298f64b47ff33628ef8d133668b3bf25cb93b28d99e76f01fc60e82c43c
  • Pointer size: 130 Bytes
  • Size of remote file: 15.4 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/il_fullxfull.2732525416_o9p2.jpg ADDED

Git LFS Details

  • SHA256: 82c9c719a3c2e890ac5277bbda26d20a6435d037a0d706350164346da5d91904
  • Pointer size: 131 Bytes
  • Size of remote file: 124 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-1-anwar-chitrakar.jpg ADDED

Git LFS Details

  • SHA256: 2c1a270b573ed4f527c5c27d1f7e6db7994a8430785e528fc0e6dc36a417fd23
  • Pointer size: 131 Bytes
  • Size of remote file: 183 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-10-anwar-chitrakar.jpg ADDED

Git LFS Details

  • SHA256: 00f83419cfee40912f276e21c950a391b99ad11d49ae3584b3a9a36991f2a82c
  • Pointer size: 131 Bytes
  • Size of remote file: 163 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-12-anwar-chitrakar.jpg ADDED

Git LFS Details

  • SHA256: 823c86fc5bc2efcdd9b47dc414f088bb83d15573842549ff830dbb57baac2154
  • Pointer size: 131 Bytes
  • Size of remote file: 181 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-13-anwar-chitrakar.jpg ADDED

Git LFS Details

  • SHA256: dab58d8faf19986496abd04292652177f8a264d3bbd6cfa0971cc75eac8724f3
  • Pointer size: 131 Bytes
  • Size of remote file: 173 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-15-anwar-chitrakar.jpg ADDED

Git LFS Details

  • SHA256: 14bf2e1d6498b6ca6c1cd9128aae92ebaabfa68c24d70304f83e1e0d077f4f7d
  • Pointer size: 131 Bytes
  • Size of remote file: 189 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-17-anwar-chitrakar.jpg ADDED

Git LFS Details

  • SHA256: df912c87903aac6fc228f301f892824f345bc47eafdf3a834cd84908c9a2cc78
  • Pointer size: 131 Bytes
  • Size of remote file: 148 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-18-anwar-chitrakar.jpg ADDED

Git LFS Details

  • SHA256: bcdf0a25bab29b6e9cc0e643d767893ebb02432634a5395eefe2296c3a8eb1b5
  • Pointer size: 131 Bytes
  • Size of remote file: 172 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-19-anwar-chitrakar.jpg ADDED

Git LFS Details

  • SHA256: 83743f293c523820b847736dd16b86c5571463da5f8795c90406874fe9b24be8
  • Pointer size: 131 Bytes
  • Size of remote file: 176 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/kalighat-patachitra-6-anwar-chitrakar.jpg ADDED

Git LFS Details

  • SHA256: 83be94e416f863ce76f4b4255ab589d01b62e3fc2b4208903c96ac681e46180d
  • Pointer size: 131 Bytes
  • Size of remote file: 157 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/lord_ganesha.jpg ADDED

Git LFS Details

  • SHA256: e2279ce32539ad42d9d730f2507dcaa240b0593b8cf68b2f4315174786bd0ca4
  • Pointer size: 131 Bytes
  • Size of remote file: 512 kB
c0cf44c8-3532-426c-80e0-d866ffbc924f/metadata.jsonl ADDED
@@ -0,0 +1,34 @@
1
+ {"file_name": "3-kalighat-patachitra-anwar-chitrakar.jpg", "prompt": "an indian painting depicting a woman and man in the style of TOK"}
2
+ {"file_name": "07-08IKP_A.jpg", "prompt": "a painting of a man sitting on a chair with a glass in the style of TOK"}
3
+ {"file_name": "311PM030.jpg", "prompt": "an indian painting of a woman sitting and petting a peacock in the style of TOK"}
4
+ {"file_name": "362BC016.jpg", "prompt": "an indian woman is taking a selfie with her husband in the style of TOK"}
5
+ {"file_name": "362BC018.jpg", "prompt": "a painting of a woman swinging on a swing in the style of TOK"}
6
+ {"file_name": "362BC020.jpg", "prompt": "an indian painting of a man holding a fish in the style of TOK"}
7
+ {"file_name": "519H-XHERwL._AC_UF10001000_QL80_.jpg", "prompt": "an indian painting depicting a bird and a man in the style of TOK"}
8
+ {"file_name": "A1KFjohu4IL.jpg", "prompt": "an indian painting depicting four women dancing in the style of TOK"}
9
+ {"file_name": "BS13.jpg", "prompt": "an indian painting depicting a woman pouring water into a pot in the style of TOK"}
10
+ {"file_name": "BS17.jpg", "prompt": "a painting depicting two women carrying bricks in the style of TOK"}
11
+ {"file_name": "craft1575455965Kalighat Showcase 1.jpg", "prompt": "a painting of a woman sitting on a chair with a baby lord Ganesh in the style of TOK"}
12
+ {"file_name": "download.jpg", "prompt": "a painting of a woman carrying branches of a tree in the style of TOK"}
13
+ {"file_name": "il_fullxfull.2732525416_o9p2.jpg", "prompt": "an indian painting of a man and woman in traditional clothing where man is smoking a hookah in the style of TOK"}
14
+ {"file_name": "IMG-20210201-WA0038-756x1024.jpg", "prompt": "an indian painting of a couple sitting on a couch in the style of TOK"}
15
+ {"file_name": "India_Kalighat_painting_19th_century_-_Two_Aspects_of_Kali-_Kali_Enshrined_-_1980.216.b_-_Cleveland_Museum_of_Art.jpg", "prompt": "an indian painting depicting a hindu goddess Kali in the style of TOK"}
16
+ {"file_name": "KAL210.jpg", "prompt": "a painting of two men in traditional clothing in the style of TOK"}
17
+ {"file_name": "KAL453.jpg", "prompt": "a painting of a woman playing a sitar in the style of TOK"}
18
+ {"file_name": "Kalighat-133____Size-_18x11.5__Medium-_water_colour_on_paper___price-12_000.jpg", "prompt": "a painting of a woman in a sari in the style of TOK"}
19
+ {"file_name": "kalighat-patachitra-1-anwar-chitrakar.jpg", "prompt": "an indian painting depicting a woman getting her hair done in the style of TOK"}
20
+ {"file_name": "kalighat-patachitra-6-anwar-chitrakar.jpg", "prompt": "a painting of one men sitting on a chair and the other one on the floor in the style of TOK"}
21
+ {"file_name": "kalighat-patachitra-10-anwar-chitrakar.jpg", "prompt": "a painting of a man sitting on a chair with a book in the style of TOK"}
22
+ {"file_name": "kalighat-patachitra-12-anwar-chitrakar.jpg", "prompt": "an indian painting depicting two women in traditional clothing in the style of TOK"}
23
+ {"file_name": "kalighat-patachitra-13-anwar-chitrakar.jpg", "prompt": "a painting of a man playing a sitar in the style of TOK"}
24
+ {"file_name": "kalighat-patachitra-15-anwar-chitrakar.jpg", "prompt": "an indian painting of a man smoking a pipe in the style of TOK"}
25
+ {"file_name": "kalighat-patachitra-17-anwar-chitrakar.jpg", "prompt": "two women sitting on a yellow background with a brush in the style of TOK"}
26
+ {"file_name": "kalighat-patachitra-18-anwar-chitrakar.jpg", "prompt": "an indian painting depicting a woman sitting on a couch brushing her hair in the style of TOK"}
27
+ {"file_name": "kalighat-patachitra-19-anwar-chitrakar.jpg", "prompt": "an indian painting depicting a woman and man quarrelling in the style of TOK"}
28
+ {"file_name": "lord_ganesha.jpg", "prompt": "an indian painting of lord Ganesha sitting on a chair in the style of TOK"}
29
+ {"file_name": "MA158330.jpg", "prompt": "an indian painting of a man sitting on a chair in the style of TOK"}
30
+ {"file_name": "MA243022.jpg", "prompt": "an indian painting depicting a man sitting on a chariot in the style of TOK"}
31
+ {"file_name": "MA243160.jpg", "prompt": "an indian painting of a man sitting on a chair in the style of TOK"}
32
+ {"file_name": "MA288407.jpg", "prompt": "an indian painting depicting lord Shiva holding baby lord Ganesha in the style of TOK"}
33
+ {"file_name": "MA289529.jpg", "prompt": "an old painting of a man sitting on a chair and a woman operating a handheld fan in the style of TOK"}
34
+ {"file_name": "Mythology-7.jpg", "prompt": "an indian painting depicting lord Ganesha sitting on a tree in the style of TOK"}
requirements.txt ADDED
@@ -0,0 +1,11 @@
1
+ peft==0.7.1
2
+ # huggingface_hub (installed from a pinned git revision below)
3
+ torch
4
+ git+https://github.com/huggingface/diffusers@518171600d3eb82fc4f4c84b81dd7564b02728dc
5
+ transformers==4.36.2
6
+ accelerate==0.25.0
7
+ safetensors==0.4.1
8
+ prodigyopt==1.0
9
+ hf-transfer==0.1.4
10
+ git+https://github.com/huggingface/datasets.git@3f149204a2a5948287adcade5e90707aa5207a92
11
+ git+https://github.com/huggingface/huggingface_hub.git@8d052492fe0059c606c1a48d7a914b15b64a834d
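Note on the pins above: `hf-transfer` only takes effect when the `HF_HUB_ENABLE_HF_TRANSFER` environment variable is set before `huggingface_hub` performs a transfer. A small sketch; the dataset repo id is a placeholder, not something defined by this commit:

```py
# Minimal sketch, assuming you want the pinned hf-transfer backend for faster transfers.
import os
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"  # must be set before downloads/uploads start

from huggingface_hub import snapshot_download

# Hypothetical dataset repo id, for illustration only.
snapshot_download("your-username/your-dataset", repo_type="dataset")
```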
script.py ADDED
@@ -0,0 +1,129 @@
1
+ import sys
2
+ import subprocess
3
+ from safetensors.torch import load_file
4
+ from diffusers import AutoPipelineForText2Image
5
+ from datasets import load_dataset
6
+ from huggingface_hub.repocard import RepoCard
7
+ from huggingface_hub import HfApi
8
+ import torch
9
+ import re
10
+ import argparse
11
+ import os
12
+ import zipfile
13
+
14
+ def do_preprocess(class_data_dir):
15
+ print("Unzipping dataset")
16
+ zip_file_path = f"{class_data_dir}/class_images.zip"
17
+ with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
18
+ zip_ref.extractall(class_data_dir)
19
+ os.remove(zip_file_path)
20
+
21
+ def do_train(script_args):
22
+ # Pass all arguments to trainer.py
23
+ print("Starting training...")
24
+ result = subprocess.run(['python', 'trainer.py'] + script_args)
25
+ if result.returncode != 0:
26
+ raise Exception("Training failed.")
27
+
28
+ def replace_output_dir(text, output_dir, replacement):
29
+ # Define a pattern that matches the output_dir followed by whitespace, '/', new line, or "'"
30
+ # Prepend the HF username to output_dir only in the correct spots
31
+ pattern = rf"{output_dir}(?=[\s/'\n])"
32
+ return re.sub(pattern, replacement, text)
33
+
34
+ def do_inference(dataset_name, output_dir, num_tokens):
35
+ widget_content = []
36
+ try:
37
+ print("Starting inference to generate example images...")
38
+ dataset = load_dataset(dataset_name)
39
+ pipe = AutoPipelineForText2Image.from_pretrained(
40
+ "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
41
+ )
42
+ pipe = pipe.to("cuda")
43
+ pipe.load_lora_weights(f'{output_dir}/pytorch_lora_weights.safetensors')
44
+
45
+ prompts = dataset["train"]["prompt"]
46
+ if(num_tokens > 0):
47
+ tokens_sequence = ''.join(f'<s{i}>' for i in range(num_tokens))
48
+ tokens_list = [f'<s{i}>' for i in range(num_tokens)]
49
+
50
+ state_dict = load_file(f"{output_dir}/{output_dir}_emb.safetensors")
51
+ pipe.load_textual_inversion(state_dict["clip_l"], token=tokens_list, text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
52
+ pipe.load_textual_inversion(state_dict["clip_g"], token=tokens_list, text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
53
+
54
+ prompts = [prompt.replace("TOK", tokens_sequence) for prompt in prompts]
55
+
56
+ for i, prompt in enumerate(prompts):
57
+ image = pipe(prompt, num_inference_steps=25, guidance_scale=7.5).images[0]
58
+ filename = f"image-{i}.png"
59
+ image.save(f"{output_dir}/{filename}")
60
+ card_dict = {
61
+ "text": prompt,
62
+ "output": {
63
+ "url": filename
64
+ }
65
+ }
66
+ widget_content.append(card_dict)
67
+ except Exception as e:
68
+ print("Something went wrong with generating images, specifically: ", e)
69
+
70
+ try:
71
+ api = HfApi()
72
+ username = api.whoami()["name"]
73
+ repo_id = api.create_repo(f"{username}/{output_dir}", exist_ok=True, private=True).repo_id
74
+
75
+ with open(f'{output_dir}/README.md', 'r') as file:
76
+ readme_content = file.read()
77
+
78
+
79
+ readme_content = replace_output_dir(readme_content, output_dir, f"{username}/{output_dir}")
80
+
81
+ card = RepoCard(readme_content)
82
+ if widget_content:
83
+ card.data["widget"] = widget_content
84
+ card.save(f'{output_dir}/README.md')
85
+
86
+ print("Starting upload...")
87
+ api.upload_folder(
88
+ folder_path=output_dir,
89
+ repo_id=f"{username}/{output_dir}",
90
+ repo_type="model",
91
+ )
92
+ except Exception as e:
93
+ print("Something went wrong with uploading your model, specificaly: ", e)
94
+ else:
95
+ print("Upload finished!")
96
+
97
+ import sys
98
+ import argparse
99
+
100
+ def main():
101
+ # Capture all arguments except the script name
102
+ script_args = sys.argv[1:]
103
+
104
+ # Create the argument parser
105
+ parser = argparse.ArgumentParser()
106
+ parser.add_argument('--dataset_name', required=True)
107
+ parser.add_argument('--output_dir', required=True)
108
+ parser.add_argument('--num_new_tokens_per_abstraction', type=int, default=0)
109
+ parser.add_argument('--train_text_encoder_ti', action='store_true')
110
+ parser.add_argument('--class_data_dir', help="Name of the class images dataset")
111
+
112
+ # Parse known arguments
113
+ args, _ = parser.parse_known_args(script_args)
114
+
115
+ # Set num_tokens to 0 if '--train_text_encoder_ti' is not present
116
+ if not args.train_text_encoder_ti:
117
+ args.num_new_tokens_per_abstraction = 0
118
+
119
+ # Proceed with training and inference
120
+ if args.class_data_dir:
121
+ do_preprocess(args.class_data_dir)
122
+ print("Pre-processing finished!")
123
+ do_train(script_args)
124
+ print("Training finished!")
125
+ do_inference(args.dataset_name, args.output_dir, args.num_new_tokens_per_abstraction)
126
+ print("All finished!")
127
+
128
+ if __name__ == "__main__":
129
+ main()
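`do_inference` above uploads both the LoRA weights and, when pivotal tuning is used, the textual-inversion embeddings to a model repo. A hedged sketch of loading those artifacts back for generation; the repo id, embedding filename and the `<s0><s1>` tokens below are placeholders matching the trainer defaults, not values confirmed by this commit:

```py
# Minimal sketch, assuming a repo produced by this script; names below are placeholders.
import torch
from diffusers import AutoPipelineForText2Image
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

repo_id = "your-username/your-output-dir"  # hypothetical
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights(repo_id, weight_name="pytorch_lora_weights.safetensors")

# Pivotal-tuning embeddings are saved by trainer.py under the keys "clip_l" and "clip_g".
emb_path = hf_hub_download(repo_id, filename="your-output-dir_emb.safetensors")  # hypothetical filename
state_dict = load_file(emb_path)
tokens = ["<s0>", "<s1>"]  # two tokens per abstraction is the trainer default
pipe.load_textual_inversion(state_dict["clip_l"], token=tokens,
                            text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
pipe.load_textual_inversion(state_dict["clip_g"], token=tokens,
                            text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)

image = pipe("an indian painting of a cat in the style of <s0><s1>").images[0]
image.save("sample.png")
```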
trainer.py ADDED
@@ -0,0 +1,2100 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import argparse
17
+ import gc
18
+ import hashlib
19
+ import itertools
20
+ import logging
21
+ import math
22
+ import os
23
+ import re
24
+ import shutil
25
+ import warnings
26
+ from pathlib import Path
27
+ from typing import List, Optional
28
+
29
+ import numpy as np
30
+ import torch
31
+ import torch.nn.functional as F
32
+
33
+ # imports of the TokenEmbeddingsHandler class
34
+ import torch.utils.checkpoint
35
+ import transformers
36
+ from accelerate import Accelerator
37
+ from accelerate.logging import get_logger
38
+ from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
39
+ from huggingface_hub import create_repo, upload_folder
40
+ from packaging import version
41
+ from peft import LoraConfig
42
+ from peft.utils import get_peft_model_state_dict
43
+ from PIL import Image
44
+ from PIL.ImageOps import exif_transpose
45
+ from safetensors.torch import load_file, save_file
46
+ from torch.utils.data import Dataset
47
+ from torchvision import transforms
48
+ from tqdm.auto import tqdm
49
+ from transformers import AutoTokenizer, PretrainedConfig
50
+
51
+ import diffusers
52
+ from diffusers import (
53
+ AutoencoderKL,
54
+ DDPMScheduler,
55
+ DPMSolverMultistepScheduler,
56
+ StableDiffusionXLPipeline,
57
+ UNet2DConditionModel,
58
+ )
59
+ from diffusers.loaders import LoraLoaderMixin
60
+ from diffusers.optimization import get_scheduler
61
+ from diffusers.training_utils import compute_snr
62
+ from diffusers.utils import (
63
+ check_min_version,
64
+ convert_all_state_dict_to_peft,
65
+ convert_state_dict_to_diffusers,
66
+ convert_state_dict_to_kohya,
67
+ is_wandb_available,
68
+ )
69
+ from diffusers.utils.import_utils import is_xformers_available
70
+
71
+
72
+ # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
73
+ check_min_version("0.25.0.dev0")
74
+
75
+ logger = get_logger(__name__)
76
+
77
+
78
+ def save_model_card(
79
+ repo_id: str,
80
+ images=None,
81
+ base_model=str,
82
+ train_text_encoder=False,
83
+ train_text_encoder_ti=False,
84
+ token_abstraction_dict=None,
85
+ instance_prompt=str,
86
+ validation_prompt=str,
87
+ repo_folder=None,
88
+ vae_path=None,
89
+ ):
90
+ img_str = "widget:\n"
91
+ for i, image in enumerate(images):
92
+ image.save(os.path.join(repo_folder, f"image_{i}.png"))
93
+ img_str += f"""
94
+ - text: '{validation_prompt if validation_prompt else ' ' }'
95
+ output:
96
+ url:
97
+ "image_{i}.png"
98
+ """
99
+ if not images:
100
+ img_str += f"""
101
+ - text: '{instance_prompt}'
102
+ """
103
+ embeddings_filename = f"{repo_folder}_emb"
104
+ instance_prompt_webui = re.sub(r"<s\d+>", "", re.sub(r"<s\d+>", embeddings_filename, instance_prompt, count=1))
105
+ ti_keys = ", ".join(f'"{match}"' for match in re.findall(r"<s\d+>", instance_prompt))
106
+ if instance_prompt_webui != embeddings_filename:
107
+ instance_prompt_sentence = f"For example, `{instance_prompt_webui}`"
108
+ else:
109
+ instance_prompt_sentence = ""
110
+ trigger_str = f"You should use {instance_prompt} to trigger the image generation."
111
+ diffusers_imports_pivotal = ""
112
+ diffusers_example_pivotal = ""
113
+ webui_example_pivotal = ""
114
+ if train_text_encoder_ti:
115
+ trigger_str = (
116
+ "To trigger image generation of trained concept(or concepts) replace each concept identifier "
117
+ "in you prompt with the new inserted tokens:\n"
118
+ )
119
+ diffusers_imports_pivotal = """from huggingface_hub import hf_hub_download
120
+ from safetensors.torch import load_file
121
+ """
122
+ diffusers_example_pivotal = f"""embedding_path = hf_hub_download(repo_id='{repo_id}', filename='{embeddings_filename}.safetensors' repo_type="model")
123
+ state_dict = load_file(embedding_path)
124
+ pipeline.load_textual_inversion(state_dict["clip_l"], token=[{ti_keys}], text_encoder=pipeline.text_encoder, tokenizer=pipeline.tokenizer)
125
+ pipeline.load_textual_inversion(state_dict["clip_g"], token=[{ti_keys}], text_encoder=pipeline.text_encoder_2, tokenizer=pipeline.tokenizer_2)
126
+ """
127
+ webui_example_pivotal = f"""- *Embeddings*: download **[`{embeddings_filename}.safetensors` here 💾](/{repo_id}/blob/main/{embeddings_filename}.safetensors)**.
128
+ - Place it in your `embeddings` folder
129
+ - Use it by adding `{embeddings_filename}` to your prompt. {instance_prompt_sentence}
130
+ (you need both the LoRA and the embeddings as they were trained together for this LoRA)
131
+ """
132
+ if token_abstraction_dict:
133
+ for key, value in token_abstraction_dict.items():
134
+ tokens = "".join(value)
135
+ trigger_str += f"""
136
+ to trigger concept `{key}` → use `{tokens}` in your prompt \n
137
+ """
138
+
139
+ yaml = f"""---
140
+ tags:
141
+ - stable-diffusion-xl
142
+ - stable-diffusion-xl-diffusers
143
+ - text-to-image
144
+ - diffusers
145
+ - lora
146
+ - template:sd-lora
147
+ {img_str}
148
+ base_model: {base_model}
149
+ instance_prompt: {instance_prompt}
150
+ license: openrail++
151
+ ---
152
+ """
153
+
154
+ model_card = f"""
155
+ # SDXL LoRA DreamBooth - {repo_id}
156
+
157
+ <Gallery />
158
+
159
+ ## Model description
160
+
161
+ ### These are {repo_id} LoRA adaption weights for {base_model}.
162
+
163
+ ## Download model
164
+
165
+ ### Use it with UIs such as AUTOMATIC1111, Comfy UI, SD.Next, Invoke
166
+
167
+ - **LoRA**: download **[`{repo_folder}.safetensors` here 💾](/{repo_id}/blob/main/{repo_folder}.safetensors)**.
168
+ - Place it in your `models/Lora` folder.
169
+ - On AUTOMATIC1111, load the LoRA by adding `<lora:{repo_folder}:1>` to your prompt. On ComfyUI just [load it as a regular LoRA](https://comfyanonymous.github.io/ComfyUI_examples/lora/).
170
+ {webui_example_pivotal}
171
+
172
+ ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)
173
+
174
+ ```py
175
+ from diffusers import AutoPipelineForText2Image
176
+ import torch
177
+ {diffusers_imports_pivotal}
178
+ pipeline = AutoPipelineForText2Image.from_pretrained('stabilityai/stable-diffusion-xl-base-1.0', torch_dtype=torch.float16).to('cuda')
179
+ pipeline.load_lora_weights('{repo_id}', weight_name='pytorch_lora_weights.safetensors')
180
+ {diffusers_example_pivotal}
181
+ image = pipeline('{validation_prompt if validation_prompt else instance_prompt}').images[0]
182
+ ```
183
+
184
+ For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
185
+
186
+ ## Trigger words
187
+
188
+ {trigger_str}
189
+
190
+ ## Details
191
+ All [Files & versions](/{repo_id}/tree/main).
192
+
193
+ The weights were trained using [🧨 diffusers Advanced Dreambooth Training Script](https://github.com/huggingface/diffusers/blob/main/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py).
194
+
195
+ LoRA for the text encoder was enabled: {train_text_encoder}.
196
+
197
+ Pivotal tuning was enabled: {train_text_encoder_ti}.
198
+
199
+ Special VAE used for training: {vae_path}.
200
+
201
+ """
202
+ with open(os.path.join(repo_folder, "README.md"), "w") as f:
203
+ f.write(yaml + model_card)
204
+
205
+
206
+ def import_model_class_from_model_name_or_path(
207
+ pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
208
+ ):
209
+ text_encoder_config = PretrainedConfig.from_pretrained(
210
+ pretrained_model_name_or_path, subfolder=subfolder, revision=revision
211
+ )
212
+ model_class = text_encoder_config.architectures[0]
213
+
214
+ if model_class == "CLIPTextModel":
215
+ from transformers import CLIPTextModel
216
+
217
+ return CLIPTextModel
218
+ elif model_class == "CLIPTextModelWithProjection":
219
+ from transformers import CLIPTextModelWithProjection
220
+
221
+ return CLIPTextModelWithProjection
222
+ else:
223
+ raise ValueError(f"{model_class} is not supported.")
224
+
225
+
226
+ def parse_args(input_args=None):
227
+ parser = argparse.ArgumentParser(description="Simple example of a training script.")
228
+ parser.add_argument(
229
+ "--pretrained_model_name_or_path",
230
+ type=str,
231
+ default=None,
232
+ required=True,
233
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
234
+ )
235
+ parser.add_argument(
236
+ "--pretrained_vae_model_name_or_path",
237
+ type=str,
238
+ default=None,
239
+ help="Path to pretrained VAE model with better numerical stability. More details: https://github.com/huggingface/diffusers/pull/4038.",
240
+ )
241
+ parser.add_argument(
242
+ "--revision",
243
+ type=str,
244
+ default=None,
245
+ required=False,
246
+ help="Revision of pretrained model identifier from huggingface.co/models.",
247
+ )
248
+ parser.add_argument(
249
+ "--variant",
250
+ type=str,
251
+ default=None,
252
+ help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
253
+ )
254
+ parser.add_argument(
255
+ "--dataset_name",
256
+ type=str,
257
+ default=None,
258
+ help=(
259
+ "The name of the Dataset (from the HuggingFace hub) containing the training data of instance images (could be your own, possibly private,"
260
+ " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
261
+ " or to a folder containing files that 🤗 Datasets can understand.To load the custom captions, the training set directory needs to follow the structure of a "
262
+ "datasets ImageFolder, containing both the images and the corresponding caption for each image. see: "
263
+ "https://huggingface.co/docs/datasets/image_dataset for more information"
264
+ ),
265
+ )
266
+ parser.add_argument(
267
+ "--dataset_config_name",
268
+ type=str,
269
+ default=None,
270
+ help="The config of the Dataset. In some cases, a dataset may have more than one configuration (for example "
271
+ "if it contains different subsets of data within, and you only wish to load a specific subset - in that case specify the desired configuration using --dataset_config_name. Leave as "
272
+ "None if there's only one config.",
273
+ )
274
+ parser.add_argument(
275
+ "--instance_data_dir",
276
+ type=str,
277
+ default=None,
278
+ help="A path to local folder containing the training data of instance images. Specify this arg instead of "
279
+ "--dataset_name if you wish to train using a local folder without custom captions. If you wish to train with custom captions please specify "
280
+ "--dataset_name instead.",
281
+ )
282
+
283
+ parser.add_argument(
284
+ "--cache_dir",
285
+ type=str,
286
+ default=None,
287
+ help="The directory where the downloaded models and datasets will be stored.",
288
+ )
289
+
290
+ parser.add_argument(
291
+ "--image_column",
292
+ type=str,
293
+ default="image",
294
+ help="The column of the dataset containing the target image. By "
295
+ "default, the standard Image Dataset maps out 'file_name' "
296
+ "to 'image'.",
297
+ )
298
+ parser.add_argument(
299
+ "--caption_column",
300
+ type=str,
301
+ default=None,
302
+ help="The column of the dataset containing the instance prompt for each image",
303
+ )
304
+
305
+ parser.add_argument("--repeats", type=int, default=1, help="How many times to repeat the training data.")
306
+
307
+ parser.add_argument(
308
+ "--class_data_dir",
309
+ type=str,
310
+ default=None,
311
+ required=False,
312
+ help="A folder containing the training data of class images.",
313
+ )
314
+ parser.add_argument(
315
+ "--instance_prompt",
316
+ type=str,
317
+ default=None,
318
+ required=True,
319
+ help="The prompt with identifier specifying the instance, e.g. 'photo of a TOK dog', 'in the style of TOK'",
320
+ )
321
+ parser.add_argument(
322
+ "--token_abstraction",
323
+ type=str,
324
+ default="TOK",
325
+ help="identifier specifying the instance(or instances) as used in instance_prompt, validation prompt, "
326
+ "captions - e.g. TOK. To use multiple identifiers, please specify them in a comma seperated string - e.g. "
327
+ "'TOK,TOK2,TOK3' etc.",
328
+ )
329
+
330
+ parser.add_argument(
331
+ "--num_new_tokens_per_abstraction",
332
+ type=int,
333
+ default=2,
334
+ help="number of new tokens inserted to the tokenizers per token_abstraction identifier when "
335
+ "--train_text_encoder_ti = True. By default, each --token_abstraction (e.g. TOK) is mapped to 2 new "
336
+ "tokens - <si><si+1> ",
337
+ )
338
+
339
+ parser.add_argument(
340
+ "--class_prompt",
341
+ type=str,
342
+ default=None,
343
+ help="The prompt to specify images in the same class as provided instance images.",
344
+ )
345
+ parser.add_argument(
346
+ "--validation_prompt",
347
+ type=str,
348
+ default=None,
349
+ help="A prompt that is used during validation to verify that the model is learning.",
350
+ )
351
+ parser.add_argument(
352
+ "--num_validation_images",
353
+ type=int,
354
+ default=4,
355
+ help="Number of images that should be generated during validation with `validation_prompt`.",
356
+ )
357
+ parser.add_argument(
358
+ "--validation_epochs",
359
+ type=int,
360
+ default=50,
361
+ help=(
362
+ "Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt"
363
+ " `args.validation_prompt` multiple times: `args.num_validation_images`."
364
+ ),
365
+ )
366
+ parser.add_argument(
367
+ "--with_prior_preservation",
368
+ default=False,
369
+ action="store_true",
370
+ help="Flag to add prior preservation loss.",
371
+ )
372
+ parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
373
+ parser.add_argument(
374
+ "--num_class_images",
375
+ type=int,
376
+ default=100,
377
+ help=(
378
+ "Minimal class images for prior preservation loss. If there are not enough images already present in"
379
+ " class_data_dir, additional images will be sampled with class_prompt."
380
+ ),
381
+ )
382
+ parser.add_argument(
383
+ "--output_dir",
384
+ type=str,
385
+ default="lora-dreambooth-model",
386
+ help="The output directory where the model predictions and checkpoints will be written.",
387
+ )
388
+ parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
389
+ parser.add_argument(
390
+ "--resolution",
391
+ type=int,
392
+ default=1024,
393
+ help=(
394
+ "The resolution for input images, all the images in the train/validation dataset will be resized to this"
395
+ " resolution"
396
+ ),
397
+ )
398
+ parser.add_argument(
399
+ "--crops_coords_top_left_h",
400
+ type=int,
401
+ default=0,
402
+ help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."),
403
+ )
404
+ parser.add_argument(
405
+ "--crops_coords_top_left_w",
406
+ type=int,
407
+ default=0,
408
+ help=("Coordinate for (the height) to be included in the crop coordinate embeddings needed by SDXL UNet."),
409
+ )
410
+ parser.add_argument(
411
+ "--center_crop",
412
+ default=False,
413
+ action="store_true",
414
+ help=(
415
+ "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
416
+ " cropped. The images will be resized to the resolution first before cropping."
417
+ ),
418
+ )
419
+ parser.add_argument(
420
+ "--train_text_encoder",
421
+ action="store_true",
422
+ help="Whether to train the text encoder. If set, the text encoder should be float32 precision.",
423
+ )
424
+ parser.add_argument(
425
+ "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
426
+ )
427
+ parser.add_argument(
428
+ "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
429
+ )
430
+ parser.add_argument("--num_train_epochs", type=int, default=1)
431
+ parser.add_argument(
432
+ "--max_train_steps",
433
+ type=int,
434
+ default=None,
435
+ help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
436
+ )
437
+ parser.add_argument(
438
+ "--checkpointing_steps",
439
+ type=int,
440
+ default=500,
441
+ help=(
442
+ "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
443
+ " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
444
+ " training using `--resume_from_checkpoint`."
445
+ ),
446
+ )
447
+ parser.add_argument(
448
+ "--checkpoints_total_limit",
449
+ type=int,
450
+ default=None,
451
+ help=("Max number of checkpoints to store."),
452
+ )
453
+ parser.add_argument(
454
+ "--resume_from_checkpoint",
455
+ type=str,
456
+ default=None,
457
+ help=(
458
+ "Whether training should be resumed from a previous checkpoint. Use a path saved by"
459
+ ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
460
+ ),
461
+ )
462
+ parser.add_argument(
463
+ "--gradient_accumulation_steps",
464
+ type=int,
465
+ default=1,
466
+ help="Number of updates steps to accumulate before performing a backward/update pass.",
467
+ )
468
+ parser.add_argument(
469
+ "--gradient_checkpointing",
470
+ action="store_true",
471
+ help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
472
+ )
473
+ parser.add_argument(
474
+ "--learning_rate",
475
+ type=float,
476
+ default=1e-4,
477
+ help="Initial learning rate (after the potential warmup period) to use.",
478
+ )
479
+
480
+ parser.add_argument(
481
+ "--text_encoder_lr",
482
+ type=float,
483
+ default=5e-6,
484
+ help="Text encoder learning rate to use.",
485
+ )
486
+ parser.add_argument(
487
+ "--scale_lr",
488
+ action="store_true",
489
+ default=False,
490
+ help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
491
+ )
492
+ parser.add_argument(
493
+ "--lr_scheduler",
494
+ type=str,
495
+ default="constant",
496
+ help=(
497
+ 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
498
+ ' "constant", "constant_with_warmup"]'
499
+ ),
500
+ )
501
+
502
+ parser.add_argument(
503
+ "--snr_gamma",
504
+ type=float,
505
+ default=None,
506
+ help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. "
507
+ "More details here: https://arxiv.org/abs/2303.09556.",
508
+ )
509
+ parser.add_argument(
510
+ "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
511
+ )
512
+ parser.add_argument(
513
+ "--lr_num_cycles",
514
+ type=int,
515
+ default=1,
516
+ help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
517
+ )
518
+ parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
519
+ parser.add_argument(
520
+ "--dataloader_num_workers",
521
+ type=int,
522
+ default=0,
523
+ help=(
524
+ "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
525
+ ),
526
+ )
527
+
528
+ parser.add_argument(
529
+ "--train_text_encoder_ti",
530
+ action="store_true",
531
+ help=("Whether to use textual inversion"),
532
+ )
533
+
534
+ parser.add_argument(
535
+ "--train_text_encoder_ti_frac",
536
+ type=float,
537
+ default=0.5,
538
+ help=("The percentage of epochs to perform textual inversion"),
539
+ )
540
+
541
+ parser.add_argument(
542
+ "--train_text_encoder_frac",
543
+ type=float,
544
+ default=1.0,
545
+ help=("The percentage of epochs to perform text encoder tuning"),
546
+ )
547
+
548
+ parser.add_argument(
549
+ "--optimizer",
550
+ type=str,
551
+ default="adamW",
552
+ help=('The optimizer type to use. Choose between ["AdamW", "prodigy"]'),
553
+ )
554
+
555
+ parser.add_argument(
556
+ "--use_8bit_adam",
557
+ action="store_true",
558
+ help="Whether or not to use 8-bit Adam from bitsandbytes. Ignored if optimizer is not set to AdamW",
559
+ )
560
+
561
+ parser.add_argument(
562
+ "--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam and Prodigy optimizers."
563
+ )
564
+ parser.add_argument(
565
+ "--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam and Prodigy optimizers."
566
+ )
567
+ parser.add_argument(
568
+ "--prodigy_beta3",
569
+ type=float,
570
+ default=None,
571
+ help="coefficients for computing the Prodidy stepsize using running averages. If set to None, "
572
+ "uses the value of square root of beta2. Ignored if optimizer is adamW",
573
+ )
574
+ parser.add_argument("--prodigy_decouple", type=bool, default=True, help="Use AdamW style decoupled weight decay")
575
+ parser.add_argument("--adam_weight_decay", type=float, default=1e-04, help="Weight decay to use for unet params")
576
+ parser.add_argument(
577
+ "--adam_weight_decay_text_encoder", type=float, default=None, help="Weight decay to use for text_encoder"
578
+ )
579
+
580
+ parser.add_argument(
581
+ "--adam_epsilon",
582
+ type=float,
583
+ default=1e-08,
584
+ help="Epsilon value for the Adam optimizer and Prodigy optimizers.",
585
+ )
586
+
587
+ parser.add_argument(
588
+ "--prodigy_use_bias_correction",
589
+ type=bool,
590
+ default=True,
591
+ help="Turn on Adam's bias correction. True by default. Ignored if optimizer is adamW",
592
+ )
593
+ parser.add_argument(
594
+ "--prodigy_safeguard_warmup",
595
+ type=bool,
596
+ default=True,
597
+ help="Remove lr from the denominator of D estimate to avoid issues during warm-up stage. True by default. "
598
+ "Ignored if optimizer is adamW",
599
+ )
600
+ parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
601
+ parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
602
+ parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
603
+ parser.add_argument(
604
+ "--hub_model_id",
605
+ type=str,
606
+ default=None,
607
+ help="The name of the repository to keep in sync with the local `output_dir`.",
608
+ )
609
+ parser.add_argument(
610
+ "--logging_dir",
611
+ type=str,
612
+ default="logs",
613
+ help=(
614
+ "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
615
+ " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
616
+ ),
617
+ )
618
+ parser.add_argument(
619
+ "--allow_tf32",
620
+ action="store_true",
621
+ help=(
622
+ "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
623
+ " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
624
+ ),
625
+ )
626
+ parser.add_argument(
627
+ "--report_to",
628
+ type=str,
629
+ default="tensorboard",
630
+ help=(
631
+ 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
632
+ ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
633
+ ),
634
+ )
635
+ parser.add_argument(
636
+ "--mixed_precision",
637
+ type=str,
638
+ default=None,
639
+ choices=["no", "fp16", "bf16"],
640
+ help=(
641
+ "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
642
+ " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
643
+ " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
644
+ ),
645
+ )
646
+ parser.add_argument(
647
+ "--prior_generation_precision",
648
+ type=str,
649
+ default=None,
650
+ choices=["no", "fp32", "fp16", "bf16"],
651
+ help=(
652
+ "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
653
+ " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32."
654
+ ),
655
+ )
656
+ parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
657
+ parser.add_argument(
658
+ "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
659
+ )
660
+ parser.add_argument(
661
+ "--rank",
662
+ type=int,
663
+ default=4,
664
+ help=("The dimension of the LoRA update matrices."),
665
+ )
666
+ parser.add_argument(
667
+ "--cache_latents",
668
+ action="store_true",
669
+ default=False,
670
+ help="Cache the VAE latents",
671
+ )
672
+
673
+ if input_args is not None:
674
+ args = parser.parse_args(input_args)
675
+ else:
676
+ args = parser.parse_args()
677
+
678
+ if args.dataset_name is None and args.instance_data_dir is None:
679
+ raise ValueError("Specify either `--dataset_name` or `--instance_data_dir`")
680
+
681
+ if args.dataset_name is not None and args.instance_data_dir is not None:
682
+ raise ValueError("Specify only one of `--dataset_name` or `--instance_data_dir`")
683
+
684
+ if args.train_text_encoder and args.train_text_encoder_ti:
685
+ raise ValueError(
686
+ "Specify only one of `--train_text_encoder` or `--train_text_encoder_ti. "
687
+ "For full LoRA text encoder training check --train_text_encoder, for textual "
688
+ "inversion training check `--train_text_encoder_ti`"
689
+ )
690
+
691
+ env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
692
+ if env_local_rank != -1 and env_local_rank != args.local_rank:
693
+ args.local_rank = env_local_rank
694
+
695
+ if args.with_prior_preservation:
696
+ if args.class_data_dir is None:
697
+ raise ValueError("You must specify a data directory for class images.")
698
+ if args.class_prompt is None:
699
+ raise ValueError("You must specify prompt for class images.")
700
+ else:
701
+ # logger is not available yet
702
+ if args.class_data_dir is not None:
703
+ warnings.warn("You need not use --class_data_dir without --with_prior_preservation.")
704
+ if args.class_prompt is not None:
705
+ warnings.warn("You need not use --class_prompt without --with_prior_preservation.")
706
+
707
+ return args
708
+
709
+
710
+ # Taken from https://github.com/replicate/cog-sdxl/blob/main/dataset_and_utils.py
711
+ class TokenEmbeddingsHandler:
712
+ def __init__(self, text_encoders, tokenizers):
713
+ self.text_encoders = text_encoders
714
+ self.tokenizers = tokenizers
715
+
716
+ self.train_ids: Optional[torch.Tensor] = None
717
+ self.inserting_toks: Optional[List[str]] = None
718
+ self.embeddings_settings = {}
719
+
720
+ def initialize_new_tokens(self, inserting_toks: List[str]):
721
+ idx = 0
722
+ for tokenizer, text_encoder in zip(self.tokenizers, self.text_encoders):
723
+ assert isinstance(inserting_toks, list), "inserting_toks should be a list of strings."
724
+ assert all(
725
+ isinstance(tok, str) for tok in inserting_toks
726
+ ), "All elements in inserting_toks should be strings."
727
+
728
+ self.inserting_toks = inserting_toks
729
+ special_tokens_dict = {"additional_special_tokens": self.inserting_toks}
730
+ tokenizer.add_special_tokens(special_tokens_dict)
731
+ text_encoder.resize_token_embeddings(len(tokenizer))
732
+
733
+ self.train_ids = tokenizer.convert_tokens_to_ids(self.inserting_toks)
734
+
735
+ # random initialization of new tokens
736
+ std_token_embedding = text_encoder.text_model.embeddings.token_embedding.weight.data.std()
737
+
738
+ print(f"{idx} text encodedr's std_token_embedding: {std_token_embedding}")
739
+
740
+ text_encoder.text_model.embeddings.token_embedding.weight.data[self.train_ids] = (
741
+ torch.randn(len(self.train_ids), text_encoder.text_model.config.hidden_size)
742
+ .to(device=self.device)
743
+ .to(dtype=self.dtype)
744
+ * std_token_embedding
745
+ )
746
+ self.embeddings_settings[
747
+ f"original_embeddings_{idx}"
748
+ ] = text_encoder.text_model.embeddings.token_embedding.weight.data.clone()
749
+ self.embeddings_settings[f"std_token_embedding_{idx}"] = std_token_embedding
750
+
751
+ inu = torch.ones((len(tokenizer),), dtype=torch.bool)
752
+ inu[self.train_ids] = False
753
+
754
+ self.embeddings_settings[f"index_no_updates_{idx}"] = inu
755
+
756
+ print(self.embeddings_settings[f"index_no_updates_{idx}"].shape)
757
+
758
+ idx += 1
759
+
760
+ def save_embeddings(self, file_path: str):
761
+ assert self.train_ids is not None, "Initialize new tokens before saving embeddings."
762
+ tensors = {}
763
+ # text_encoder_0 - CLIP ViT-L/14, text_encoder_1 - CLIP ViT-G/14
764
+ idx_to_text_encoder_name = {0: "clip_l", 1: "clip_g"}
765
+ for idx, text_encoder in enumerate(self.text_encoders):
766
+ assert text_encoder.text_model.embeddings.token_embedding.weight.data.shape[0] == len(
767
+ self.tokenizers[0]
768
+ ), "Tokenizers should be the same."
769
+ new_token_embeddings = text_encoder.text_model.embeddings.token_embedding.weight.data[self.train_ids]
770
+
771
+ # New tokens for each text encoder are saved under "clip_l" (for text_encoder 0), "clip_g" (for
772
+ # text_encoder 1) to keep compatible with the ecosystem.
773
+ # Note: When loading with diffusers, any name can work - simply specify in inference
774
+ tensors[idx_to_text_encoder_name[idx]] = new_token_embeddings
775
+ # tensors[f"text_encoders_{idx}"] = new_token_embeddings
776
+
777
+ save_file(tensors, file_path)
778
+
779
+ @property
780
+ def dtype(self):
781
+ return self.text_encoders[0].dtype
782
+
783
+ @property
784
+ def device(self):
785
+ return self.text_encoders[0].device
786
+
787
+ @torch.no_grad()
788
+ def retract_embeddings(self):
789
+ for idx, text_encoder in enumerate(self.text_encoders):
790
+ index_no_updates = self.embeddings_settings[f"index_no_updates_{idx}"]
791
+ text_encoder.text_model.embeddings.token_embedding.weight.data[index_no_updates] = (
792
+ self.embeddings_settings[f"original_embeddings_{idx}"][index_no_updates]
793
+ .to(device=text_encoder.device)
794
+ .to(dtype=text_encoder.dtype)
795
+ )
796
+
797
+ # for the parts that were updated, we need to normalize them
798
+ # to have the same std as before
799
+ std_token_embedding = self.embeddings_settings[f"std_token_embedding_{idx}"]
800
+
801
+ index_updates = ~index_no_updates
802
+ new_embeddings = text_encoder.text_model.embeddings.token_embedding.weight.data[index_updates]
803
+ off_ratio = std_token_embedding / new_embeddings.std()
804
+
805
+ new_embeddings = new_embeddings * (off_ratio**0.1)
806
+ text_encoder.text_model.embeddings.token_embedding.weight.data[index_updates] = new_embeddings
807
+
808
+
809
+ class DreamBoothDataset(Dataset):
810
+ """
811
+ A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
812
+ It pre-processes the images.
813
+ """
814
+
815
+ def __init__(
816
+ self,
817
+ instance_data_root,
818
+ instance_prompt,
819
+ class_prompt,
820
+ dataset_name,
821
+ dataset_config_name,
822
+ cache_dir,
823
+ image_column,
824
+ caption_column,
825
+ train_text_encoder_ti,
826
+ class_data_root=None,
827
+ class_num=None,
828
+ token_abstraction_dict=None, # token mapping for textual inversion
829
+ size=1024,
830
+ repeats=1,
831
+ center_crop=False,
832
+ ):
833
+ self.size = size
834
+ self.center_crop = center_crop
835
+
836
+ self.instance_prompt = instance_prompt
837
+ self.custom_instance_prompts = None
838
+ self.class_prompt = class_prompt
839
+ self.token_abstraction_dict = token_abstraction_dict
840
+ self.train_text_encoder_ti = train_text_encoder_ti
841
+ # if --dataset_name is provided or a metadata jsonl file is provided in the local --instance_data directory,
842
+ # we load the training data using load_dataset
843
+ if dataset_name is not None:
844
+ try:
845
+ from datasets import load_dataset
846
+ except ImportError:
847
+ raise ImportError(
848
+ "You are trying to load your data using the datasets library. If you wish to train using custom "
849
+ "captions please install the datasets library: `pip install datasets`. If you wish to load a "
850
+ "local folder containing images only, specify --instance_data_dir instead."
851
+ )
852
+ # Downloading and loading a dataset from the hub.
853
+ # See more about loading custom images at
854
+ # https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script
855
+ dataset = load_dataset(
856
+ dataset_name,
857
+ dataset_config_name,
858
+ cache_dir=cache_dir,
859
+ )
860
+ # Preprocessing the datasets.
861
+ column_names = dataset["train"].column_names
862
+
863
+ # Get the column names for input/target.
864
+ if image_column is None:
865
+ image_column = column_names[0]
866
+ logger.info(f"image column defaulting to {image_column}")
867
+ else:
868
+ if image_column not in column_names:
869
+ raise ValueError(
870
+ f"`--image_column` value '{image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
871
+ )
872
+ instance_images = dataset["train"][image_column]
873
+
874
+ if caption_column is None:
875
+ logger.info(
876
+ "No caption column provided, defaulting to instance_prompt for all images. If your dataset "
877
+ "contains captions/prompts for the images, make sure to specify the "
878
+ "column as --caption_column"
879
+ )
880
+ self.custom_instance_prompts = None
881
+ else:
882
+ if caption_column not in column_names:
883
+ raise ValueError(
884
+ f"`--caption_column` value '{caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
885
+ )
886
+ custom_instance_prompts = dataset["train"][caption_column]
887
+ # create final list of captions according to --repeats
888
+ self.custom_instance_prompts = []
889
+ for caption in custom_instance_prompts:
890
+ self.custom_instance_prompts.extend(itertools.repeat(caption, repeats))
891
+ else:
892
+ self.instance_data_root = Path(instance_data_root)
893
+ if not self.instance_data_root.exists():
894
+ raise ValueError("Instance images root doesn't exists.")
895
+
896
+ instance_images = [Image.open(path) for path in list(Path(instance_data_root).iterdir())]
897
+ self.custom_instance_prompts = None
898
+
899
+ self.instance_images = []
900
+ for img in instance_images:
901
+ self.instance_images.extend(itertools.repeat(img, repeats))
902
+ self.num_instance_images = len(self.instance_images)
903
+ self._length = self.num_instance_images
904
+
905
+ if class_data_root is not None:
906
+ self.class_data_root = Path(class_data_root)
907
+ self.class_data_root.mkdir(parents=True, exist_ok=True)
908
+ self.class_images_path = list(self.class_data_root.iterdir())
909
+ if class_num is not None:
910
+ self.num_class_images = min(len(self.class_images_path), class_num)
911
+ else:
912
+ self.num_class_images = len(self.class_images_path)
913
+ self._length = max(self.num_class_images, self.num_instance_images)
914
+ else:
915
+ self.class_data_root = None
916
+
917
+ self.image_transforms = transforms.Compose(
918
+ [
919
+ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
920
+ transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
921
+ transforms.ToTensor(),
922
+ transforms.Normalize([0.5], [0.5]),
923
+ ]
924
+ )
925
+
926
+ def __len__(self):
927
+ return self._length
928
+
929
+ def __getitem__(self, index):
930
+ example = {}
931
+ instance_image = self.instance_images[index % self.num_instance_images]
932
+ instance_image = exif_transpose(instance_image)
933
+
934
+ if not instance_image.mode == "RGB":
935
+ instance_image = instance_image.convert("RGB")
936
+ example["instance_images"] = self.image_transforms(instance_image)
937
+
938
+ if self.custom_instance_prompts:
939
+ caption = self.custom_instance_prompts[index % self.num_instance_images]
940
+ if caption:
941
+ if self.train_text_encoder_ti:
942
+ # replace instances of --token_abstraction in caption with the new tokens: "<si><si+1>" etc.
943
+ for token_abs, token_replacement in self.token_abstraction_dict.items():
944
+ caption = caption.replace(token_abs, "".join(token_replacement))
945
+ example["instance_prompt"] = caption
946
+ else:
947
+ example["instance_prompt"] = self.instance_prompt
948
+
949
+ else: # custom prompts were provided, but their number does not match the size of the image dataset
950
+ example["instance_prompt"] = self.instance_prompt
951
+
952
+ if self.class_data_root:
953
+ class_image = Image.open(self.class_images_path[index % self.num_class_images])
954
+ class_image = exif_transpose(class_image)
955
+
956
+ if not class_image.mode == "RGB":
957
+ class_image = class_image.convert("RGB")
958
+ example["class_images"] = self.image_transforms(class_image)
959
+ example["class_prompt"] = self.class_prompt
960
+
961
+ return example
962
+
963
+
964
+ def collate_fn(examples, with_prior_preservation=False):
965
+ pixel_values = [example["instance_images"] for example in examples]
966
+ prompts = [example["instance_prompt"] for example in examples]
967
+
968
+ # Concat class and instance examples for prior preservation.
969
+ # We do this to avoid doing two forward passes.
970
+ if with_prior_preservation:
971
+ pixel_values += [example["class_images"] for example in examples]
972
+ prompts += [example["class_prompt"] for example in examples]
973
+
974
+ pixel_values = torch.stack(pixel_values)
975
+ pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
976
+
977
+ batch = {"pixel_values": pixel_values, "prompts": prompts}
978
+ return batch
979
+
980
+
981
+ class PromptDataset(Dataset):
982
+ "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
983
+
984
+ def __init__(self, prompt, num_samples):
985
+ self.prompt = prompt
986
+ self.num_samples = num_samples
987
+
988
+ def __len__(self):
989
+ return self.num_samples
990
+
991
+ def __getitem__(self, index):
992
+ example = {}
993
+ example["prompt"] = self.prompt
994
+ example["index"] = index
995
+ return example
996
+
997
+
998
+ def tokenize_prompt(tokenizer, prompt, add_special_tokens=False):
999
+ text_inputs = tokenizer(
1000
+ prompt,
1001
+ padding="max_length",
1002
+ max_length=tokenizer.model_max_length,
1003
+ truncation=True,
1004
+ add_special_tokens=add_special_tokens,
1005
+ return_tensors="pt",
1006
+ )
1007
+ text_input_ids = text_inputs.input_ids
1008
+ return text_input_ids
1009
+
1010
+
1011
+ # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt
1012
+ def encode_prompt(text_encoders, tokenizers, prompt, text_input_ids_list=None):
1013
+ prompt_embeds_list = []
1014
+
1015
+ for i, text_encoder in enumerate(text_encoders):
1016
+ if tokenizers is not None:
1017
+ tokenizer = tokenizers[i]
1018
+ text_input_ids = tokenize_prompt(tokenizer, prompt)
1019
+ else:
1020
+ assert text_input_ids_list is not None
1021
+ text_input_ids = text_input_ids_list[i]
1022
+
1023
+ prompt_embeds = text_encoder(
1024
+ text_input_ids.to(text_encoder.device),
1025
+ output_hidden_states=True,
1026
+ )
1027
+
1028
+ # we only ever keep the pooled output of the final text encoder
1029
+ pooled_prompt_embeds = prompt_embeds[0]
1030
+ prompt_embeds = prompt_embeds.hidden_states[-2]
1031
+ bs_embed, seq_len, _ = prompt_embeds.shape
1032
+ prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1)
1033
+ prompt_embeds_list.append(prompt_embeds)
1034
+
1035
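+ # SDXL conditions the unet on the concatenation of both encoders' penultimate hidden states
+ # (768 + 1280 features for the standard SDXL text encoders)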
+ prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
1036
+ pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1)
1037
+ return prompt_embeds, pooled_prompt_embeds
1038
+
1039
+
1040
+ def main(args):
1041
+ logging_dir = Path(args.output_dir, args.logging_dir)
1042
+
1043
+ accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
1044
+ kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
1045
+ accelerator = Accelerator(
1046
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
1047
+ mixed_precision=args.mixed_precision,
1048
+ log_with=args.report_to,
1049
+ project_config=accelerator_project_config,
1050
+ kwargs_handlers=[kwargs],
1051
+ )
1052
+
1053
+ if args.report_to == "wandb":
1054
+ if not is_wandb_available():
1055
+ raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
1056
+ import wandb
1057
+
1058
+ # Make one log on every process with the configuration for debugging.
1059
+ logging.basicConfig(
1060
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
1061
+ datefmt="%m/%d/%Y %H:%M:%S",
1062
+ level=logging.INFO,
1063
+ )
1064
+ logger.info(accelerator.state, main_process_only=False)
1065
+ if accelerator.is_local_main_process:
1066
+ transformers.utils.logging.set_verbosity_warning()
1067
+ diffusers.utils.logging.set_verbosity_info()
1068
+ else:
1069
+ transformers.utils.logging.set_verbosity_error()
1070
+ diffusers.utils.logging.set_verbosity_error()
1071
+
1072
+ # If passed along, set the training seed now.
1073
+ if args.seed is not None:
1074
+ set_seed(args.seed)
1075
+
1076
+ # Generate class images if prior preservation is enabled.
1077
+ if args.with_prior_preservation:
1078
+ class_images_dir = Path(args.class_data_dir)
1079
+ if not class_images_dir.exists():
1080
+ class_images_dir.mkdir(parents=True)
1081
+ cur_class_images = len(list(class_images_dir.iterdir()))
1082
+
1083
+ if cur_class_images < args.num_class_images:
1084
+ torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
1085
+ if args.prior_generation_precision == "fp32":
1086
+ torch_dtype = torch.float32
1087
+ elif args.prior_generation_precision == "fp16":
1088
+ torch_dtype = torch.float16
1089
+ elif args.prior_generation_precision == "bf16":
1090
+ torch_dtype = torch.bfloat16
1091
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
1092
+ args.pretrained_model_name_or_path,
1093
+ torch_dtype=torch_dtype,
1094
+ revision=args.revision,
1095
+ variant=args.variant,
1096
+ )
1097
+ pipeline.set_progress_bar_config(disable=True)
1098
+
1099
+ num_new_images = args.num_class_images - cur_class_images
1100
+ logger.info(f"Number of class images to sample: {num_new_images}.")
1101
+
1102
+ sample_dataset = PromptDataset(args.class_prompt, num_new_images)
1103
+ sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
1104
+
1105
+ sample_dataloader = accelerator.prepare(sample_dataloader)
1106
+ pipeline.to(accelerator.device)
1107
+
1108
+ for example in tqdm(
1109
+ sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
1110
+ ):
1111
+ images = pipeline(example["prompt"]).images
1112
+
1113
+ for i, image in enumerate(images):
1114
+ hash_image = hashlib.sha1(image.tobytes()).hexdigest()
1115
+ image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
1116
+ image.save(image_filename)
1117
+
1118
+ del pipeline
1119
+ if torch.cuda.is_available():
1120
+ torch.cuda.empty_cache()
1121
+
1122
+ # Handle the repository creation
1123
+ if accelerator.is_main_process:
1124
+ if args.output_dir is not None:
1125
+ os.makedirs(args.output_dir, exist_ok=True)
1126
+
1127
+ model_id = args.hub_model_id or Path(args.output_dir).name
1128
+ repo_id = None
1129
+ if args.push_to_hub:
1130
+ repo_id = create_repo(repo_id=model_id, exist_ok=True, token=args.hub_token).repo_id
1131
+
1132
+ # Load the tokenizers
1133
+ tokenizer_one = AutoTokenizer.from_pretrained(
1134
+ args.pretrained_model_name_or_path,
1135
+ subfolder="tokenizer",
1136
+ revision=args.revision,
1137
+ variant=args.variant,
1138
+ use_fast=False,
1139
+ )
1140
+ tokenizer_two = AutoTokenizer.from_pretrained(
1141
+ args.pretrained_model_name_or_path,
1142
+ subfolder="tokenizer_2",
1143
+ revision=args.revision,
1144
+ variant=args.variant,
1145
+ use_fast=False,
1146
+ )
1147
+
1148
+ # import correct text encoder classes
1149
+ text_encoder_cls_one = import_model_class_from_model_name_or_path(
1150
+ args.pretrained_model_name_or_path, args.revision
1151
+ )
1152
+ text_encoder_cls_two = import_model_class_from_model_name_or_path(
1153
+ args.pretrained_model_name_or_path, args.revision, subfolder="text_encoder_2"
1154
+ )
1155
+
1156
+ # Load scheduler and models
1157
+ noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
1158
+ text_encoder_one = text_encoder_cls_one.from_pretrained(
1159
+ args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant
1160
+ )
1161
+ text_encoder_two = text_encoder_cls_two.from_pretrained(
1162
+ args.pretrained_model_name_or_path, subfolder="text_encoder_2", revision=args.revision, variant=args.variant
1163
+ )
1164
+ vae_path = (
1165
+ args.pretrained_model_name_or_path
1166
+ if args.pretrained_vae_model_name_or_path is None
1167
+ else args.pretrained_vae_model_name_or_path
1168
+ )
1169
+ vae = AutoencoderKL.from_pretrained(
1170
+ vae_path,
1171
+ subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None,
1172
+ revision=args.revision,
1173
+ variant=args.variant,
1174
+ )
1175
+ vae_scaling_factor = vae.config.scaling_factor
1176
+ unet = UNet2DConditionModel.from_pretrained(
1177
+ args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant
1178
+ )
1179
+
1180
+ if args.train_text_encoder_ti:
1181
+ # we parse the provided token identifier (or identifiers) into a list, e.g. "TOK" -> ["TOK"], "TOK,
1182
+ # TOK2" -> ["TOK", "TOK2"] etc.
1183
+ token_abstraction_list = "".join(args.token_abstraction.split()).split(",")
1184
+ logger.info(f"list of token identifiers: {token_abstraction_list}")
1185
+
1186
+ token_abstraction_dict = {}
1187
+ token_idx = 0
1188
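+ # each identifier gets a consecutive block of placeholder tokens, e.g. with
+ # num_new_tokens_per_abstraction=2 the first identifier maps to ["<s0>", "<s1>"], the second to ["<s2>", "<s3>"]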
+ for i, token in enumerate(token_abstraction_list):
1189
+ token_abstraction_dict[token] = [
1190
+ f"<s{token_idx + i + j}>" for j in range(args.num_new_tokens_per_abstraction)
1191
+ ]
1192
+ token_idx += args.num_new_tokens_per_abstraction - 1
1193
+
1194
+ # replace instances of --token_abstraction in --instance_prompt with the new tokens: "<si><si+1>" etc.
1195
+ for token_abs, token_replacement in token_abstraction_dict.items():
1196
+ args.instance_prompt = args.instance_prompt.replace(token_abs, "".join(token_replacement))
1197
+ if args.with_prior_preservation:
1198
+ args.class_prompt = args.class_prompt.replace(token_abs, "".join(token_replacement))
1199
+
1200
+ # initialize the new tokens for textual inversion
1201
+ embedding_handler = TokenEmbeddingsHandler(
1202
+ [text_encoder_one, text_encoder_two], [tokenizer_one, tokenizer_two]
1203
+ )
1204
+ inserting_toks = []
1205
+ for new_tok in token_abstraction_dict.values():
1206
+ inserting_toks.extend(new_tok)
1207
+ embedding_handler.initialize_new_tokens(inserting_toks=inserting_toks)
1208
+
1209
+ # We only train the additional adapter LoRA layers
1210
+ vae.requires_grad_(False)
1211
+ text_encoder_one.requires_grad_(False)
1212
+ text_encoder_two.requires_grad_(False)
1213
+ unet.requires_grad_(False)
1214
+
1215
+ # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
1216
+ # as these weights are only used for inference, keeping weights in full precision is not required.
1217
+ weight_dtype = torch.float32
1218
+ if accelerator.mixed_precision == "fp16":
1219
+ weight_dtype = torch.float16
1220
+ elif accelerator.mixed_precision == "bf16":
1221
+ weight_dtype = torch.bfloat16
1222
+
1223
+ # Move unet, vae and text_encoder to device and cast to weight_dtype
1224
+ unet.to(accelerator.device, dtype=weight_dtype)
1225
+
1226
+ # The VAE is always in float32 to avoid NaN losses.
1227
+ vae.to(accelerator.device, dtype=torch.float32)
1228
+
1229
+ text_encoder_one.to(accelerator.device, dtype=weight_dtype)
1230
+ text_encoder_two.to(accelerator.device, dtype=weight_dtype)
1231
+
1232
+ if args.enable_xformers_memory_efficient_attention:
1233
+ if is_xformers_available():
1234
+ import xformers
1235
+
1236
+ xformers_version = version.parse(xformers.__version__)
1237
+ if xformers_version == version.parse("0.0.16"):
1238
+ logger.warn(
1239
+ "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, "
1240
+ "please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
1241
+ )
1242
+ unet.enable_xformers_memory_efficient_attention()
1243
+ else:
1244
+ raise ValueError("xformers is not available. Make sure it is installed correctly")
1245
+
1246
+ if args.gradient_checkpointing:
1247
+ unet.enable_gradient_checkpointing()
1248
+ if args.train_text_encoder:
1249
+ text_encoder_one.gradient_checkpointing_enable()
1250
+ text_encoder_two.gradient_checkpointing_enable()
1251
+
1252
+ # now we will add new LoRA weights to the attention layers
1253
+ unet_lora_config = LoraConfig(
1254
+ r=args.rank,
1255
+ lora_alpha=args.rank,
1256
+ init_lora_weights="gaussian",
1257
+ target_modules=["to_k", "to_q", "to_v", "to_out.0"],
1258
+ )
1259
+ unet.add_adapter(unet_lora_config)
1260
+
1261
+ # The text encoder comes from 🤗 transformers, so we cannot directly modify it.
1262
+ # So, instead, we monkey-patch the forward calls of its attention-blocks.
1263
+ if args.train_text_encoder:
1264
+ text_lora_config = LoraConfig(
1265
+ r=args.rank,
1266
+ lora_alpha=args.rank,
1267
+ init_lora_weights="gaussian",
1268
+ target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
1269
+ )
1270
+ text_encoder_one.add_adapter(text_lora_config)
1271
+ text_encoder_two.add_adapter(text_lora_config)
1272
+
1273
+ # if we use textual inversion, we freeze all parameters except for the token embeddings
1274
+ # in the text encoder
1275
+ elif args.train_text_encoder_ti:
1276
+ text_lora_parameters_one = []
1277
+ for name, param in text_encoder_one.named_parameters():
1278
+ if "token_embedding" in name:
1279
+ # ensure that dtype is float32, even if the rest of the (untrained) model is loaded in fp16
1280
+ param = param.to(dtype=torch.float32)
1281
+ param.requires_grad = True
1282
+ text_lora_parameters_one.append(param)
1283
+ else:
1284
+ param.requires_grad = False
1285
+ text_lora_parameters_two = []
1286
+ for name, param in text_encoder_two.named_parameters():
1287
+ if "token_embedding" in name:
1288
+ # ensure that dtype is float32, even if the rest of the (untrained) model is loaded in fp16
1289
+ param = param.to(dtype=torch.float32)
1290
+ param.requires_grad = True
1291
+ text_lora_parameters_two.append(param)
1292
+ else:
1293
+ param.requires_grad = False
1294
+
1295
+ # Make sure the trainable params are in float32.
1296
+ if args.mixed_precision == "fp16":
1297
+ models = [unet]
1298
+ if args.train_text_encoder:
1299
+ models.extend([text_encoder_one, text_encoder_two])
1300
+ for model in models:
1301
+ for param in model.parameters():
1302
+ # only upcast trainable parameters (LoRA) into fp32
1303
+ if param.requires_grad:
1304
+ param.data = param.to(torch.float32)
1305
+
1306
+ # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
1307
+ def save_model_hook(models, weights, output_dir):
1308
+ if accelerator.is_main_process:
1309
+ # there are only two options here: either just the unet attn processor layers
1310
+ # or the unet and text encoder attention layers
1311
+ unet_lora_layers_to_save = None
1312
+ text_encoder_one_lora_layers_to_save = None
1313
+ text_encoder_two_lora_layers_to_save = None
1314
+
1315
+ for model in models:
1316
+ if isinstance(model, type(accelerator.unwrap_model(unet))):
1317
+ unet_lora_layers_to_save = convert_state_dict_to_diffusers(get_peft_model_state_dict(model))
1318
+ elif isinstance(model, type(accelerator.unwrap_model(text_encoder_one))):
1319
+ text_encoder_one_lora_layers_to_save = convert_state_dict_to_diffusers(
1320
+ get_peft_model_state_dict(model)
1321
+ )
1322
+ elif isinstance(model, type(accelerator.unwrap_model(text_encoder_two))):
1323
+ text_encoder_two_lora_layers_to_save = convert_state_dict_to_diffusers(
1324
+ get_peft_model_state_dict(model)
1325
+ )
1326
+ else:
1327
+ raise ValueError(f"unexpected save model: {model.__class__}")
1328
+
1329
+ # make sure to pop weight so that corresponding model is not saved again
1330
+ weights.pop()
1331
+
1332
+ StableDiffusionXLPipeline.save_lora_weights(
1333
+ output_dir,
1334
+ unet_lora_layers=unet_lora_layers_to_save,
1335
+ text_encoder_lora_layers=text_encoder_one_lora_layers_to_save,
1336
+ text_encoder_2_lora_layers=text_encoder_two_lora_layers_to_save,
1337
+ )
1338
+
1339
+ def load_model_hook(models, input_dir):
1340
+ unet_ = None
1341
+ text_encoder_one_ = None
1342
+ text_encoder_two_ = None
1343
+
1344
+ while len(models) > 0:
1345
+ model = models.pop()
1346
+
1347
+ if isinstance(model, type(accelerator.unwrap_model(unet))):
1348
+ unet_ = model
1349
+ elif isinstance(model, type(accelerator.unwrap_model(text_encoder_one))):
1350
+ text_encoder_one_ = model
1351
+ elif isinstance(model, type(accelerator.unwrap_model(text_encoder_two))):
1352
+ text_encoder_two_ = model
1353
+ else:
1354
+ raise ValueError(f"unexpected save model: {model.__class__}")
1355
+
1356
+ lora_state_dict, network_alphas = LoraLoaderMixin.lora_state_dict(input_dir)
1357
+ LoraLoaderMixin.load_lora_into_unet(lora_state_dict, network_alphas=network_alphas, unet=unet_)
1358
+
1359
+ text_encoder_state_dict = {k: v for k, v in lora_state_dict.items() if "text_encoder." in k}
1360
+ LoraLoaderMixin.load_lora_into_text_encoder(
1361
+ text_encoder_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_one_
1362
+ )
1363
+
1364
+ text_encoder_2_state_dict = {k: v for k, v in lora_state_dict.items() if "text_encoder_2." in k}
1365
+ LoraLoaderMixin.load_lora_into_text_encoder(
1366
+ text_encoder_2_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_two_
1367
+ )
1368
+
1369
+ accelerator.register_save_state_pre_hook(save_model_hook)
1370
+ accelerator.register_load_state_pre_hook(load_model_hook)
1371
+
1372
+ # Enable TF32 for faster training on Ampere GPUs,
1373
+ # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
1374
+ if args.allow_tf32:
1375
+ torch.backends.cuda.matmul.allow_tf32 = True
1376
+
1377
+ if args.scale_lr:
1378
+ args.learning_rate = (
1379
+ args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
1380
+ )
1381
+
1382
+ unet_lora_parameters = list(filter(lambda p: p.requires_grad, unet.parameters()))
1383
+
1384
+ if args.train_text_encoder:
1385
+ text_lora_parameters_one = list(filter(lambda p: p.requires_grad, text_encoder_one.parameters()))
1386
+ text_lora_parameters_two = list(filter(lambda p: p.requires_grad, text_encoder_two.parameters()))
1387
+
1388
+ # If neither --train_text_encoder nor --train_text_encoder_ti, text_encoders remain frozen during training
1389
+ freeze_text_encoder = not (args.train_text_encoder or args.train_text_encoder_ti)
1390
+
1391
+ # Optimization parameters
1392
+ unet_lora_parameters_with_lr = {"params": unet_lora_parameters, "lr": args.learning_rate}
1393
+ if not freeze_text_encoder:
1394
+ # different learning rate for text encoder and unet
1395
+ text_lora_parameters_one_with_lr = {
1396
+ "params": text_lora_parameters_one,
1397
+ "weight_decay": args.adam_weight_decay_text_encoder
1398
+ if args.adam_weight_decay_text_encoder
1399
+ else args.adam_weight_decay,
1400
+ "lr": args.text_encoder_lr if args.text_encoder_lr else args.learning_rate,
1401
+ }
1402
+ text_lora_parameters_two_with_lr = {
1403
+ "params": text_lora_parameters_two,
1404
+ "weight_decay": args.adam_weight_decay_text_encoder
1405
+ if args.adam_weight_decay_text_encoder
1406
+ else args.adam_weight_decay,
1407
+ "lr": args.text_encoder_lr if args.text_encoder_lr else args.learning_rate,
1408
+ }
1409
+ params_to_optimize = [
1410
+ unet_lora_parameters_with_lr,
1411
+ text_lora_parameters_one_with_lr,
1412
+ text_lora_parameters_two_with_lr,
1413
+ ]
1414
+ else:
1415
+ params_to_optimize = [unet_lora_parameters_with_lr]
1416
+
1417
+ # Optimizer creation
1418
+ if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"):
1419
+ logger.warn(
1420
+ f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]."
1421
+ "Defaulting to adamW"
1422
+ )
1423
+ args.optimizer = "adamw"
1424
+
1425
+ if args.use_8bit_adam and not args.optimizer.lower() == "adamw":
1426
+ logger.warn(
1427
+ f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was "
1428
+ f"set to {args.optimizer.lower()}"
1429
+ )
1430
+
1431
+ if args.optimizer.lower() == "adamw":
1432
+ if args.use_8bit_adam:
1433
+ try:
1434
+ import bitsandbytes as bnb
1435
+ except ImportError:
1436
+ raise ImportError(
1437
+ "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
1438
+ )
1439
+
1440
+ optimizer_class = bnb.optim.AdamW8bit
1441
+ else:
1442
+ optimizer_class = torch.optim.AdamW
1443
+
1444
+ optimizer = optimizer_class(
1445
+ params_to_optimize,
1446
+ betas=(args.adam_beta1, args.adam_beta2),
1447
+ weight_decay=args.adam_weight_decay,
1448
+ eps=args.adam_epsilon,
1449
+ )
1450
+
1451
+ if args.optimizer.lower() == "prodigy":
1452
+ try:
1453
+ import prodigyopt
1454
+ except ImportError:
1455
+ raise ImportError("To use Prodigy, please install the prodigyopt library: `pip install prodigyopt`")
1456
+
1457
+ optimizer_class = prodigyopt.Prodigy
1458
+
1459
+ if args.learning_rate <= 0.1:
1460
+ logger.warn(
1461
+ "Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0"
1462
+ )
1463
+ if args.train_text_encoder and args.text_encoder_lr:
1464
+ logger.warn(
1465
+ f"Learning rates were provided both for the unet and the text encoder- e.g. text_encoder_lr:"
1466
+ f" {args.text_encoder_lr} and learning_rate: {args.learning_rate}. "
1467
+ f"When using prodigy only learning_rate is used as the initial learning rate."
1468
+ )
1469
+ # changes the learning rate of text_encoder_parameters_one and text_encoder_parameters_two to be
1470
+ # --learning_rate
1471
+ params_to_optimize[1]["lr"] = args.learning_rate
1472
+ params_to_optimize[2]["lr"] = args.learning_rate
1473
+
1474
+ optimizer = optimizer_class(
1475
+ params_to_optimize,
1476
+ lr=args.learning_rate,
1477
+ betas=(args.adam_beta1, args.adam_beta2),
1478
+ beta3=args.prodigy_beta3,
1479
+ weight_decay=args.adam_weight_decay,
1480
+ eps=args.adam_epsilon,
1481
+ decouple=args.prodigy_decouple,
1482
+ use_bias_correction=args.prodigy_use_bias_correction,
1483
+ safeguard_warmup=args.prodigy_safeguard_warmup,
1484
+ )
1485
+
1486
+ # Dataset and DataLoaders creation:
1487
+ train_dataset = DreamBoothDataset(
1488
+ instance_data_root=args.instance_data_dir,
1489
+ instance_prompt=args.instance_prompt,
1490
+ class_prompt=args.class_prompt,
1491
+ dataset_name=args.dataset_name,
1492
+ dataset_config_name=args.dataset_config_name,
1493
+ cache_dir=args.cache_dir,
1494
+ image_column=args.image_column,
1495
+ train_text_encoder_ti=args.train_text_encoder_ti,
1496
+ caption_column=args.caption_column,
1497
+ class_data_root=args.class_data_dir if args.with_prior_preservation else None,
1498
+ token_abstraction_dict=token_abstraction_dict if args.train_text_encoder_ti else None,
1499
+ class_num=args.num_class_images,
1500
+ size=args.resolution,
1501
+ repeats=args.repeats,
1502
+ center_crop=args.center_crop,
1503
+ )
1504
+
1505
+ train_dataloader = torch.utils.data.DataLoader(
1506
+ train_dataset,
1507
+ batch_size=args.train_batch_size,
1508
+ shuffle=True,
1509
+ collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
1510
+ num_workers=args.dataloader_num_workers,
1511
+ )
1512
+
1513
+ # Computes additional embeddings/ids required by the SDXL UNet.
1514
+ # regular text embeddings (when `train_text_encoder` is not True)
1515
+ # pooled text embeddings
1516
+ # time ids
1517
+
1518
+ def compute_time_ids():
1519
+ # Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids
1520
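+ # SDXL micro-conditioning: the unet additionally receives (original_size, crops_coords_top_left, target_size)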
+ original_size = (args.resolution, args.resolution)
1521
+ target_size = (args.resolution, args.resolution)
1522
+ crops_coords_top_left = (args.crops_coords_top_left_h, args.crops_coords_top_left_w)
1523
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
1524
+ add_time_ids = torch.tensor([add_time_ids])
1525
+ add_time_ids = add_time_ids.to(accelerator.device, dtype=weight_dtype)
1526
+ return add_time_ids
1527
+
1528
+ if not args.train_text_encoder:
1529
+ tokenizers = [tokenizer_one, tokenizer_two]
1530
+ text_encoders = [text_encoder_one, text_encoder_two]
1531
+
1532
+ def compute_text_embeddings(prompt, text_encoders, tokenizers):
1533
+ with torch.no_grad():
1534
+ prompt_embeds, pooled_prompt_embeds = encode_prompt(text_encoders, tokenizers, prompt)
1535
+ prompt_embeds = prompt_embeds.to(accelerator.device)
1536
+ pooled_prompt_embeds = pooled_prompt_embeds.to(accelerator.device)
1537
+ return prompt_embeds, pooled_prompt_embeds
1538
+
1539
+ # Handle instance prompt.
1540
+ instance_time_ids = compute_time_ids()
1541
+
1542
+ # If no type of tuning is done on the text_encoder and custom instance prompts are NOT
1543
+ # provided (i.e. the --instance_prompt is used for all images), we encode the instance prompt once to avoid
1544
+ # the redundant encoding.
1545
+ if freeze_text_encoder and not train_dataset.custom_instance_prompts:
1546
+ instance_prompt_hidden_states, instance_pooled_prompt_embeds = compute_text_embeddings(
1547
+ args.instance_prompt, text_encoders, tokenizers
1548
+ )
1549
+
1550
+ # Handle class prompt for prior-preservation.
1551
+ if args.with_prior_preservation:
1552
+ class_time_ids = compute_time_ids()
1553
+ if freeze_text_encoder:
1554
+ class_prompt_hidden_states, class_pooled_prompt_embeds = compute_text_embeddings(
1555
+ args.class_prompt, text_encoders, tokenizers
1556
+ )
1557
+
1558
+ # Clear the memory here
1559
+ if freeze_text_encoder and not train_dataset.custom_instance_prompts:
1560
+ del tokenizers, text_encoders
1561
+ gc.collect()
1562
+ torch.cuda.empty_cache()
1563
+
1564
+ # If custom instance prompts are NOT provided (i.e. the instance prompt is used for all images),
1565
+ # pack the statically computed variables appropriately here. This is so that we don't
1566
+ # have to pass them to the dataloader.
1567
+ add_time_ids = instance_time_ids
1568
+ if args.with_prior_preservation:
1569
+ add_time_ids = torch.cat([add_time_ids, class_time_ids], dim=0)
1570
+
1571
+ # if --train_text_encoder_ti, we need add_special_tokens to be True for textual inversion
1572
+ add_special_tokens = True if args.train_text_encoder_ti else False
1573
+
1574
+ if not train_dataset.custom_instance_prompts:
1575
+ if freeze_text_encoder:
1576
+ prompt_embeds = instance_prompt_hidden_states
1577
+ unet_add_text_embeds = instance_pooled_prompt_embeds
1578
+ if args.with_prior_preservation:
1579
+ prompt_embeds = torch.cat([prompt_embeds, class_prompt_hidden_states], dim=0)
1580
+ unet_add_text_embeds = torch.cat([unet_add_text_embeds, class_pooled_prompt_embeds], dim=0)
1581
+ # if we're optimizing the text encoder (whether the instance prompt is used for all images or custom prompts are provided) we need to tokenize and encode the
1582
+ # batch prompts on all training steps
1583
+ else:
1584
+ tokens_one = tokenize_prompt(tokenizer_one, args.instance_prompt, add_special_tokens)
1585
+ tokens_two = tokenize_prompt(tokenizer_two, args.instance_prompt, add_special_tokens)
1586
+ if args.with_prior_preservation:
1587
+ class_tokens_one = tokenize_prompt(tokenizer_one, args.class_prompt, add_special_tokens)
1588
+ class_tokens_two = tokenize_prompt(tokenizer_two, args.class_prompt, add_special_tokens)
1589
+ tokens_one = torch.cat([tokens_one, class_tokens_one], dim=0)
1590
+ tokens_two = torch.cat([tokens_two, class_tokens_two], dim=0)
1591
+
1592
+ if args.train_text_encoder_ti and args.validation_prompt:
1593
+ # replace instances of --token_abstraction in validation prompt with the new tokens: "<si><si+1>" etc.
1594
+ for token_abs, token_replacement in train_dataset.token_abstraction_dict.items():
1595
+ args.validation_prompt = args.validation_prompt.replace(token_abs, "".join(token_replacement))
1596
+ print("validation prompt:", args.validation_prompt)
1597
+
1598
+ if args.cache_latents:
1599
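+ # encode every training image through the VAE once up front; the cached latent distributions
+ # are sampled during training instead of re-running the VAE on every step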
+ latents_cache = []
1600
+ for batch in tqdm(train_dataloader, desc="Caching latents"):
1601
+ with torch.no_grad():
1602
+ batch["pixel_values"] = batch["pixel_values"].to(
1603
+ accelerator.device, non_blocking=True, dtype=torch.float32
1604
+ )
1605
+ latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist)
1606
+
1607
+ if args.validation_prompt is None:
1608
+ del vae
1609
+ if torch.cuda.is_available():
1610
+ torch.cuda.empty_cache()
1611
+
1612
+ # Scheduler and math around the number of training steps.
1613
+ overrode_max_train_steps = False
1614
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1615
+ if args.max_train_steps is None:
1616
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1617
+ overrode_max_train_steps = True
1618
+
1619
+ lr_scheduler = get_scheduler(
1620
+ args.lr_scheduler,
1621
+ optimizer=optimizer,
1622
+ num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
1623
+ num_training_steps=args.max_train_steps * accelerator.num_processes,
1624
+ num_cycles=args.lr_num_cycles,
1625
+ power=args.lr_power,
1626
+ )
1627
+
1628
+ # Prepare everything with our `accelerator`.
1629
+ if not freeze_text_encoder:
1630
+ unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
1631
+ unet, text_encoder_one, text_encoder_two, optimizer, train_dataloader, lr_scheduler
1632
+ )
1633
+ else:
1634
+ unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
1635
+ unet, optimizer, train_dataloader, lr_scheduler
1636
+ )
1637
+
1638
+ # We need to recalculate our total training steps as the size of the training dataloader may have changed.
1639
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1640
+ if overrode_max_train_steps:
1641
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1642
+ # Afterwards we recalculate our number of training epochs
1643
+ args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
1644
+
1645
+ # We need to initialize the trackers we use, and also store our configuration.
1646
+ # The trackers initialize automatically on the main process.
1647
+ if accelerator.is_main_process:
1648
+ accelerator.init_trackers("dreambooth-lora-sd-xl", config=vars(args))
1649
+
1650
+ # Train!
1651
+ total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
1652
+
1653
+ logger.info("***** Running training *****")
1654
+ logger.info(f" Num examples = {len(train_dataset)}")
1655
+ logger.info(f" Num batches each epoch = {len(train_dataloader)}")
1656
+ logger.info(f" Num Epochs = {args.num_train_epochs}")
1657
+ logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
1658
+ logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
1659
+ logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
1660
+ logger.info(f" Total optimization steps = {args.max_train_steps}")
1661
+ global_step = 0
1662
+ first_epoch = 0
1663
+
1664
+ # Potentially load in the weights and states from a previous save
1665
+ if args.resume_from_checkpoint:
1666
+ if args.resume_from_checkpoint != "latest":
1667
+ path = os.path.basename(args.resume_from_checkpoint)
1668
+ else:
1669
+ # Get the most recent checkpoint
1670
+ dirs = os.listdir(args.output_dir)
1671
+ dirs = [d for d in dirs if d.startswith("checkpoint")]
1672
+ dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
1673
+ path = dirs[-1] if len(dirs) > 0 else None
1674
+
1675
+ if path is None:
1676
+ accelerator.print(
1677
+ f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
1678
+ )
1679
+ args.resume_from_checkpoint = None
1680
+ initial_global_step = 0
1681
+ else:
1682
+ accelerator.print(f"Resuming from checkpoint {path}")
1683
+ accelerator.load_state(os.path.join(args.output_dir, path))
1684
+ global_step = int(path.split("-")[1])
1685
+
1686
+ initial_global_step = global_step
1687
+ first_epoch = global_step // num_update_steps_per_epoch
1688
+
1689
+ else:
1690
+ initial_global_step = 0
1691
+
1692
+ progress_bar = tqdm(
1693
+ range(0, args.max_train_steps),
1694
+ initial=initial_global_step,
1695
+ desc="Steps",
1696
+ # Only show the progress bar once on each machine.
1697
+ disable=not accelerator.is_local_main_process,
1698
+ )
1699
+
1700
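+ # pivotal tuning: the text encoder (or just the new token embeddings) is only optimized for the
+ # first fraction of epochs; afterwards its param-group learning rates are zeroed and only the unet LoRA trains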
+ if args.train_text_encoder:
1701
+ num_train_epochs_text_encoder = int(args.train_text_encoder_frac * args.num_train_epochs)
1702
+ elif args.train_text_encoder_ti:
1703
+ num_train_epochs_text_encoder = int(args.train_text_encoder_ti_frac * args.num_train_epochs)
1704
+
1705
+ for epoch in range(first_epoch, args.num_train_epochs):
1706
+ # if performing any kind of optimization of text_encoder params
1707
+ if args.train_text_encoder or args.train_text_encoder_ti:
1708
+ if epoch == num_train_epochs_text_encoder:
1709
+ print("PIVOT HALFWAY", epoch)
1710
+ # stopping optimization of text_encoder params
1711
+ # resetting the optimizer to optimize only the unet params
1712
+ optimizer.param_groups[1]["lr"] = 0.0
1713
+ optimizer.param_groups[2]["lr"] = 0.0
1714
+
1715
+ else:
1716
+ # still optimizing the text encoder
1717
+ text_encoder_one.train()
1718
+ text_encoder_two.train()
1719
+ # set top-level parameter requires_grad = True so that gradient checkpointing works
1720
+ if args.train_text_encoder:
1721
+ text_encoder_one.text_model.embeddings.requires_grad_(True)
1722
+ text_encoder_two.text_model.embeddings.requires_grad_(True)
1723
+
1724
+ unet.train()
1725
+ for step, batch in enumerate(train_dataloader):
1726
+ with accelerator.accumulate(unet):
1727
+ prompts = batch["prompts"]
1728
+ # encode batch prompts when custom prompts are provided for each image
1729
+ if train_dataset.custom_instance_prompts:
1730
+ if freeze_text_encoder:
1731
+ prompt_embeds, unet_add_text_embeds = compute_text_embeddings(
1732
+ prompts, text_encoders, tokenizers
1733
+ )
1734
+
1735
+ else:
1736
+ tokens_one = tokenize_prompt(tokenizer_one, prompts, add_special_tokens)
1737
+ tokens_two = tokenize_prompt(tokenizer_two, prompts, add_special_tokens)
1738
+
1739
+ if args.cache_latents:
1740
+ model_input = latents_cache[step].sample()
1741
+ else:
1742
+ pixel_values = batch["pixel_values"].to(dtype=vae.dtype)
1743
+ model_input = vae.encode(pixel_values).latent_dist.sample()
1744
+
1745
+ model_input = model_input * vae_scaling_factor
1746
+ if args.pretrained_vae_model_name_or_path is None:
1747
+ model_input = model_input.to(weight_dtype)
1748
+
1749
+ # Sample noise that we'll add to the latents
1750
+ noise = torch.randn_like(model_input)
1751
+ bsz = model_input.shape[0]
1752
+ # Sample a random timestep for each image
1753
+ timesteps = torch.randint(
1754
+ 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device
1755
+ )
1756
+ timesteps = timesteps.long()
1757
+
1758
+ # Add noise to the model input according to the noise magnitude at each timestep
1759
+ # (this is the forward diffusion process)
1760
+ noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)
1761
+
1762
+ # Calculate the elements to repeat depending on the use of prior-preservation and custom captions.
1763
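+ # (with prior preservation the batch stacks [instance, class] halves and the precomputed
+ # embeddings / time ids already contain both, so each only needs to be repeated bsz // 2 times)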
+ if not train_dataset.custom_instance_prompts:
1764
+ elems_to_repeat_text_embeds = bsz // 2 if args.with_prior_preservation else bsz
1765
+ elems_to_repeat_time_ids = bsz // 2 if args.with_prior_preservation else bsz
1766
+
1767
+ else:
1768
+ elems_to_repeat_text_embeds = 1
1769
+ elems_to_repeat_time_ids = bsz // 2 if args.with_prior_preservation else bsz
1770
+
1771
+ # Predict the noise residual
1772
+ if freeze_text_encoder:
1773
+ unet_added_conditions = {
1774
+ "time_ids": add_time_ids.repeat(elems_to_repeat_time_ids, 1),
1775
+ "text_embeds": unet_add_text_embeds.repeat(elems_to_repeat_text_embeds, 1),
1776
+ }
1777
+ prompt_embeds_input = prompt_embeds.repeat(elems_to_repeat_text_embeds, 1, 1)
1778
+ model_pred = unet(
1779
+ noisy_model_input,
1780
+ timesteps,
1781
+ prompt_embeds_input,
1782
+ added_cond_kwargs=unet_added_conditions,
1783
+ ).sample
1784
+ else:
1785
+ unet_added_conditions = {"time_ids": add_time_ids.repeat(elems_to_repeat_time_ids, 1)}
1786
+ prompt_embeds, pooled_prompt_embeds = encode_prompt(
1787
+ text_encoders=[text_encoder_one, text_encoder_two],
1788
+ tokenizers=None,
1789
+ prompt=None,
1790
+ text_input_ids_list=[tokens_one, tokens_two],
1791
+ )
1792
+ unet_added_conditions.update(
1793
+ {"text_embeds": pooled_prompt_embeds.repeat(elems_to_repeat_text_embeds, 1)}
1794
+ )
1795
+ prompt_embeds_input = prompt_embeds.repeat(elems_to_repeat_text_embeds, 1, 1)
1796
+ model_pred = unet(
1797
+ noisy_model_input, timesteps, prompt_embeds_input, added_cond_kwargs=unet_added_conditions
1798
+ ).sample
1799
+
1800
+ # Get the target for loss depending on the prediction type
1801
+ if noise_scheduler.config.prediction_type == "epsilon":
1802
+ target = noise
1803
+ elif noise_scheduler.config.prediction_type == "v_prediction":
1804
+ target = noise_scheduler.get_velocity(model_input, noise, timesteps)
1805
+ else:
1806
+ raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
1807
+
1808
+ if args.with_prior_preservation:
1809
+ # Chunk the noise and model_pred into two parts and compute the loss on each part separately.
1810
+ model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
1811
+ target, target_prior = torch.chunk(target, 2, dim=0)
1812
+
1813
+ # Compute prior loss
1814
+ prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
1815
+
1816
+ if args.snr_gamma is None:
1817
+ loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
1818
+ else:
1819
+ # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556.
1820
+ # Since we predict the noise instead of x_0, the original formulation is slightly changed.
1821
+ # This is discussed in Section 4.2 of the same paper.
1822
+
1823
+ if args.with_prior_preservation:
1824
+ # if we're using prior preservation, we calc snr for instance loss only -
1825
+ # and hence only need timesteps corresponding to instance images
1826
+ snr_timesteps, _ = torch.chunk(timesteps, 2, dim=0)
1827
+ else:
1828
+ snr_timesteps = timesteps
1829
+
1830
+ snr = compute_snr(noise_scheduler, snr_timesteps)
1831
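+ # min-SNR weighting: each timestep is weighted by min(SNR, snr_gamma) / SNR so that low-noise
+ # (high-SNR) timesteps do not dominate the loss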
+ base_weight = (
1832
+ torch.stack([snr, args.snr_gamma * torch.ones_like(snr_timesteps)], dim=1).min(dim=1)[0] / snr
1833
+ )
1834
+
1835
+ if noise_scheduler.config.prediction_type == "v_prediction":
1836
+ # Velocity objective needs to be floored to an SNR weight of one.
1837
+ mse_loss_weights = base_weight + 1
1838
+ else:
1839
+ # Epsilon and sample both use the same loss weights.
1840
+ mse_loss_weights = base_weight
1841
+
1842
+ loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
1843
+ loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights
1844
+ loss = loss.mean()
1845
+
1846
+ if args.with_prior_preservation:
1847
+ # Add the prior loss to the instance loss.
1848
+ loss = loss + args.prior_loss_weight * prior_loss
1849
+
1850
+ accelerator.backward(loss)
1851
+ if accelerator.sync_gradients:
1852
+ params_to_clip = (
1853
+ itertools.chain(unet_lora_parameters, text_lora_parameters_one, text_lora_parameters_two)
1854
+ if (args.train_text_encoder or args.train_text_encoder_ti)
1855
+ else unet_lora_parameters
1856
+ )
1857
+ accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
1858
+ optimizer.step()
1859
+ lr_scheduler.step()
1860
+ optimizer.zero_grad()
1861
+
1862
+ # every step, we restore the non-trained token embeddings to their original values and re-normalize the trained ones.
1863
+ if args.train_text_encoder_ti:
1864
+ # retract_embeddings already iterates over both text encoders internally, so one call per step suffices
+ embedding_handler.retract_embeddings()
1866
+
1867
+ # Checks if the accelerator has performed an optimization step behind the scenes
1868
+ if accelerator.sync_gradients:
1869
+ progress_bar.update(1)
1870
+ global_step += 1
1871
+
1872
+ if accelerator.is_main_process:
1873
+ if global_step % args.checkpointing_steps == 0:
1874
+ # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
1875
+ if args.checkpoints_total_limit is not None:
1876
+ checkpoints = os.listdir(args.output_dir)
1877
+ checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
1878
+ checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
1879
+
1880
+ # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
1881
+ if len(checkpoints) >= args.checkpoints_total_limit:
1882
+ num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
1883
+ removing_checkpoints = checkpoints[0:num_to_remove]
1884
+
1885
+ logger.info(
1886
+ f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
1887
+ )
1888
+ logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
1889
+
1890
+ for removing_checkpoint in removing_checkpoints:
1891
+ removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
1892
+ shutil.rmtree(removing_checkpoint)
1893
+
1894
+ save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
1895
+ accelerator.save_state(save_path)
1896
+ logger.info(f"Saved state to {save_path}")
1897
+
1898
+ logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
1899
+ progress_bar.set_postfix(**logs)
1900
+ accelerator.log(logs, step=global_step)
1901
+
1902
+ if global_step >= args.max_train_steps:
1903
+ break
1904
+
1905
+ if accelerator.is_main_process:
1906
+ if args.validation_prompt is not None and epoch % args.validation_epochs == 0:
1907
+ logger.info(
1908
+ f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
1909
+ f" {args.validation_prompt}."
1910
+ )
1911
+ # create pipeline
1912
+ if freeze_text_encoder:
1913
+ text_encoder_one = text_encoder_cls_one.from_pretrained(
1914
+ args.pretrained_model_name_or_path,
1915
+ subfolder="text_encoder",
1916
+ revision=args.revision,
1917
+ variant=args.variant,
1918
+ )
1919
+ text_encoder_two = text_encoder_cls_two.from_pretrained(
1920
+ args.pretrained_model_name_or_path,
1921
+ subfolder="text_encoder_2",
1922
+ revision=args.revision,
1923
+ variant=args.variant,
1924
+ )
1925
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
1926
+ args.pretrained_model_name_or_path,
1927
+ vae=vae,
1928
+ text_encoder=accelerator.unwrap_model(text_encoder_one),
1929
+ text_encoder_2=accelerator.unwrap_model(text_encoder_two),
1930
+ unet=accelerator.unwrap_model(unet),
1931
+ revision=args.revision,
1932
+ variant=args.variant,
1933
+ torch_dtype=weight_dtype,
1934
+ )
1935
+
1936
+ # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
1937
+ scheduler_args = {}
1938
+
1939
+ if "variance_type" in pipeline.scheduler.config:
1940
+ variance_type = pipeline.scheduler.config.variance_type
1941
+
1942
+ if variance_type in ["learned", "learned_range"]:
1943
+ variance_type = "fixed_small"
1944
+
1945
+ scheduler_args["variance_type"] = variance_type
1946
+
1947
+ pipeline.scheduler = DPMSolverMultistepScheduler.from_config(
1948
+ pipeline.scheduler.config, **scheduler_args
1949
+ )
1950
+
1951
+ pipeline = pipeline.to(accelerator.device)
1952
+ pipeline.set_progress_bar_config(disable=True)
1953
+
1954
+ # run inference
1955
+ generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
1956
+ pipeline_args = {"prompt": args.validation_prompt}
1957
+
1958
+ with torch.cuda.amp.autocast():
1959
+ images = [
1960
+ pipeline(**pipeline_args, generator=generator).images[0]
1961
+ for _ in range(args.num_validation_images)
1962
+ ]
1963
+
1964
+ for tracker in accelerator.trackers:
1965
+ if tracker.name == "tensorboard":
1966
+ np_images = np.stack([np.asarray(img) for img in images])
1967
+ tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
1968
+ if tracker.name == "wandb":
1969
+ tracker.log(
1970
+ {
1971
+ "validation": [
1972
+ wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
1973
+ for i, image in enumerate(images)
1974
+ ]
1975
+ }
1976
+ )
1977
+
1978
+ del pipeline
1979
+ torch.cuda.empty_cache()
1980
+
1981
+ # Save the lora layers
1982
+ accelerator.wait_for_everyone()
1983
+ if accelerator.is_main_process:
1984
+ unet = accelerator.unwrap_model(unet)
1985
+ unet = unet.to(torch.float32)
1986
+ unet_lora_layers = convert_state_dict_to_diffusers(get_peft_model_state_dict(unet))
1987
+
1988
+ if args.train_text_encoder:
1989
+ text_encoder_one = accelerator.unwrap_model(text_encoder_one)
1990
+ text_encoder_lora_layers = convert_state_dict_to_diffusers(
1991
+ get_peft_model_state_dict(text_encoder_one.to(torch.float32))
1992
+ )
1993
+ text_encoder_two = accelerator.unwrap_model(text_encoder_two)
1994
+ text_encoder_2_lora_layers = convert_state_dict_to_diffusers(
1995
+ get_peft_model_state_dict(text_encoder_two.to(torch.float32))
1996
+ )
1997
+ else:
1998
+ text_encoder_lora_layers = None
1999
+ text_encoder_2_lora_layers = None
2000
+
2001
+ StableDiffusionXLPipeline.save_lora_weights(
2002
+ save_directory=args.output_dir,
2003
+ unet_lora_layers=unet_lora_layers,
2004
+ text_encoder_lora_layers=text_encoder_lora_layers,
2005
+ text_encoder_2_lora_layers=text_encoder_2_lora_layers,
2006
+ )
2007
+ images = []
2008
+ if args.validation_prompt and args.num_validation_images > 0:
2009
+ # Final inference
2010
+ # Load previous pipeline
2011
+ vae = AutoencoderKL.from_pretrained(
2012
+ vae_path,
2013
+ subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None,
2014
+ revision=args.revision,
2015
+ variant=args.variant,
2016
+ torch_dtype=weight_dtype,
2017
+ )
2018
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
2019
+ args.pretrained_model_name_or_path,
2020
+ vae=vae,
2021
+ revision=args.revision,
2022
+ variant=args.variant,
2023
+ torch_dtype=weight_dtype,
2024
+ )
2025
+
2026
+ # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
2027
+ scheduler_args = {}
2028
+
2029
+ if "variance_type" in pipeline.scheduler.config:
2030
+ variance_type = pipeline.scheduler.config.variance_type
2031
+
2032
+ if variance_type in ["learned", "learned_range"]:
2033
+ variance_type = "fixed_small"
2034
+
2035
+ scheduler_args["variance_type"] = variance_type
2036
+
2037
+ pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args)
2038
+
2039
+ # load the trained lora weights
2040
+ pipeline.load_lora_weights(args.output_dir)
2041
+
2042
+ # run inference
2043
+ pipeline = pipeline.to(accelerator.device)
2044
+ generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
2045
+ images = [
2046
+ pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
2047
+ for _ in range(args.num_validation_images)
2048
+ ]
2049
+
2050
+ for tracker in accelerator.trackers:
2051
+ if tracker.name == "tensorboard":
2052
+ np_images = np.stack([np.asarray(img) for img in images])
2053
+ tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC")
2054
+ if tracker.name == "wandb":
2055
+ tracker.log(
2056
+ {
2057
+ "test": [
2058
+ wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
2059
+ for i, image in enumerate(images)
2060
+ ]
2061
+ }
2062
+ )
2063
+
2064
+ if args.train_text_encoder_ti:
2065
+ embedding_handler.save_embeddings(
2066
+ f"{args.output_dir}/{args.output_dir}_emb.safetensors",
2067
+ )
2068
+
2069
+ # Convert to Kohya/WebUI format
2070
+ lora_state_dict = load_file(f"{args.output_dir}/pytorch_lora_weights.safetensors")
2071
+ peft_state_dict = convert_all_state_dict_to_peft(lora_state_dict)
2072
+ kohya_state_dict = convert_state_dict_to_kohya(peft_state_dict)
2073
+ save_file(kohya_state_dict, f"{args.output_dir}/{args.output_dir}.safetensors")
2074
+
2075
+ save_model_card(
2076
+ model_id if not args.push_to_hub else repo_id,
2077
+ images=images,
2078
+ base_model=args.pretrained_model_name_or_path,
2079
+ train_text_encoder=args.train_text_encoder,
2080
+ train_text_encoder_ti=args.train_text_encoder_ti,
2081
+ token_abstraction_dict=train_dataset.token_abstraction_dict,
2082
+ instance_prompt=args.instance_prompt,
2083
+ validation_prompt=args.validation_prompt,
2084
+ repo_folder=args.output_dir,
2085
+ vae_path=args.pretrained_vae_model_name_or_path,
2086
+ )
2087
+ if args.push_to_hub:
2088
+ upload_folder(
2089
+ repo_id=repo_id,
2090
+ folder_path=args.output_dir,
2091
+ commit_message="End of training",
2092
+ ignore_patterns=["step_*", "epoch_*"],
2093
+ )
2094
+
2095
+ accelerator.end_training()
2096
+
2097
+
2098
+ if __name__ == "__main__":
2099
+ args = parse_args()
2100
+ main(args)