Bluwynd committed on
Commit
ee5a293
1 Parent(s): a409df7

Upload autotrain (1).ipynb

Files changed (1)
  1. autotrain (1).ipynb +773 -0
autotrain (1).ipynb ADDED
@@ -0,0 +1,773 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "efSw4FzN89ia",
+ "outputId": "e8733152-31fd-420f-8a14-0bca6af46641"
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Cloning into '/content/Kohya-Colab'...\n",
+ "remote: Enumerating objects: 2158, done.\u001b[K\n",
+ "remote: Counting objects: 100% (893/893), done.\u001b[K\n",
+ "remote: Compressing objects: 100% (231/231), done.\u001b[K\n",
+ "remote: Total 2158 (delta 738), reused 671 (delta 662), pack-reused 1265 (from 1)\u001b[K\n",
+ "Receiving objects: 100% (2158/2158), 4.43 MiB | 6.82 MiB/s, done.\n",
+ "Resolving deltas: 100% (1418/1418), done.\n",
+ "The following additional packages will be installed:\n",
+ "  libaria2-0 libc-ares2\n",
+ "The following NEW packages will be installed:\n",
+ "  aria2 libaria2-0 libc-ares2\n",
+ "0 upgraded, 3 newly installed, 0 to remove and 49 not upgraded.\n",
+ "Need to get 1,513 kB of archives.\n",
+ "After this operation, 5,441 kB of additional disk space will be used.\n",
+ "Selecting previously unselected package libc-ares2:amd64.\n",
+ "(Reading database ... 123621 files and directories currently installed.)\n",
+ "Preparing to unpack .../libc-ares2_1.18.1-1ubuntu0.22.04.3_amd64.deb ...\n",
+ "Unpacking libc-ares2:amd64 (1.18.1-1ubuntu0.22.04.3) ...\n",
+ "Selecting previously unselected package libaria2-0:amd64.\n",
+ "Preparing to unpack .../libaria2-0_1.36.0-1_amd64.deb ...\n",
+ "Unpacking libaria2-0:amd64 (1.36.0-1) ...\n",
+ "Selecting previously unselected package aria2.\n",
+ "Preparing to unpack .../aria2_1.36.0-1_amd64.deb ...\n",
+ "Unpacking aria2 (1.36.0-1) ...\n",
+ "Setting up libc-ares2:amd64 (1.18.1-1ubuntu0.22.04.3) ...\n",
+ "Setting up libaria2-0:amd64 (1.36.0-1) ...\n",
+ "Setting up aria2 (1.36.0-1) ...\n",
+ "Processing triggers for man-db (2.10.2-1) ...\n",
+ "Processing triggers for libc-bin (2.35-0ubuntu3.4) ...\n",
+ "/sbin/ldconfig.real: /usr/local/lib/libur_loader.so.0 is not a symbolic link\n",
+ "\n",
+ "/sbin/ldconfig.real: /usr/local/lib/libur_adapter_opencl.so.0 is not a symbolic link\n",
+ "\n",
+ "/sbin/ldconfig.real: /usr/local/lib/libtbbbind_2_0.so.3 is not a symbolic link\n",
+ "\n",
+ "/sbin/ldconfig.real: /usr/local/lib/libur_adapter_level_zero.so.0 is not a symbolic link\n",
+ "\n",
+ "/sbin/ldconfig.real: /usr/local/lib/libtbbbind.so.3 is not a symbolic link\n",
+ "\n",
+ "/sbin/ldconfig.real: /usr/local/lib/libtbbmalloc_proxy.so.2 is not a symbolic link\n",
+ "\n",
+ "/sbin/ldconfig.real: /usr/local/lib/libtbb.so.12 is not a symbolic link\n",
+ "\n",
+ "/sbin/ldconfig.real: /usr/local/lib/libtbbbind_2_5.so.3 is not a symbolic link\n",
+ "\n",
+ "/sbin/ldconfig.real: /usr/local/lib/libtbbmalloc.so.2 is not a symbolic link\n",
+ "\n",
+ "  Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m100.3/100.3 kB\u001b[0m \u001b[31m3.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m266.3/266.3 kB\u001b[0m \u001b[31m12.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25h  Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
+ "  Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
+ "  Installing backend dependencies ... \u001b[?25l\u001b[?25hdone\n",
+ "  Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
+ "  Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
+ "  Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
+ "  Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
+ "  Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.0/4.0 MB\u001b[0m \u001b[31m54.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25h  Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m43.9/43.9 kB\u001b[0m \u001b[31m3.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m191.5/191.5 kB\u001b[0m \u001b[31m13.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.3/6.3 MB\u001b[0m \u001b[31m82.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m53.1/53.1 kB\u001b[0m \u001b[31m4.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m125.7/125.7 kB\u001b[0m \u001b[31m9.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m61.7/61.7 MB\u001b[0m \u001b[31m14.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m41.6/41.6 kB\u001b[0m \u001b[31m3.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m503.1/503.1 kB\u001b[0m \u001b[31m28.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m825.8/825.8 kB\u001b[0m \u001b[31m41.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m92.6/92.6 MB\u001b[0m \u001b[31m8.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[91m╸\u001b[0m \u001b[32m475.2/475.2 MB\u001b[0m \u001b[31m130.3 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m"
+ ]
+ }
+ ],
+ "source": [
+ "import os\n",
+ "import zipfile\n",
+ "import shutil\n",
+ "from subprocess import getoutput\n",
+ "from IPython.utils import capture\n",
+ "import random\n",
+ "import concurrent.futures\n",
+ "from tqdm import tqdm\n",
+ "from PIL import Image\n",
+ "import time\n",
+ "import re\n",
+ "import json\n",
+ "import glob\n",
+ "import gdown\n",
+ "import requests\n",
+ "import subprocess\n",
+ "from urllib.parse import urlparse, unquote\n",
+ "from pathlib import Path\n",
+ "import toml\n",
+ "\n",
+ "# root_dir\n",
+ "root_dir = \"/content\"\n",
+ "deps_dir = os.path.join(root_dir, \"deps\")\n",
+ "repo_dir = os.path.join(root_dir, \"Kohya-Colab\")\n",
+ "training_dir = os.path.join(root_dir, \"Dreamboot-Config\")\n",
+ "pretrained_model = os.path.join(root_dir, \"pretrained_model\")\n",
+ "vae_dir = os.path.join(root_dir, \"vae\")\n",
+ "config_dir = os.path.join(training_dir, \"config\")\n",
+ "\n",
+ "# repo_dir\n",
+ "accelerate_config = os.path.join(repo_dir, \"accelerate_config/config.yaml\")\n",
+ "tools_dir = os.path.join(repo_dir, \"tools\")\n",
+ "finetune_dir = os.path.join(repo_dir, \"finetune\")\n",
+ "\n",
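+ "# Note (added comment): %store saves each variable in IPython's storage database so\n",
+ "# that later cells, or a restarted runtime, can restore them with %store -r.\n",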
+ "for store in [\"root_dir\", \"deps_dir\", \"repo_dir\", \"training_dir\", \"pretrained_model\", \"vae_dir\", \"accelerate_config\", \"tools_dir\", \"finetune_dir\", \"config_dir\"]:\n",
+ "    with capture.capture_output() as cap:\n",
+ "        %store {store}\n",
+ "        del cap\n",
+ "\n",
+ "repo_url = \"https://github.com/phamhungd/Kohya-Colab\"\n",
+ "bitsandbytes_main_py = \"/usr/local/lib/python3.10/dist-packages/bitsandbytes/cuda_setup/main.py\"\n",
+ "branch = \"\"\n",
+ "verbose = False\n",
+ "\n",
+ "def read_file(filename):\n",
+ "    with open(filename, \"r\") as f:\n",
+ "        contents = f.read()\n",
+ "    return contents\n",
+ "\n",
+ "\n",
+ "def write_file(filename, contents):\n",
+ "    with open(filename, \"w\") as f:\n",
+ "        f.write(contents)\n",
+ "\n",
+ "\n",
+ "def clone_repo(url):\n",
+ "    if not os.path.exists(repo_dir):\n",
+ "        os.chdir(root_dir)\n",
+ "        !git clone {url} {repo_dir}\n",
+ "    else:\n",
+ "        os.chdir(repo_dir)\n",
+ "        if branch:\n",
+ "            !git pull origin {branch}\n",
+ "        else:\n",
+ "            !git pull\n",
+ "\n",
+ "\n",
+ "def install_dependencies():\n",
+ "    s = getoutput('nvidia-smi')\n",
+ "\n",
+ "    if 'T4' in s:\n",
+ "        !sed -i \"s@cpu@cuda@\" library/model_util.py\n",
+ "\n",
+ "    !pip install {'-q' if not verbose else ''} --upgrade -r requirements.txt\n",
+ "\n",
+ "    from accelerate.utils import write_basic_config\n",
+ "\n",
+ "    if not os.path.exists(accelerate_config):\n",
+ "        write_basic_config(save_location=accelerate_config)\n",
+ "\n",
+ "\n",
+ "def remove_bitsandbytes_message(filename):\n",
+ "    welcome_message = \"\"\"\n",
+ "def evaluate_cuda_setup():\n",
+ "    print('')\n",
+ "    print('='*35 + 'BUG REPORT' + '='*35)\n",
+ "    print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues')\n",
+ "    print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link')\n",
+ "    print('='*80)\"\"\"\n",
+ "\n",
+ "    new_welcome_message = \"\"\"\n",
+ "def evaluate_cuda_setup():\n",
+ "    import os\n",
+ "    if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':\n",
+ "        print('')\n",
+ "        print('=' * 35 + 'BUG REPORT' + '=' * 35)\n",
+ "        print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues')\n",
+ "        print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link')\n",
+ "        print('To hide this message, set the BITSANDBYTES_NOWELCOME variable like so: export BITSANDBYTES_NOWELCOME=1')\n",
+ "        print('=' * 80)\"\"\"\n",
+ "\n",
+ "    contents = read_file(filename)\n",
+ "    new_contents = contents.replace(welcome_message, new_welcome_message)\n",
+ "    write_file(filename, new_contents)\n",
+ "\n",
+ "\n",
+ "def main():\n",
+ "    os.chdir(root_dir)\n",
+ "\n",
+ "    for dir in [\n",
+ "        deps_dir,\n",
+ "        training_dir,\n",
+ "        config_dir,\n",
+ "        pretrained_model,\n",
+ "        vae_dir\n",
+ "    ]:\n",
+ "        os.makedirs(dir, exist_ok=True)\n",
+ "\n",
+ "    clone_repo(repo_url)\n",
+ "\n",
+ "    if branch:\n",
+ "        os.chdir(repo_dir)\n",
+ "        status = os.system(f\"git checkout {branch}\")\n",
+ "        if status != 0:\n",
+ "            raise Exception(\"Failed to checkout branch or commit\")\n",
+ "\n",
+ "    os.chdir(repo_dir)\n",
+ "\n",
+ "    !apt install aria2 {'-qq' if not verbose else ''}\n",
+ "\n",
+ "    install_dependencies()\n",
+ "    time.sleep(3)\n",
+ "\n",
+ "    remove_bitsandbytes_message(bitsandbytes_main_py)\n",
+ "\n",
+ "    os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n",
+ "    os.environ[\"BITSANDBYTES_NOWELCOME\"] = \"1\"\n",
+ "    os.environ[\"SAFETENSORS_FAST_GPU\"] = \"1\"\n",
+ "\n",
+ "    cuda_path = \"/usr/local/cuda-11.8/targets/x86_64-linux/lib/\"\n",
+ "    ld_library_path = os.environ.get(\"LD_LIBRARY_PATH\", \"\")\n",
+ "    os.environ[\"LD_LIBRARY_PATH\"] = f\"{ld_library_path}:{cuda_path}\"\n",
+ "\n",
+ "main()\n",
+ "\n",
+ "\n",
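+ "# Note (added comment): train_data_dir, NoAutoCaption, CustomCaption, Model, Threshold,\n",
+ "# Repeats, resolution, and the other capitalized settings used below are not defined in\n",
+ "# this cell; they are presumably supplied by Colab form fields elsewhere in the notebook.\n",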
+ "print(f\"Your train data directory: {train_data_dir}\")\n",
+ "\n",
+ "os.chdir(finetune_dir)\n",
+ "\n",
+ "config = {\n",
+ "    \"_train_data_dir\": train_data_dir,  # keep the original reference\n",
+ "    \"batch_size\": 8,  # value from the second script\n",
+ "    \"repo_id\": \"SmilingWolf/wd-v1-4-convnextv2-tagger-v2\",  # value from the second script\n",
+ "    \"beam_search\": beam_search,  # keep the original reference\n",
+ "    \"min_length\": min_length,  # keep the original reference\n",
+ "    \"max_length\": max_length,  # keep the original reference\n",
+ "    \"debug\": True,  # from the second script\n",
+ "    \"caption_extension\": \".txt\",  # value from the second script\n",
+ "    \"max_data_loader_n_workers\": 2,  # value from the second script\n",
+ "    \"recursive\": True,  # from the second script\n",
+ "    \"remove_underscore\": True,  # from the second script\n",
+ "    \"general_threshold\": Threshold,  # from the second script\n",
+ "    \"character_threshold\": 0.50  # from the second script\n",
+ "}\n",
+ "\n",
+ "args = \"\"\n",
+ "for k, v in config.items():\n",
+ "    if k.startswith(\"_\"):\n",
+ "        args += f'\"{v}\" '\n",
+ "    elif isinstance(v, str):\n",
+ "        args += f'--{k}=\"{v}\" '\n",
+ "    elif isinstance(v, bool) and v:\n",
+ "        args += f\"--{k} \"\n",
+ "    elif isinstance(v, float) and not isinstance(v, bool):\n",
+ "        args += f\"--{k}={v} \"\n",
+ "    elif isinstance(v, int) and not isinstance(v, bool):\n",
+ "        args += f\"--{k}={v} \"\n",
+ "\n",
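+ "# Note (added comment): in the loop above, keys with a leading underscore emit only\n",
+ "# their value (positional arguments), strings become --key=\"value\", True booleans\n",
+ "# become bare --key switches, and numbers become --key=value.\n",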
+ "# Check which script to run, based on NoAutoCaption\n",
+ "if 'NoAutoCaption' not in locals() or not NoAutoCaption:\n",
+ "    final_args = f\"python tag_images_by_wd14_tagger.py {args}\"\n",
+ "else:\n",
+ "    final_args = f\"python make_captions.py {args}\"\n",
+ "\n",
+ "os.chdir(finetune_dir)\n",
+ "!{final_args}\n",
+ "\n",
+ "os.chdir(root_dir)\n",
+ "\n",
+ "extension = \".txt\"\n",
+ "custom_tag = CustomCaption\n",
+ "\n",
+ "def read_file(filename):\n",
+ "    with open(filename, \"r\") as f:\n",
+ "        contents = f.read()\n",
+ "    return contents\n",
+ "\n",
+ "def write_file(filename, contents):\n",
+ "    with open(filename, \"w\") as f:\n",
+ "        f.write(contents)\n",
+ "\n",
+ "def process_tags(filename, custom_tag, append, remove_tag):\n",
+ "    contents = read_file(filename)\n",
+ "    tags = [tag.strip() for tag in contents.split(',')]\n",
+ "    custom_tags = [tag.strip() for tag in custom_tag.split(',')]\n",
+ "\n",
+ "    for custom_tag in custom_tags:\n",
+ "        custom_tag = custom_tag.replace(\"_\", \" \")\n",
+ "        if remove_tag:\n",
+ "            while custom_tag in tags:\n",
+ "                tags.remove(custom_tag)\n",
+ "        else:\n",
+ "            if custom_tag not in tags:\n",
+ "                if append:\n",
+ "                    tags.append(custom_tag)\n",
+ "                else:\n",
+ "                    tags.insert(0, custom_tag)\n",
+ "\n",
+ "    contents = ', '.join(tags)\n",
+ "    write_file(filename, contents)\n",
+ "\n",
+ "def process_directory(train_data_dir, tag, append, remove_tag, recursive):\n",
+ "    for filename in os.listdir(train_data_dir):\n",
+ "        file_path = os.path.join(train_data_dir, filename)\n",
+ "        if os.path.isdir(file_path) and recursive:\n",
+ "            process_directory(file_path, tag, append, remove_tag, recursive)\n",
+ "        elif filename.endswith(extension):\n",
+ "            process_tags(file_path, tag, append, remove_tag)\n",
+ "\n",
+ "if not any(\n",
+ "    [filename.endswith(extension) for filename in os.listdir(train_data_dir)]\n",
+ "):\n",
+ "    for filename in os.listdir(train_data_dir):\n",
+ "        if filename.endswith((\".png\", \".jpg\", \".jpeg\", \".webp\", \".bmp\")):\n",
+ "            open(\n",
+ "                os.path.join(train_data_dir, filename.split(\".\")[0] + extension),\n",
+ "                \"w\",\n",
+ "            ).close()\n",
+ "if not NoAutoCaption:\n",
+ "    process_directory(train_data_dir, custom_tag, False, False, True)\n",
+ "\n",
+ "# 3. Settings\n",
+ "\n",
+ "MODEL_URLS = {\n",
+ "    \"GSMaletoPhotoreal_v4\": \"https://civitai.com/api/download/models/164715\",\n",
+ "    \"GSMaletoFusion_v1\": \"https://civitai.com/api/download/models/138518\",\n",
+ "    \"GSMaletoAnime_v1\": \"https://civitai.com/api/download/models/503605\",\n",
+ "}\n",
+ "MODEL_URL = MODEL_URLS.get(Model, Model)\n",
+ "drive_dir = os.path.join(root_dir, \"drive/MyDrive\")\n",
+ "\n",
+ "def get_supported_extensions():\n",
+ "    return (\".ckpt\", \".safetensors\", \".pt\", \".pth\")\n",
+ "\n",
+ "def get_filename(url, quiet=True):\n",
+ "    extensions = get_supported_extensions()\n",
+ "\n",
+ "    if url.startswith(drive_dir) or url.endswith(extensions):\n",
+ "        filename = os.path.basename(url)\n",
+ "    else:\n",
+ "        response = requests.get(url, stream=True)\n",
+ "        response.raise_for_status()\n",
+ "\n",
+ "        if 'content-disposition' in response.headers:\n",
+ "            content_disposition = response.headers['content-disposition']\n",
+ "            filename = re.findall('filename=\"?([^\"]+)\"?', content_disposition)[0]\n",
+ "        else:\n",
+ "            url_path = urlparse(url).path\n",
+ "            filename = unquote(os.path.basename(url_path))\n",
+ "\n",
+ "    if filename.endswith(get_supported_extensions()):\n",
+ "        return filename\n",
+ "    else:\n",
+ "        return None\n",
+ "\n",
+ "def get_most_recent_file(directory):\n",
+ "    files = glob.glob(os.path.join(directory, \"*\"))\n",
+ "    if not files:\n",
+ "        return None\n",
+ "    most_recent_file = max(files, key=os.path.getmtime)\n",
+ "\n",
+ "    return most_recent_file\n",
+ "\n",
+ "def parse_args(config):\n",
+ "    args = []\n",
+ "\n",
+ "    for k, v in config.items():\n",
+ "        if k.startswith(\"_\"):\n",
+ "            args.append(f\"{v}\")\n",
+ "        elif isinstance(v, str):\n",
+ "            args.append(f'--{k}={v}')\n",
+ "        elif isinstance(v, bool) and v:\n",
+ "            args.append(f\"--{k}\")\n",
+ "        elif isinstance(v, float) and not isinstance(v, bool):\n",
+ "            args.append(f\"--{k}={v}\")\n",
+ "        elif isinstance(v, int) and not isinstance(v, bool):\n",
+ "            args.append(f\"--{k}={v}\")\n",
+ "\n",
+ "    return args\n",
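+ "# Note (added comment): parse_args mirrors the string-building loop above but returns\n",
+ "# a list, which is what subprocess.run expects; the \"_url\" entry below therefore ends\n",
+ "# up as the positional download URL passed to aria2c.\n",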
+ "def aria2_download(dir, filename, url):\n",
+ "    aria2_config = {\n",
+ "        \"console-log-level\": \"error\",\n",
+ "        \"summary-interval\": 10,\n",
+ "        \"continue\": True,\n",
+ "        \"max-connection-per-server\": 16,\n",
+ "        \"min-split-size\": \"1M\",\n",
+ "        \"split\": 16,\n",
+ "        \"dir\": dir,\n",
+ "        \"out\": filename,\n",
+ "        \"_url\": url,\n",
+ "    }\n",
+ "    aria2_args = parse_args(aria2_config)\n",
+ "    subprocess.run([\"aria2c\", *aria2_args])\n",
+ "\n",
+ "def gdown_download(url, dst, filepath):\n",
+ "    if \"/uc?id/\" in url:\n",
+ "        return gdown.download(url, filepath, quiet=False)\n",
+ "    elif \"/file/d/\" in url:\n",
+ "        return gdown.download(url=url, output=filepath, quiet=False, fuzzy=True)\n",
+ "    elif \"/drive/folders/\" in url:\n",
+ "        os.chdir(dst)\n",
+ "        return gdown.download_folder(url, quiet=True, use_cookies=False)\n",
+ "\n",
+ "def download(url, dst):\n",
+ "    print(f\"Starting download from {url}\")\n",
+ "    filename = get_filename(url)\n",
+ "    filepath = os.path.join(dst, filename)\n",
+ "\n",
+ "    if \"drive.google.com\" in url:\n",
+ "        gdown_download(url, dst, filepath)  # do not shadow the gdown module\n",
+ "    else:\n",
+ "        if \"huggingface.co\" in url and \"/blob/\" in url:\n",
+ "            url = url.replace(\"/blob/\", \"/resolve/\")\n",
+ "        aria2_download(dst, filename, url)\n",
+ "\n",
+ "    print(f\"Download finished: {filepath}\")\n",
+ "    return filepath\n",
+ "\n",
+ "def get_gpu_name():\n",
+ "    try:\n",
+ "        return subprocess.check_output(\"nvidia-smi --query-gpu=name --format=csv,noheader,nounits\", shell=True).decode('ascii').strip()\n",
+ "    except:\n",
+ "        return None\n",
+ "\n",
+ "def main():\n",
+ "    global model_path, vae_path\n",
+ "    model_path, vae_path = None, None\n",
+ "    download_targets = {\n",
+ "        \"model\": (MODEL_URL, pretrained_model),\n",
+ "    }\n",
+ "    for target, (url, dst) in download_targets.items():\n",
+ "        if url and not url.startswith(f\"PASTE {target.upper()} URL OR GDRIVE PATH HERE\"):\n",
+ "            filepath = download(url, dst)\n",
+ "            if target == \"model\":\n",
+ "                model_path = filepath\n",
+ "            print()\n",
+ "    if model_path:\n",
+ "        print(f\"Selected model: {model_path}\")\n",
+ "\n",
+ "if Model.startswith(\"/content/drive/\"):\n",
+ "    model_path = Model\n",
+ "    print(f\"Diffusers model is loaded: {Model}\")\n",
+ "else:\n",
+ "    main()\n",
+ "\n",
+ "!aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt -d /content/VAE -o VAE84EMA.vae.pt\n",
+ "vae = \"/content/VAE/VAE84EMA.vae.pt\"\n",
+ "\n",
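+ "# Note (added comment): the aria2c short flags above match the aria2_config keys used\n",
+ "# in aria2_download: -c (continue), -x (max-connection-per-server), -k (min-split-size),\n",
+ "# -s (split).\n",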
+ "# Dataset Config\n",
+ "\n",
+ "activation_word = \"GSGI\"\n",
+ "caption_extension = \".txt\"\n",
+ "token_to_captions = False\n",
+ "dataset_repeats = Repeats\n",
+ "keep_tokens = 0\n",
+ "flip_aug = False\n",
+ "\n",
+ "if ',' in activation_word or ' ' in activation_word:\n",
+ "    words = activation_word.replace(',', ' ').split()\n",
+ "    class_token = words[-1]\n",
+ "else:\n",
+ "    class_token = activation_word  # fallback so class_token is always defined\n",
+ "\n",
+ "\n",
+ "def read_file(filename):\n",
+ "    with open(filename, \"r\") as f:\n",
+ "        contents = f.read()\n",
+ "    return contents\n",
+ "\n",
+ "\n",
+ "def write_file(filename, contents):\n",
+ "    with open(filename, \"w\") as f:\n",
+ "        f.write(contents)\n",
+ "\n",
+ "\n",
+ "def get_supported_images(folder):\n",
+ "    supported_extensions = (\".png\", \".jpg\", \".jpeg\", \".webp\", \".bmp\")\n",
+ "    return [file for ext in supported_extensions for file in glob.glob(f\"{folder}/*{ext}\")]\n",
+ "\n",
+ "\n",
+ "def get_subfolders_with_supported_images(folder):\n",
+ "    subfolders = [os.path.join(folder, subfolder) for subfolder in os.listdir(folder) if os.path.isdir(os.path.join(folder, subfolder))]\n",
+ "    return [subfolder for subfolder in subfolders if len(get_supported_images(subfolder)) > 0]\n",
+ "\n",
+ "\n",
+ "def process_tags(filename, custom_tag, remove_tag):\n",
+ "    contents = read_file(filename)\n",
+ "    tags = [tag.strip() for tag in contents.split(',')]\n",
+ "    custom_tags = [tag.strip() for tag in custom_tag.split(',')]\n",
+ "\n",
+ "    for custom_tag in custom_tags:\n",
+ "        custom_tag = custom_tag.replace(\"_\", \" \")\n",
+ "        # if remove_tag:\n",
+ "        #     while custom_tag in tags:\n",
+ "        #         tags.remove(custom_tag)\n",
+ "        # else:\n",
+ "        if custom_tag not in tags:\n",
+ "            tags.insert(0, custom_tag)\n",
+ "\n",
+ "    contents = ', '.join(tags)\n",
+ "    write_file(filename, contents)\n",
+ "\n",
+ "\n",
+ "def process_folder_recursively(folder):\n",
+ "    for root, _, files in os.walk(folder):\n",
+ "        for file in files:\n",
+ "            if file.endswith(caption_extension):\n",
+ "                file_path = os.path.join(root, file)\n",
+ "                extracted_class_token = get_class_token_from_folder_name(root, folder)\n",
+ "                train_supported_images = get_supported_images(train_data_dir)\n",
+ "                tag = extracted_class_token if extracted_class_token else activation_word if train_supported_images else \"\"\n",
+ "                if tag != \"\":\n",
+ "                    process_tags(file_path, tag, remove_tag=(not token_to_captions))\n",
+ "\n",
+ "\n",
+ "def get_num_repeats(folder):\n",
+ "    folder_name = os.path.basename(folder)\n",
+ "    try:\n",
+ "        repeats, _ = folder_name.split('_', 1)\n",
+ "        num_repeats = int(repeats)\n",
+ "    except ValueError:\n",
+ "        num_repeats = dataset_repeats\n",
+ "\n",
+ "    return num_repeats\n",
+ "\n",
+ "\n",
+ "def get_class_token_from_folder_name(folder, parent_folder):\n",
+ "    if folder == parent_folder:\n",
+ "        return class_token\n",
+ "\n",
+ "    folder_name = os.path.basename(folder)\n",
+ "    try:\n",
+ "        _, concept = folder_name.split('_', 1)\n",
+ "        return concept\n",
+ "    except ValueError:\n",
+ "        return \"\"\n",
+ "\n",
+ "train_supported_images = get_supported_images(train_data_dir)\n",
+ "train_subfolders = get_subfolders_with_supported_images(train_data_dir)\n",
+ "\n",
+ "subsets = []\n",
+ "config = {\n",
+ "    \"general\": {\n",
+ "        \"enable_bucket\": True,\n",
+ "        \"caption_extension\": caption_extension,\n",
+ "        \"shuffle_caption\": True,\n",
+ "        \"keep_tokens\": keep_tokens,\n",
+ "        \"bucket_reso_steps\": 64,\n",
+ "        \"bucket_no_upscale\": False,\n",
+ "    },\n",
+ "    \"datasets\": [\n",
+ "        {\n",
+ "            \"resolution\": resolution,\n",
+ "            \"min_bucket_reso\": 320 if resolution > 640 else 256,\n",
+ "            \"max_bucket_reso\": 1280 if resolution > 640 else 1024,\n",
+ "            \"caption_dropout_rate\": 0,\n",
+ "            \"caption_tag_dropout_rate\": 0,\n",
+ "            \"caption_dropout_every_n_epochs\": 0,\n",
+ "            \"flip_aug\": flip_aug,\n",
+ "            \"color_aug\": False,\n",
+ "            \"face_crop_aug_range\": None,\n",
+ "            \"subsets\": subsets,\n",
+ "        }\n",
+ "    ],\n",
+ "}\n",
+ "\n",
+ "if token_to_captions and keep_tokens < 2:\n",
+ "    keep_tokens = 1\n",
+ "    config[\"general\"][\"keep_tokens\"] = keep_tokens  # also update the already-built config\n",
+ "\n",
+ "process_folder_recursively(train_data_dir)\n",
+ "\n",
+ "if train_supported_images:\n",
+ "    subsets.append({\n",
+ "        \"image_dir\": train_data_dir,\n",
+ "        \"class_tokens\": activation_word,\n",
+ "        \"num_repeats\": dataset_repeats,\n",
+ "    })\n",
+ "\n",
+ "for subfolder in train_subfolders:\n",
+ "    num_repeats = get_num_repeats(subfolder)\n",
+ "    extracted_class_token = get_class_token_from_folder_name(subfolder, train_data_dir)\n",
+ "    subsets.append({\n",
+ "        \"image_dir\": subfolder,\n",
+ "        \"class_tokens\": extracted_class_token if extracted_class_token else None,\n",
+ "        \"num_repeats\": num_repeats,\n",
+ "    })\n",
+ "\n",
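+ "# Note (added comment): a subfolder named like \"10_myconcept\" trains with\n",
+ "# num_repeats=10 and class token \"myconcept\"; folders that do not match the\n",
+ "# <repeats>_<name> pattern fall back to dataset_repeats and the activation word.\n",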
+ "for subset in subsets:\n",
+ "    if not glob.glob(f\"{subset['image_dir']}/*.txt\"):\n",
+ "        subset[\"class_tokens\"] = activation_word\n",
+ "\n",
+ "dataset_config = os.path.join(config_dir, \"dataset_config.toml\")\n",
+ "\n",
+ "for key in config:\n",
+ "    if isinstance(config[key], dict):\n",
+ "        for sub_key in config[key]:\n",
+ "            if config[key][sub_key] == \"\":\n",
+ "                config[key][sub_key] = None\n",
+ "    elif config[key] == \"\":\n",
+ "        config[key] = None\n",
+ "\n",
+ "config_str = toml.dumps(config)\n",
+ "\n",
+ "with open(dataset_config, \"w\") as f:\n",
+ "    f.write(config_str)\n",
+ "\n",
+ "print(config_str)\n",
+ "\n",
+ "# Config\n",
+ "optimizer_args = False\n",
+ "conv_dim = 4\n",
+ "conv_alpha = 1\n",
+ "\n",
+ "network_module = \"networks.lora\"\n",
+ "network_args = \"\"\n",
+ "\n",
+ "config = {\n",
+ "    \"model_arguments\": {\n",
+ "        \"v2\": False,\n",
+ "        \"v_parameterization\": False,\n",
+ "        \"pretrained_model_name_or_path\": model_path,\n",
+ "        \"vae\": vae,\n",
+ "    },\n",
+ "    \"additional_network_arguments\": {\n",
+ "        \"no_metadata\": False,\n",
+ "        \"unet_lr\": float(unet_lr),\n",
+ "        \"text_encoder_lr\": float(text_encoder_lr),\n",
+ "        \"network_module\": network_module,\n",
+ "        \"network_dim\": 64,\n",
+ "        \"network_alpha\": 48,\n",
+ "        \"training_comment\": \"GSGI Trainer\",\n",
+ "    },\n",
+ "    \"optimizer_arguments\": {\n",
+ "        \"optimizer_type\": \"AdamW8bit\",\n",
+ "        \"optimizer_args\": eval(optimizer_args) if optimizer_args else None,\n",
+ "        \"learning_rate\": unet_lr,\n",
+ "        \"max_grad_norm\": 1.0,\n",
+ "        \"lr_scheduler\": \"cosine_with_restarts\",\n",
+ "        \"lr_scheduler_num_cycles\": 4,\n",
+ "    },\n",
+ "    \"dataset_arguments\": {\n",
+ "        \"cache_latents\": True,\n",
+ "        \"debug_dataset\": False,\n",
+ "        \"vae_batch_size\": Batch_size,\n",
+ "    },\n",
+ "    \"training_arguments\": {\n",
+ "        \"output_dir\": output_dir,\n",
+ "        \"output_name\": Loraname,\n",
+ "        \"save_precision\": \"fp16\",\n",
+ "        \"save_every_n_epochs\": save_n_epochs_type_value,\n",
+ "        \"train_batch_size\": Batch_size,\n",
+ "        \"max_token_length\": 225,\n",
+ "        \"mem_eff_attn\": False,\n",
+ "        \"xformers\": True,\n",
+ "        \"max_train_epochs\": num_epochs,\n",
+ "        \"max_data_loader_n_workers\": 8,\n",
+ "        \"persistent_data_loader_workers\": True,\n",
+ "        \"gradient_checkpointing\": False,\n",
+ "        \"gradient_accumulation_steps\": 1,\n",
+ "        \"mixed_precision\": \"fp16\",\n",
+ "        \"clip_skip\": 1,\n",
+ "        \"logging_dir\": \"/content/Dreamboot-Config/logs\",\n",
+ "        \"log_prefix\": Loraname,\n",
+ "        \"lowram\": True,\n",
+ "        \"training_comment\": \"train by GSGI Trainer\",\n",
+ "    },\n",
+ "    \"sample_prompt_arguments\": {\n",
+ "        \"sample_every_n_steps\": 200,\n",
+ "        \"sample_every_n_epochs\": 1,\n",
+ "        \"sample_sampler\": \"euler\",\n",
+ "    },\n",
+ "    \"dreambooth_arguments\": {\n",
+ "        \"prior_loss_weight\": 1,\n",
+ "    },\n",
+ "    \"saving_arguments\": {\n",
+ "        \"save_model_as\": \"safetensors\",\n",
+ "    },\n",
+ "}\n",
+ "SamplePrompt = f\"{Loraname},front view, masterpiece,best quality\"\n",
+ "sample_str = f\"\"\"\n",
+ "  {SamplePrompt}\\\n",
+ "  --n lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry \\\n",
+ "  --w 512 \\\n",
+ "  --h 768 \\\n",
+ "  --l 7 \\\n",
+ "  --s 30\n",
+ "\"\"\"\n",
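+ "# Note (added comment): in kohya-ss sample prompt files, --n is the negative prompt,\n",
+ "# --w/--h are the sample width/height, --l is the CFG scale, and --s is the step count.\n",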
+ "config_path = os.path.join(config_dir, \"config_file.toml\")\n",
+ "prompt_path = os.path.join(config_dir, \"sample_prompt.txt\")\n",
+ "\n",
+ "for key in config:\n",
+ "    if isinstance(config[key], dict):\n",
+ "        for sub_key in config[key]:\n",
+ "            if config[key][sub_key] == \"\":\n",
+ "                config[key][sub_key] = None\n",
+ "    elif config[key] == \"\":\n",
+ "        config[key] = None\n",
+ "\n",
+ "config_str = toml.dumps(config)\n",
+ "\n",
+ "def write_file(filename, contents):\n",
+ "    with open(filename, \"w\") as f:\n",
+ "        f.write(contents)\n",
+ "\n",
+ "write_file(config_path, config_str)\n",
+ "write_file(prompt_path, sample_str)\n",
+ "\n",
+ "print(config_str)\n",
+ "\n",
+ "os.chdir(repo_dir)\n",
+ "\n",
+ "\n",
+ "train_file = \"train_network.py\"\n",
+ "ConfigFolder = \"/content/Dreamboot-Config/config\"\n",
+ "sample_prompt = f\"{ConfigFolder}/sample_prompt.txt\"\n",
+ "config_file = f\"{ConfigFolder}/config_file.toml\"\n",
+ "dataset_config = f\"{ConfigFolder}/dataset_config.toml\"\n",
+ "accelerate_conf = {\n",
+ "    \"config_file\": accelerate_config,\n",
+ "    \"num_cpu_threads_per_process\": 1,\n",
+ "}\n",
+ "\n",
+ "train_conf = {\n",
+ "    \"sample_prompts\": sample_prompt,\n",
+ "    \"dataset_config\": dataset_config,\n",
+ "    \"config_file\": config_file\n",
+ "}\n",
+ "\n",
+ "def train(config):\n",
+ "    args = \"\"\n",
+ "    for k, v in config.items():\n",
+ "        if k.startswith(\"_\"):\n",
+ "            args += f'\"{v}\" '\n",
+ "        elif isinstance(v, str):\n",
+ "            args += f'--{k}=\"{v}\" '\n",
+ "        elif isinstance(v, bool) and v:\n",
+ "            args += f\"--{k} \"\n",
+ "        elif isinstance(v, float) and not isinstance(v, bool):\n",
+ "            args += f\"--{k}={v} \"\n",
+ "        elif isinstance(v, int) and not isinstance(v, bool):\n",
+ "            args += f\"--{k}={v} \"\n",
+ "\n",
+ "    return args\n",
+ "\n",
+ "accelerate_args = train(accelerate_conf)\n",
+ "train_args = train(train_conf)\n",
+ "final_args = f\"accelerate launch {accelerate_args} {train_file} {train_args}\"\n",
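+ "\n",
+ "# Note (added comment): final_args is only assembled in this cell; the original\n",
+ "# notebook presumably executes it in a later cell, e.g. via: !{final_args}\n"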
+ ]
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python"
+ },
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+ }