malcolmrey committed
Commit 4839ac2 · 1 Parent(s): 6193c29

training scripts

training-scripts/musubi/ernie/dataset.currentset.toml ADDED
@@ -0,0 +1,15 @@
+ # Image-only ERNIE-Image dataset config for staged wrapper runs.
+ # The wrapper copies a selected source dataset into datasets/currentset,
+ # creates empty .txt sidecars there, then uses this config for caching/training.
+
+ [general]
+ resolution = [512, 512]
+ caption_extension = ".txt"
+ batch_size = 1
+ enable_bucket = true
+ bucket_no_upscale = false
+
+ [[datasets]]
+ image_directory = "C:/Development/trainers/musubi-tuner/datasets/currentset"
+ cache_directory = "C:/Development/trainers/musubi-tuner/datasets/cache"
+ num_repeats = 1
training-scripts/musubi/ernie/prepare_dataset_ernie.bat ADDED
@@ -0,0 +1,117 @@
+ @echo off
+ setlocal EnableExtensions
+
+ cd /d "%~dp0\..\.."
+
+ if "%~1"=="" (
+ echo Usage: %~nx0 ^<dataset_name^>
+ echo Example: %~nx0 aubreyplaza
+ exit /b 1
+ )
+
+ set "DATASET_NAME=%~1"
+ set "SOURCE_DATASET_DIR=C:\Development\ai-toolkit\datasets\%DATASET_NAME%"
+ set "CURRENTSET_DIR=C:\Development\trainers\musubi-tuner\datasets\currentset"
+ set "CACHE_DIR=C:\Development\trainers\musubi-tuner\datasets\cache"
+ set "STATUS_FILE=C:\Development\trainers\musubi-tuner\datasets\ernie_%DATASET_NAME%.txt"
+ set "DATASET_CONFIG=C:\Development\trainers\musubi-tuner\templates\ernie\dataset.currentset.toml"
+ set "PYTHON_EXE=C:\Development\trainers\musubi-tuner\venv\Scripts\python.exe"
+ set "ERNIE_VAE=C:/Development/ComfyUI/models/vae/flux2_vae.safetensors"
+ set "ERNIE_TEXT_ENCODER=C:/Development/ComfyUI/models/text_encoders/ministral-3-3b.safetensors"
+ set "COUNT_FILE=%TEMP%\musubi_ernie_image_count.txt"
+ set "EXIT_CODE=0"
+ set "IMAGE_COUNT=0"
+
+ if not exist "%SOURCE_DATASET_DIR%" (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Dataset directory not found: %SOURCE_DATASET_DIR%
+ exit /b 1
+ )
+
+ if not exist "%PYTHON_EXE%" (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Python executable not found: %PYTHON_EXE%
+ exit /b 1
+ )
+
+ "%PYTHON_EXE%" -c "from pathlib import Path; exts={'.png','.jpg','.jpeg','.webp','.bmp','.avif','.jxl'}; print(sum(1 for f in Path(r'%SOURCE_DATASET_DIR%').iterdir() if f.is_file() and f.suffix.lower() in exts))" > "%COUNT_FILE%"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Failed to count supported image files in: %SOURCE_DATASET_DIR%
+ exit /b 1
+ )
+
+ set /p IMAGE_COUNT=<"%COUNT_FILE%"
+ del /q "%COUNT_FILE%" >nul 2>nul
+
+ if "%IMAGE_COUNT%"=="0" (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] No supported image files found in: %SOURCE_DATASET_DIR%
+ exit /b 1
+ )
+
+ > "%STATUS_FILE%" echo starting
+
+ echo [INFO] Dataset name : %DATASET_NAME%
+ echo [INFO] Source dataset : %SOURCE_DATASET_DIR%
+ echo [INFO] Staging dir : %CURRENTSET_DIR%
+ echo [INFO] Cache dir : %CACHE_DIR%
+ echo [INFO] Status file : %STATUS_FILE%
+ echo [INFO] Dataset config : %DATASET_CONFIG%
+ echo [INFO] Image count : %IMAGE_COUNT%
+
+ if not exist "%CURRENTSET_DIR%" mkdir "%CURRENTSET_DIR%"
+ if not exist "%CACHE_DIR%" mkdir "%CACHE_DIR%"
+
+ echo [INFO] Clearing staging and cache directories...
+ powershell -NoProfile -Command "Get-ChildItem -LiteralPath '%CURRENTSET_DIR%' -Force -ErrorAction SilentlyContinue | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue; Get-ChildItem -LiteralPath '%CACHE_DIR%' -Force -ErrorAction SilentlyContinue | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Failed to clear staging or cache directories.
+ exit /b 1
+ )
+
+ echo [INFO] Copying dataset images to currentset...
+ powershell -NoProfile -Command "Copy-Item -LiteralPath (Get-ChildItem -LiteralPath '%SOURCE_DATASET_DIR%' -File | Where-Object { $_.Extension -in '.png','.jpg','.jpeg','.webp','.bmp','.avif','.jxl' } | Select-Object -ExpandProperty FullName) -Destination '%CURRENTSET_DIR%' -Force"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Failed to copy dataset images into currentset.
+ exit /b 1
+ )
+
+ echo [INFO] Creating empty caption files...
+ powershell -NoProfile -Command "Get-ChildItem -LiteralPath '%CURRENTSET_DIR%' -File | Where-Object { $_.Extension -in '.png','.jpg','.jpeg','.webp','.bmp','.avif','.jxl' } | ForEach-Object { $caption = [System.IO.Path]::ChangeExtension($_.FullName, '.txt'); if (-not (Test-Path -LiteralPath $caption)) { New-Item -ItemType File -Path $caption | Out-Null } }"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Failed to create empty caption files in currentset.
+ exit /b 1
+ )
+
+ echo [INFO] Running latent caching...
+ "%PYTHON_EXE%" src\musubi_tuner\ernie_image_cache_latents.py ^
+ --dataset_config "%DATASET_CONFIG%" ^
+ --vae "%ERNIE_VAE%" ^
+ --device cuda ^
+ --skip_existing
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Latent caching failed.
+ exit /b 1
+ )
+
+ echo [INFO] Running text encoder caching...
+ "%PYTHON_EXE%" src\musubi_tuner\ernie_image_cache_text_encoder_outputs.py ^
+ --dataset_config "%DATASET_CONFIG%" ^
+ --text_encoder "%ERNIE_TEXT_ENCODER%" ^
+ --device cuda ^
+ --batch_size 1 ^
+ --skip_existing
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Text encoder caching failed.
+ exit /b 1
+ )
+
+ > "%STATUS_FILE%" echo prepared
+ echo [INFO] Dataset prepared. Run train_only_ernie.bat %DATASET_NAME% to start training.
+ exit /b 0
training-scripts/musubi/ernie/train_dataset_ernie.bat ADDED
@@ -0,0 +1,164 @@
+ @echo off
+ setlocal EnableExtensions
+
+ cd /d "%~dp0\..\.."
+
+ if "%~1"=="" (
+ echo Usage: %~nx0 ^<dataset_name^>
+ echo Example: %~nx0 aubreyplaza
+ exit /b 1
+ )
+
+ set "DATASET_NAME=%~1"
+ set "SOURCE_DATASET_DIR=C:\Development\ai-toolkit\datasets\%DATASET_NAME%"
+ set "CURRENTSET_DIR=C:\Development\trainers\musubi-tuner\datasets\currentset"
+ set "CACHE_DIR=C:\Development\trainers\musubi-tuner\datasets\cache"
+ set "STATUS_FILE=C:\Development\trainers\musubi-tuner\datasets\ernie_%DATASET_NAME%.txt"
+ set "DATASET_CONFIG=C:\Development\trainers\musubi-tuner\templates\ernie\dataset.currentset.toml"
+ set "PYTHON_EXE=C:\Development\trainers\musubi-tuner\venv\Scripts\python.exe"
+ set "ERNIE_DIT=C:/Development/ComfyUI/models/diffusion_models/Ernie/ernie-image.safetensors"
+ set "ERNIE_VAE=C:/Development/ComfyUI/models/vae/flux2_vae.safetensors"
+ set "ERNIE_TEXT_ENCODER=C:/Development/ComfyUI/models/text_encoders/ministral-3-3b.safetensors"
+ set "COUNT_FILE=%TEMP%\musubi_ernie_image_count.txt"
+ set "EXIT_CODE=0"
+ set "IMAGE_COUNT=0"
+ set "OUTPUT_NAME=ernie_%DATASET_NAME%_v1"
+
+ if not exist "%SOURCE_DATASET_DIR%" (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Dataset directory not found: %SOURCE_DATASET_DIR%
+ exit /b 1
+ )
+
+ if not exist "%PYTHON_EXE%" (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Python executable not found: %PYTHON_EXE%
+ exit /b 1
+ )
+
+ "%PYTHON_EXE%" -c "from pathlib import Path; exts={'.png','.jpg','.jpeg','.webp','.bmp','.avif','.jxl'}; print(sum(1 for f in Path(r'%SOURCE_DATASET_DIR%').iterdir() if f.is_file() and f.suffix.lower() in exts))" > "%COUNT_FILE%"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Failed to count supported image files in: %SOURCE_DATASET_DIR%
+ exit /b 1
+ )
+
+ set /p IMAGE_COUNT=<"%COUNT_FILE%"
+ del /q "%COUNT_FILE%" >nul 2>nul
+
+ if "%IMAGE_COUNT%"=="0" (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] No supported image files found in: %SOURCE_DATASET_DIR%
+ exit /b 1
+ )
+
+ > "%STATUS_FILE%" echo starting
+
+ echo [INFO] Dataset name : %DATASET_NAME%
+ echo [INFO] Source dataset : %SOURCE_DATASET_DIR%
+ echo [INFO] Staging dir : %CURRENTSET_DIR%
+ echo [INFO] Cache dir : %CACHE_DIR%
+ echo [INFO] Status file : %STATUS_FILE%
+ echo [INFO] Dataset config : %DATASET_CONFIG%
+ echo [INFO] Output prefix : %OUTPUT_NAME%
+ echo [INFO] Image count : %IMAGE_COUNT%
+
+ if not exist "%CURRENTSET_DIR%" mkdir "%CURRENTSET_DIR%"
+ if not exist "%CACHE_DIR%" mkdir "%CACHE_DIR%"
+
+ powershell -NoProfile -Command "Get-ChildItem -LiteralPath '%CURRENTSET_DIR%' -Force -ErrorAction SilentlyContinue | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue; Get-ChildItem -LiteralPath '%CACHE_DIR%' -Force -ErrorAction SilentlyContinue | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Failed to clear staging or cache directories.
+ exit /b 1
+ )
+
+ powershell -NoProfile -Command "Copy-Item -LiteralPath (Get-ChildItem -LiteralPath '%SOURCE_DATASET_DIR%' -File | Where-Object { $_.Extension -in '.png','.jpg','.jpeg','.webp','.bmp','.avif','.jxl' } | Select-Object -ExpandProperty FullName) -Destination '%CURRENTSET_DIR%' -Force"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Failed to copy dataset images into currentset.
+ exit /b 1
+ )
+
+ powershell -NoProfile -Command "Get-ChildItem -LiteralPath '%CURRENTSET_DIR%' -File | Where-Object { $_.Extension -in '.png','.jpg','.jpeg','.webp','.bmp','.avif','.jxl' } | ForEach-Object { $caption = [System.IO.Path]::ChangeExtension($_.FullName, '.txt'); if (-not (Test-Path -LiteralPath $caption)) { New-Item -ItemType File -Path $caption | Out-Null } }"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Failed to create empty caption files in currentset.
+ set "EXIT_CODE=1"
+ goto cleanup
+ )
+
+ echo [INFO] Running latent caching...
+ "%PYTHON_EXE%" src\musubi_tuner\ernie_image_cache_latents.py ^
+ --dataset_config "%DATASET_CONFIG%" ^
+ --vae "%ERNIE_VAE%" ^
+ --device cuda ^
+ --skip_existing
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Latent caching failed.
+ set "EXIT_CODE=1"
+ goto cleanup
+ )
+
+ echo [INFO] Running text encoder caching...
+ "%PYTHON_EXE%" src\musubi_tuner\ernie_image_cache_text_encoder_outputs.py ^
+ --dataset_config "%DATASET_CONFIG%" ^
+ --text_encoder "%ERNIE_TEXT_ENCODER%" ^
+ --device cuda ^
+ --batch_size 1 ^
+ --skip_existing
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Text encoder caching failed.
+ set "EXIT_CODE=1"
+ goto cleanup
+ )
+
+ > "%STATUS_FILE%" echo running
+ echo [INFO] Running ERNIE-Image LoRA training...
+ "%PYTHON_EXE%" -m accelerate.commands.launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src\musubi_tuner\ernie_image_train_network.py ^
+ --mixed_precision bf16 ^
+ --dataset_config "%DATASET_CONFIG%" ^
+ --dit "%ERNIE_DIT%" ^
+ --vae "%ERNIE_VAE%" ^
+ --text_encoder "%ERNIE_TEXT_ENCODER%" ^
+ --sdpa ^
+ --timestep_sampling shift ^
+ --weighting_scheme none ^
+ --discrete_flow_shift 4.0 ^
+ --optimizer_type adamw8bit ^
+ --learning_rate 1e-4 ^
+ --gradient_checkpointing ^
+ --max_data_loader_n_workers 2 ^
+ --persistent_data_loader_workers ^
+ --fp8_base --fp8_scaled ^
+ --fp8_text_encoder ^
+ --blocks_to_swap 4 ^
+ --use_pinned_memory_for_block_swap ^
+ --network_module networks.lora_ernie_image ^
+ --network_dim 32 ^
+ --network_alpha 32 ^
+ --max_train_epochs 100 ^
+ --save_every_n_epochs 100 ^
+ --seed 42 ^
+ --output_dir output ^
+ --output_name "%OUTPUT_NAME%"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Training failed.
+ set "EXIT_CODE=1"
+ )
+
+ :cleanup
+ echo [INFO] Cleaning staging and cache directories...
+ powershell -NoProfile -Command "Get-ChildItem -LiteralPath '%CURRENTSET_DIR%' -Force -ErrorAction SilentlyContinue | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue; Get-ChildItem -LiteralPath '%CACHE_DIR%' -Force -ErrorAction SilentlyContinue | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue"
+
+ if not "%EXIT_CODE%"=="0" (
+ echo [ERROR] Wrapper finished with failures.
+ exit /b %EXIT_CODE%
+ )
+
+ > "%STATUS_FILE%" echo finished
+ echo [INFO] Completed successfully.
+ exit /b 0
training-scripts/musubi/ernie/train_only_ernie.bat ADDED
@@ -0,0 +1,68 @@
+ @echo off
+ setlocal EnableExtensions
+
+ cd /d "%~dp0\..\.."
+
+ if "%~1"=="" (
+ echo Usage: %~nx0 ^<dataset_name^>
+ echo Example: %~nx0 aubreyplaza
+ exit /b 1
+ )
+
+ set "DATASET_NAME=%~1"
+ set "STATUS_FILE=C:\Development\trainers\musubi-tuner\datasets\ernie_%DATASET_NAME%.txt"
+ set "DATASET_CONFIG=C:\Development\trainers\musubi-tuner\templates\ernie\dataset.currentset.toml"
+ set "PYTHON_EXE=C:\Development\trainers\musubi-tuner\venv\Scripts\python.exe"
+ set "ERNIE_DIT=C:/Development/ComfyUI/models/diffusion_models/Ernie/ernie-image.safetensors"
+ set "ERNIE_VAE=C:/Development/ComfyUI/models/vae/flux2_vae.safetensors"
+ set "ERNIE_TEXT_ENCODER=C:/Development/ComfyUI/models/text_encoders/ministral-3-3b.safetensors"
+ set "OUTPUT_NAME=ernie_%DATASET_NAME%_v1"
+
+ if not exist "%PYTHON_EXE%" (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Python executable not found: %PYTHON_EXE%
+ exit /b 1
+ )
+
+ echo [INFO] Dataset name : %DATASET_NAME%
+ echo [INFO] Dataset config : %DATASET_CONFIG%
+ echo [INFO] Output prefix : %OUTPUT_NAME%
+
+ > "%STATUS_FILE%" echo running
+ echo [INFO] Running ERNIE-Image LoRA training...
+ "%PYTHON_EXE%" -m accelerate.commands.launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src\musubi_tuner\ernie_image_train_network.py ^
+ --mixed_precision bf16 ^
+ --dataset_config "%DATASET_CONFIG%" ^
+ --dit "%ERNIE_DIT%" ^
+ --vae "%ERNIE_VAE%" ^
+ --text_encoder "%ERNIE_TEXT_ENCODER%" ^
+ --sdpa ^
+ --timestep_sampling shift ^
+ --weighting_scheme none ^
+ --discrete_flow_shift 4.0 ^
+ --optimizer_type adamw8bit ^
+ --learning_rate 1e-4 ^
+ --gradient_checkpointing ^
+ --max_data_loader_n_workers 2 ^
+ --persistent_data_loader_workers ^
+ --fp8_base --fp8_scaled ^
+ --fp8_text_encoder ^
+ --blocks_to_swap 4 ^
+ --use_pinned_memory_for_block_swap ^
+ --network_module networks.lora_ernie_image ^
+ --network_dim 32 ^
+ --network_alpha 32 ^
+ --max_train_epochs 100 ^
+ --save_every_n_epochs 100 ^
+ --seed 42 ^
+ --output_dir output ^
+ --output_name "%OUTPUT_NAME%"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Training failed.
+ exit /b 1
+ )
+
+ > "%STATUS_FILE%" echo finished
+ echo [INFO] Completed successfully.
+ exit /b 0
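
For reference, the staged ERNIE-Image flow is the call order the scripts themselves print: prepare first, then train-only, with train_dataset_ernie.bat as the single-shot alternative that also cleans up afterwards. A minimal usage sketch (assumes the wrappers are invoked from wherever they are deployed; "aubreyplaza" is just the example dataset name taken from the scripts' usage text):

REM Stage images, create empty captions, cache latents and text encoder outputs
call prepare_dataset_ernie.bat aubreyplaza
REM Reuse the prepared currentset/cache and run only the LoRA training step
call train_only_ernie.bat aubreyplaza
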
training-scripts/musubi/ltx23/dataset.currentset.toml ADDED
@@ -0,0 +1,15 @@
+ # Image-only LTX 2.3 dataset config for staged wrapper runs.
+ # The wrapper copies a selected source dataset into datasets/currentset,
+ # creates empty .txt sidecars there, then uses this config for caching/training.
+
+ [general]
+ resolution = [512, 512]
+ caption_extension = ".txt"
+ batch_size = 1
+ enable_bucket = true
+ bucket_no_upscale = false
+
+ [[datasets]]
+ image_directory = "C:/Development/trainers/musubi-tuner/datasets/currentset"
+ cache_directory = "C:/Development/trainers/musubi-tuner/datasets/cache"
+ num_repeats = 1
training-scripts/musubi/ltx23/prepare_dataset_ltx23.bat ADDED
@@ -0,0 +1,121 @@
+ @echo off
+ setlocal EnableExtensions
+
+ cd /d "%~dp0\..\.."
+
+ if "%~1"=="" (
+ echo Usage: %~nx0 ^<dataset_name^>
+ echo Example: %~nx0 aubreyplaza
+ exit /b 1
+ )
+
+ set "DATASET_NAME=%~1"
+ set "SOURCE_DATASET_DIR=C:\Development\ai-toolkit\datasets\%DATASET_NAME%"
+ set "CURRENTSET_DIR=C:\Development\trainers\musubi-tuner\datasets\currentset"
+ set "CACHE_DIR=C:\Development\trainers\musubi-tuner\datasets\cache"
+ set "STATUS_FILE=C:\Development\trainers\musubi-tuner\datasets\ltx23_%DATASET_NAME%.txt"
+ set "DATASET_CONFIG=C:\Development\trainers\musubi-tuner\templates\ltx23\dataset.currentset.toml"
+ set "PYTHON_EXE=C:\Development\trainers\musubi-tuner\venv\Scripts\python.exe"
+ set "LTX2_CHECKPOINT=C:/Development/ComfyUI/models/checkpoints/LTX2/ltx-2.3-22b-dev.safetensors"
+ set "GEMMA_SAFETENSORS=C:/Development/ComfyUI/models/text_encoders/gemma_3_12B_it_fp8_e4m3fn.safetensors"
+ set "COUNT_FILE=%TEMP%\musubi_ltx23_image_count.txt"
+ set "IMAGE_COUNT=0"
+
+ if not exist "%SOURCE_DATASET_DIR%" (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Dataset directory not found: %SOURCE_DATASET_DIR%
+ exit /b 1
+ )
+
+ if not exist "%PYTHON_EXE%" (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Python executable not found: %PYTHON_EXE%
+ exit /b 1
+ )
+
+ "%PYTHON_EXE%" -c "from pathlib import Path; exts={'.png','.jpg','.jpeg','.webp','.bmp','.avif','.jxl'}; print(sum(1 for f in Path(r'%SOURCE_DATASET_DIR%').iterdir() if f.is_file() and f.suffix.lower() in exts))" > "%COUNT_FILE%"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Failed to count supported image files in: %SOURCE_DATASET_DIR%
+ exit /b 1
+ )
+
+ set /p IMAGE_COUNT=<"%COUNT_FILE%"
+ del /q "%COUNT_FILE%" >nul 2>nul
+
+ if "%IMAGE_COUNT%"=="0" (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] No supported image files found in: %SOURCE_DATASET_DIR%
+ exit /b 1
+ )
+
+ > "%STATUS_FILE%" echo starting
+
+ echo [INFO] Dataset name : %DATASET_NAME%
+ echo [INFO] Source dataset : %SOURCE_DATASET_DIR%
+ echo [INFO] Staging dir : %CURRENTSET_DIR%
+ echo [INFO] Cache dir : %CACHE_DIR%
+ echo [INFO] Status file : %STATUS_FILE%
+ echo [INFO] Dataset config : %DATASET_CONFIG%
+ echo [INFO] Image count : %IMAGE_COUNT%
+
+ if not exist "%CURRENTSET_DIR%" mkdir "%CURRENTSET_DIR%"
+ if not exist "%CACHE_DIR%" mkdir "%CACHE_DIR%"
+
+ echo [INFO] Clearing staging and cache directories...
+ powershell -NoProfile -Command "Get-ChildItem -LiteralPath '%CURRENTSET_DIR%' -Force -ErrorAction SilentlyContinue | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue; Get-ChildItem -LiteralPath '%CACHE_DIR%' -Force -ErrorAction SilentlyContinue | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Failed to clear staging or cache directories.
+ exit /b 1
+ )
+
+ echo [INFO] Copying dataset images to currentset...
+ powershell -NoProfile -Command "Copy-Item -LiteralPath (Get-ChildItem -LiteralPath '%SOURCE_DATASET_DIR%' -File | Where-Object { $_.Extension -in '.png','.jpg','.jpeg','.webp','.bmp','.avif','.jxl' } | Select-Object -ExpandProperty FullName) -Destination '%CURRENTSET_DIR%' -Force"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Failed to copy dataset images into currentset.
+ exit /b 1
+ )
+
+ echo [INFO] Creating empty caption files...
+ powershell -NoProfile -Command "Get-ChildItem -LiteralPath '%CURRENTSET_DIR%' -File | Where-Object { $_.Extension -in '.png','.jpg','.jpeg','.webp','.bmp','.avif','.jxl' } | ForEach-Object { $caption = [System.IO.Path]::ChangeExtension($_.FullName, '.txt'); if (-not (Test-Path -LiteralPath $caption)) { New-Item -ItemType File -Path $caption | Out-Null } }"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Failed to create empty caption files in currentset.
+ exit /b 1
+ )
+
+ echo [INFO] Running latent caching...
+ "%PYTHON_EXE%" ltx2_cache_latents.py ^
+ --dataset_config "%DATASET_CONFIG%" ^
+ --ltx2_checkpoint "%LTX2_CHECKPOINT%" ^
+ --device cuda ^
+ --vae_dtype bf16 ^
+ --ltx2_mode video ^
+ --skip_existing
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Latent caching failed.
+ exit /b 1
+ )
+
+ echo [INFO] Running text encoder caching...
+ "%PYTHON_EXE%" ltx2_cache_text_encoder_outputs.py ^
+ --dataset_config "%DATASET_CONFIG%" ^
+ --ltx2_checkpoint "%LTX2_CHECKPOINT%" ^
+ --gemma_safetensors "%GEMMA_SAFETENSORS%" ^
+ --device cuda ^
+ --mixed_precision bf16 ^
+ --ltx2_mode video ^
+ --batch_size 1 ^
+ --skip_existing
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Text encoder caching failed.
+ exit /b 1
+ )
+
+ > "%STATUS_FILE%" echo prepared
+ echo [INFO] Dataset prepared. Run train_only_ltx23.bat %DATASET_NAME% to start training.
+ exit /b 0
training-scripts/musubi/ltx23/train_dataset_ltx23.bat ADDED
@@ -0,0 +1,142 @@
+ @echo off
+ setlocal EnableExtensions
+
+ cd /d "%~dp0\..\.."
+
+ if "%~1"=="" (
+ echo Usage: %~nx0 ^<dataset_name^>
+ echo Example: %~nx0 aubreyplaza
+ exit /b 1
+ )
+
+ set "DATASET_NAME=%~1"
+ set "SOURCE_DATASET_DIR=C:\Development\ai-toolkit\datasets\%DATASET_NAME%"
+ set "CURRENTSET_DIR=C:\Development\trainers\musubi-tuner\datasets\currentset"
+ set "CACHE_DIR=C:\Development\trainers\musubi-tuner\datasets\cache"
+ set "STATUS_FILE=C:\Development\trainers\musubi-tuner\datasets\ltx23_%DATASET_NAME%.txt"
+ set "DATASET_CONFIG=C:\Development\trainers\musubi-tuner\templates\ltx23\dataset.currentset.toml"
+ set "PYTHON_EXE=C:\Development\trainers\musubi-tuner\venv\Scripts\python.exe"
+ set "LTX2_CHECKPOINT=C:/Development/ComfyUI/models/checkpoints/LTX2/ltx-2.3-22b-dev.safetensors"
+ set "GEMMA_SAFETENSORS=C:/Development/ComfyUI/models/text_encoders/gemma_3_12B_it_fp8_e4m3fn.safetensors"
+ set "COUNT_FILE=%TEMP%\musubi_ltx23_image_count.txt"
+ set "EXIT_CODE=0"
+ set "IMAGE_COUNT=0"
+ set "OUTPUT_NAME=ltx23_%DATASET_NAME%_v1"
+
+ if not exist "%SOURCE_DATASET_DIR%" (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Dataset directory not found: %SOURCE_DATASET_DIR%
+ exit /b 1
+ )
+
+ if not exist "%PYTHON_EXE%" (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Python executable not found: %PYTHON_EXE%
+ exit /b 1
+ )
+
+ "%PYTHON_EXE%" -c "from pathlib import Path; exts={'.png','.jpg','.jpeg','.webp','.bmp','.avif','.jxl'}; print(sum(1 for f in Path(r'%SOURCE_DATASET_DIR%').iterdir() if f.is_file() and f.suffix.lower() in exts))" > "%COUNT_FILE%"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Failed to count supported image files in: %SOURCE_DATASET_DIR%
+ exit /b 1
+ )
+
+ set /p IMAGE_COUNT=<"%COUNT_FILE%"
+ del /q "%COUNT_FILE%" >nul 2>nul
+
+ if "%IMAGE_COUNT%"=="0" (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] No supported image files found in: %SOURCE_DATASET_DIR%
+ exit /b 1
+ )
+
+ > "%STATUS_FILE%" echo starting
+
+ echo [INFO] Dataset name : %DATASET_NAME%
+ echo [INFO] Source dataset : %SOURCE_DATASET_DIR%
+ echo [INFO] Staging dir : %CURRENTSET_DIR%
+ echo [INFO] Cache dir : %CACHE_DIR%
+ echo [INFO] Status file : %STATUS_FILE%
+ echo [INFO] Dataset config : %DATASET_CONFIG%
+ echo [INFO] Output prefix : %OUTPUT_NAME%
+ echo [INFO] Image count : %IMAGE_COUNT%
+
+ if not exist "%CURRENTSET_DIR%" mkdir "%CURRENTSET_DIR%"
+ if not exist "%CACHE_DIR%" mkdir "%CACHE_DIR%"
+
+ powershell -NoProfile -Command "Get-ChildItem -LiteralPath '%CURRENTSET_DIR%' -Force -ErrorAction SilentlyContinue | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue; Get-ChildItem -LiteralPath '%CACHE_DIR%' -Force -ErrorAction SilentlyContinue | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Failed to clear staging or cache directories.
+ exit /b 1
+ )
+
+ powershell -NoProfile -Command "Copy-Item -LiteralPath (Get-ChildItem -LiteralPath '%SOURCE_DATASET_DIR%' -File | Where-Object { $_.Extension -in '.png','.jpg','.jpeg','.webp','.bmp','.avif','.jxl' } | Select-Object -ExpandProperty FullName) -Destination '%CURRENTSET_DIR%' -Force"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Failed to copy dataset images into currentset.
+ exit /b 1
+ )
+
+ powershell -NoProfile -Command "Get-ChildItem -LiteralPath '%CURRENTSET_DIR%' -File | Where-Object { $_.Extension -in '.png','.jpg','.jpeg','.webp','.bmp','.avif','.jxl' } | ForEach-Object { $caption = [System.IO.Path]::ChangeExtension($_.FullName, '.txt'); if (-not (Test-Path -LiteralPath $caption)) { New-Item -ItemType File -Path $caption | Out-Null } }"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Failed to create empty caption files in currentset.
+ set "EXIT_CODE=1"
+ goto cleanup
+ )
+
+ echo [INFO] Running latent caching...
+ "%PYTHON_EXE%" ltx2_cache_latents.py ^
+ --dataset_config "%DATASET_CONFIG%" ^
+ --ltx2_checkpoint "%LTX2_CHECKPOINT%" ^
+ --device cuda ^
+ --vae_dtype bf16 ^
+ --ltx2_mode video ^
+ --skip_existing
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Latent caching failed.
+ set "EXIT_CODE=1"
+ goto cleanup
+ )
+
+ echo [INFO] Running text encoder caching...
+ "%PYTHON_EXE%" ltx2_cache_text_encoder_outputs.py ^
+ --dataset_config "%DATASET_CONFIG%" ^
+ --ltx2_checkpoint "%LTX2_CHECKPOINT%" ^
+ --gemma_safetensors "%GEMMA_SAFETENSORS%" ^
+ --device cuda ^
+ --mixed_precision bf16 ^
+ --ltx2_mode video ^
+ --batch_size 1 ^
+ --skip_existing
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Text encoder caching failed.
+ set "EXIT_CODE=1"
+ goto cleanup
+ )
+
+ > "%STATUS_FILE%" echo running
+ echo [INFO] Running optimized training...
+ "%PYTHON_EXE%" -m accelerate.commands.launch --num_cpu_threads_per_process 1 --mixed_precision bf16 ltx2_train_network.py --mixed_precision bf16 --dataset_config "%DATASET_CONFIG%" --gemma_safetensors "%GEMMA_SAFETENSORS%" --ltx2_checkpoint "%LTX2_CHECKPOINT%" --ltx_version 2.3 --ltx_version_check_mode error --ltx2_mode video --lora_target_preset video_sa_ca_ff --fp8_base --fp8_scaled --blocks_to_swap 4 --use_pinned_memory_for_block_swap --sdpa --gradient_checkpointing --learning_rate 1e-4 --optimizer_type AdamW --lr_scheduler constant_with_warmup --lr_warmup_steps 100 --max_train_epochs 120 --max_data_loader_n_workers 8 --persistent_data_loader_workers --save_every_n_epochs 120 --network_module networks.lora_ltx2 --network_dim 32 --network_alpha 32 --no_save_original_lora --timestep_sampling shifted_logit_normal --output_dir output --output_name "%OUTPUT_NAME%"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Training failed.
+ set "EXIT_CODE=1"
+ )
+
+ :cleanup
+ echo [INFO] Cleaning staging and cache directories...
+ powershell -NoProfile -Command "Get-ChildItem -LiteralPath '%CURRENTSET_DIR%' -Force -ErrorAction SilentlyContinue | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue; Get-ChildItem -LiteralPath '%CACHE_DIR%' -Force -ErrorAction SilentlyContinue | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue"
+
+ if not "%EXIT_CODE%"=="0" (
+ echo [ERROR] Wrapper finished with failures.
+ exit /b %EXIT_CODE%
+ )
+
+ > "%STATUS_FILE%" echo finished
+ echo [INFO] Completed successfully.
+ exit /b 0
training-scripts/musubi/ltx23/train_only_ltx23.bat ADDED
@@ -0,0 +1,41 @@
+ @echo off
+ setlocal EnableExtensions
+
+ cd /d "%~dp0\..\.."
+
+ if "%~1"=="" (
+ echo Usage: %~nx0 ^<dataset_name^>
+ echo Example: %~nx0 aubreyplaza
+ exit /b 1
+ )
+
+ set "DATASET_NAME=%~1"
+ set "STATUS_FILE=C:\Development\trainers\musubi-tuner\datasets\ltx23_%DATASET_NAME%.txt"
+ set "DATASET_CONFIG=C:\Development\trainers\musubi-tuner\templates\ltx23\dataset.currentset.toml"
+ set "PYTHON_EXE=C:\Development\trainers\musubi-tuner\venv\Scripts\python.exe"
+ set "LTX2_CHECKPOINT=C:/Development/ComfyUI/models/checkpoints/LTX2/ltx-2.3-22b-dev.safetensors"
+ set "GEMMA_SAFETENSORS=C:/Development/ComfyUI/models/text_encoders/gemma_3_12B_it_fp8_e4m3fn.safetensors"
+ set "OUTPUT_NAME=ltx23_%DATASET_NAME%_v1"
+
+ if not exist "%PYTHON_EXE%" (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Python executable not found: %PYTHON_EXE%
+ exit /b 1
+ )
+
+ echo [INFO] Dataset name : %DATASET_NAME%
+ echo [INFO] Dataset config : %DATASET_CONFIG%
+ echo [INFO] Output prefix : %OUTPUT_NAME%
+
+ > "%STATUS_FILE%" echo running
+ echo [INFO] Running optimized training...
+ "%PYTHON_EXE%" -m accelerate.commands.launch --num_cpu_threads_per_process 1 --mixed_precision bf16 ltx2_train_network.py --mixed_precision bf16 --dataset_config "%DATASET_CONFIG%" --gemma_safetensors "%GEMMA_SAFETENSORS%" --ltx2_checkpoint "%LTX2_CHECKPOINT%" --ltx_version 2.3 --ltx_version_check_mode error --ltx2_mode video --lora_target_preset video_sa_ca_ff --fp8_base --fp8_scaled --blocks_to_swap 4 --use_pinned_memory_for_block_swap --sdpa --gradient_checkpointing --learning_rate 1e-4 --optimizer_type AdamW --lr_scheduler constant_with_warmup --lr_warmup_steps 100 --max_train_epochs 120 --max_data_loader_n_workers 8 --persistent_data_loader_workers --save_every_n_epochs 120 --network_module networks.lora_ltx2 --network_dim 32 --network_alpha 32 --no_save_original_lora --timestep_sampling shifted_logit_normal --output_dir output --output_name "%OUTPUT_NAME%"
+ if errorlevel 1 (
+ > "%STATUS_FILE%" echo errored
+ echo [ERROR] Training failed.
+ exit /b 1
+ )
+
+ > "%STATUS_FILE%" echo finished
+ echo [INFO] Completed successfully.
+ exit /b 0
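
The LTX 2.3 wrappers follow the same pattern: prepare_dataset_ltx23.bat stages and caches, train_only_ltx23.bat trains against the prepared currentset, and train_dataset_ltx23.bat does both plus cleanup in one run. A minimal usage sketch (same assumptions as the ERNIE example above; the dataset name is only illustrative):

call prepare_dataset_ltx23.bat aubreyplaza
call train_only_ltx23.bat aubreyplaza
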
training-scripts/onetrainer/ernie_template.json ADDED
@@ -0,0 +1,525 @@
+ {
+ "__version": 10,
+ "training_method": "LORA",
+ "model_type": "ERNIE",
+ "debug_mode": false,
+ "debug_dir": "debug",
+ "workspace_dir": "workspace/run",
+ "cache_dir": "workspace-cache/run",
+ "tensorboard": true,
+ "tensorboard_expose": false,
+ "tensorboard_always_on": false,
+ "tensorboard_port": 6006,
+ "validation": false,
+ "validate_after": 1,
+ "validate_after_unit": "EPOCH",
+ "continue_last_backup": false,
+ "prevent_overwrites": false,
+ "include_train_config": "NONE",
+ "multi_gpu": false,
+ "device_indexes": "",
+ "gradient_reduce_precision": "FLOAT_32_STOCHASTIC",
+ "fused_gradient_reduce": true,
+ "async_gradient_reduce": true,
+ "async_gradient_reduce_buffer": 100,
+ "base_model_name": "baidu/ERNIE-Image",
+ "output_dtype": "BFLOAT_16",
+ "output_model_format": "SAFETENSORS",
+ "output_model_destination": "models/ernie_[person_to_train]_v1_onetrainer.safetensors",
+ "gradient_checkpointing": "ON",
+ "enable_async_offloading": true,
+ "enable_activation_offloading": true,
+ "layer_offload_fraction": 0.0,
+ "force_circular_padding": false,
+ "compile": true,
+ "concept_file_name": "training_concepts/concepts.json",
+ "concepts": [
+ {
+ "__version": 2,
+ "image": {
+ "__version": 0,
+ "enable_crop_jitter": true,
+ "enable_random_flip": false,
+ "enable_fixed_flip": false,
+ "enable_random_rotate": false,
+ "enable_fixed_rotate": false,
+ "random_rotate_max_angle": 0.0,
+ "enable_random_brightness": false,
+ "enable_fixed_brightness": false,
+ "random_brightness_max_strength": 0.0,
+ "enable_random_contrast": false,
+ "enable_fixed_contrast": false,
+ "random_contrast_max_strength": 0.0,
+ "enable_random_saturation": false,
+ "enable_fixed_saturation": false,
+ "random_saturation_max_strength": 0.0,
+ "enable_random_hue": false,
+ "enable_fixed_hue": false,
+ "random_hue_max_strength": 0.0,
+ "enable_resolution_override": false,
+ "resolution_override": "512",
+ "enable_random_circular_mask_shrink": false,
+ "enable_random_mask_rotate_crop": false
+ },
+ "text": {
+ "__version": 0,
+ "prompt_source": "sample",
+ "prompt_path": "",
+ "enable_tag_shuffling": false,
+ "tag_delimiter": ",",
+ "keep_tags_count": 1,
+ "tag_dropout_enable": false,
+ "tag_dropout_mode": "FULL",
+ "tag_dropout_probability": 0.0,
+ "tag_dropout_special_tags_mode": "NONE",
+ "tag_dropout_special_tags": "",
+ "tag_dropout_special_tags_regex": false,
+ "caps_randomize_enable": false,
+ "caps_randomize_mode": "capslock, title, first, random",
+ "caps_randomize_probability": 0.0,
+ "caps_randomize_lowercase": false
+ },
+ "name": "",
+ "path": "C:/Development/ai-toolkit/datasets/[person_to_train]",
+ "seed": -469069486,
+ "enabled": true,
+ "type": "STANDARD",
+ "include_subdirectories": false,
+ "image_variations": 1,
+ "text_variations": 1,
+ "balancing": 1.0,
+ "balancing_strategy": "REPEATS",
+ "loss_weight": 1.0,
+ "concept_stats": {}
+ }
+ ],
+ "aspect_ratio_bucketing": true,
+ "latent_caching": true,
+ "clear_cache_before_training": true,
+ "learning_rate_scheduler": "CONSTANT",
+ "custom_learning_rate_scheduler": null,
+ "scheduler_params": [],
+ "learning_rate": 0.0003,
+ "learning_rate_warmup_steps": 200.0,
+ "learning_rate_cycles": 1.0,
+ "learning_rate_min_factor": 0.0,
+ "epochs": 120,
+ "batch_size": 2,
+ "gradient_accumulation_steps": 1,
+ "ema": "OFF",
+ "ema_decay": 0.999,
+ "ema_update_step_interval": 5,
+ "dataloader_threads": 1,
+ "train_device": "cuda",
+ "temp_device": "cpu",
+ "train_dtype": "BFLOAT_16",
+ "fallback_train_dtype": "BFLOAT_16",
+ "enable_autocast_cache": true,
+ "only_cache": false,
+ "resolution": "512",
+ "frames": "25",
+ "mse_strength": 1.0,
+ "mae_strength": 0.0,
+ "log_cosh_strength": 0.0,
+ "huber_strength": 0.0,
+ "huber_delta": 1.0,
+ "vb_loss_strength": 1.0,
+ "loss_weight_fn": "CONSTANT",
+ "loss_weight_strength": 5.0,
+ "dropout_probability": 0.0,
+ "loss_scaler": "NONE",
+ "learning_rate_scaler": "NONE",
+ "clip_grad_norm": 1.0,
+ "offset_noise_weight": 0.0,
+ "generalized_offset_noise": false,
+ "perturbation_noise_weight": 0.0,
+ "rescale_noise_scheduler_to_zero_terminal_snr": false,
+ "force_v_prediction": false,
+ "force_epsilon_prediction": false,
+ "min_noising_strength": 0.0,
+ "max_noising_strength": 1.0,
+ "timestep_distribution": "LOGIT_NORMAL",
+ "noising_weight": 0.0,
+ "noising_bias": 0.0,
+ "timestep_shift": 1.0,
+ "dynamic_timestep_shifting": false,
+ "unet": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 0,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "prior": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 0,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "transformer": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 0,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "INT_W8A8",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "quantization": {
+ "__version": 0,
+ "layer_filter": "layers",
+ "layer_filter_preset": "blocks",
+ "layer_filter_regex": false,
+ "svd_dtype": "NONE",
+ "svd_rank": 16,
+ "cache_dir": null
+ },
+ "text_encoder": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": false,
+ "stop_training_after": 30,
+ "stop_training_after_unit": "EPOCH",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_8",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "text_encoder_layer_skip": 0,
+ "text_encoder_sequence_length": 512,
+ "text_encoder_2": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 30,
+ "stop_training_after_unit": "EPOCH",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "text_encoder_2_layer_skip": 0,
+ "text_encoder_2_sequence_length": 77,
+ "text_encoder_3": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 30,
+ "stop_training_after_unit": "EPOCH",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "text_encoder_3_layer_skip": 0,
+ "text_encoder_4": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 30,
+ "stop_training_after_unit": "EPOCH",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "text_encoder_4_layer_skip": 0,
+ "vae": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": null,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "effnet_encoder": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": null,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "decoder": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": null,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "decoder_text_encoder": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": null,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "decoder_vqgan": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": null,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "masked_training": false,
+ "unmasked_probability": 0.1,
+ "unmasked_weight": 0.1,
+ "normalize_masked_area_loss": false,
+ "masked_prior_preservation_weight": 0.0,
+ "custom_conditioning_image": false,
+ "layer_filter": "self_attention,mlp",
+ "layer_filter_preset": "attn-mlp",
+ "layer_filter_regex": false,
+ "embedding_learning_rate": null,
+ "preserve_embedding_norm": false,
+ "embedding": {
+ "__version": 0,
+ "uuid": "55a5c9af-2ee6-4098-97a3-cc66342dc887",
+ "model_name": "",
+ "placeholder": "<embedding>",
+ "train": true,
+ "stop_training_after": null,
+ "stop_training_after_unit": "NEVER",
+ "token_count": 1,
+ "initial_embedding_text": "*",
+ "is_output_embedding": false
+ },
+ "additional_embeddings": [],
+ "embedding_weight_dtype": "FLOAT_32",
+ "cloud": {
+ "__version": 0,
+ "enabled": false,
+ "type": "RUNPOD",
+ "file_sync": "NATIVE_SCP",
+ "create": true,
+ "name": "OneTrainer",
+ "tensorboard_tunnel": true,
+ "sub_type": "",
+ "gpu_type": "",
+ "volume_size": 100,
+ "min_download": 0,
+ "remote_dir": "/workspace",
+ "huggingface_cache_dir": "/workspace/huggingface_cache",
+ "onetrainer_dir": "/workspace/OneTrainer",
+ "install_cmd": "git clone https://github.com/Nerogar/OneTrainer",
+ "install_onetrainer": true,
+ "update_onetrainer": true,
+ "detach_trainer": false,
+ "run_id": "job1",
+ "download_samples": true,
+ "download_output_model": true,
+ "download_saves": true,
+ "download_backups": false,
+ "download_tensorboard": false,
+ "delete_workspace": false,
+ "on_finish": "NONE",
+ "on_error": "NONE",
+ "on_detached_finish": "NONE",
+ "on_detached_error": "NONE"
+ },
+ "peft_type": "LORA",
+ "lora_model_name": "",
+ "lora_rank": 16,
+ "lora_alpha": 1.0,
+ "lora_decompose": false,
+ "lora_decompose_norm_epsilon": true,
+ "lora_decompose_output_axis": false,
+ "lora_weight_dtype": "FLOAT_32",
+ "bundle_additional_embeddings": true,
+ "oft_block_size": 32,
+ "oft_coft": false,
+ "coft_eps": 0.0001,
+ "oft_block_share": false,
+ "optimizer": {
+ "__version": 0,
+ "optimizer": "ADAMW",
+ "adam_w_mode": false,
+ "alpha": null,
+ "amsgrad": false,
+ "beta1": 0.9,
+ "beta2": 0.999,
+ "beta3": null,
+ "bias_correction": false,
+ "block_wise": false,
+ "capturable": false,
+ "centered": false,
+ "clip_threshold": null,
+ "d0": null,
+ "d_coef": null,
+ "dampening": null,
+ "decay_rate": null,
+ "decouple": false,
+ "differentiable": false,
+ "eps": 1e-8,
+ "eps2": null,
+ "foreach": false,
+ "fsdp_in_use": false,
+ "fused": true,
+ "fused_back_pass": false,
+ "growth_rate": null,
+ "initial_accumulator_value": null,
+ "initial_accumulator": null,
+ "is_paged": false,
+ "log_every": null,
+ "lr_decay": null,
+ "max_unorm": null,
+ "maximize": false,
+ "min_8bit_size": null,
+ "quant_block_size": null,
+ "momentum": null,
+ "nesterov": false,
+ "no_prox": false,
+ "optim_bits": null,
+ "percentile_clipping": null,
+ "r": null,
+ "relative_step": false,
+ "safeguard_warmup": false,
+ "scale_parameter": false,
+ "stochastic_rounding": false,
+ "use_bias_correction": false,
+ "use_triton": false,
+ "warmup_init": false,
+ "weight_decay": 0.01,
+ "weight_lr_power": null,
+ "decoupled_decay": false,
+ "fixed_decay": false,
+ "rectify": false,
+ "degenerated_to_sgd": false,
+ "k": null,
+ "xi": null,
+ "n_sma_threshold": null,
+ "ams_bound": false,
+ "adanorm": false,
+ "adam_debias": false,
+ "slice_p": null,
+ "cautious": false,
+ "weight_decay_by_lr": true,
+ "prodigy_steps": null,
+ "use_speed": false,
+ "split_groups": true,
+ "split_groups_mean": true,
+ "factored": true,
+ "factored_fp32": true,
+ "use_stableadamw": true,
+ "use_cautious": false,
+ "use_grams": false,
+ "use_adopt": false,
+ "d_limiter": true,
+ "use_schedulefree": true,
+ "use_orthograd": false,
+ "nnmf_factor": false,
+ "orthogonal_gradient": false,
+ "use_atan2": false,
+ "use_AdEMAMix": false,
+ "beta3_ema": null,
+ "alpha_grad": null,
+ "beta1_warmup": null,
+ "min_beta1": null,
+ "Simplified_AdEMAMix": false,
+ "kourkoutas_beta": false,
+ "schedulefree_c": null,
+ "ns_steps": null,
+ "MuonWithAuxAdam": false,
+ "muon_hidden_layers": null,
+ "muon_adam_regex": false,
+ "muon_adam_lr": null,
+ "muon_te1_adam_lr": null,
+ "muon_te2_adam_lr": null,
+ "muon_adam_config": {},
+ "rms_rescaling": true,
+ "normuon_variant": false,
+ "beta2_normuon": null,
+ "low_rank_ortho": false,
+ "ortho_rank": null,
+ "accelerated_ns": false,
+ "cautious_wd": false,
+ "approx_mars": false,
+ "auto_kappa_p": false,
+ "compile": false
+ },
+ "optimizer_defaults": {},
+ "sample_definition_file_name": "training_samples/samples.json",
+ "samples": [],
+ "sample_after": 10,
+ "sample_after_unit": "MINUTE",
+ "sample_skip_first": 0,
+ "sample_image_format": "JPG",
+ "sample_video_format": "MP4",
+ "sample_audio_format": "MP3",
+ "samples_to_tensorboard": true,
+ "non_ema_sampling": true,
+ "backup_after": 30,
+ "backup_after_unit": "MINUTE",
+ "rolling_backup": true,
+ "rolling_backup_count": 2,
+ "backup_before_save": true,
+ "save_every": 0,
+ "save_every_unit": "NEVER",
+ "save_skip_first": 0,
+ "save_filename_prefix": ""
+ }
training-scripts/onetrainer/fk9_prodigy_template.json ADDED
@@ -0,0 +1,530 @@
+ {
+ "__version": 10,
+ "training_method": "LORA",
+ "model_type": "FLUX_2",
+ "debug_mode": false,
+ "debug_dir": "debug",
+ "workspace_dir": "workspace/run",
+ "cache_dir": "workspace-cache/run",
+ "tensorboard": true,
+ "tensorboard_expose": false,
+ "tensorboard_always_on": false,
+ "tensorboard_port": 6006,
+ "validation": false,
+ "validate_after": 1,
+ "validate_after_unit": "EPOCH",
+ "continue_last_backup": false,
+ "prevent_overwrites": false,
+ "include_train_config": "NONE",
+ "multi_gpu": false,
+ "device_indexes": "",
+ "gradient_reduce_precision": "FLOAT_32_STOCHASTIC",
+ "fused_gradient_reduce": true,
+ "async_gradient_reduce": true,
+ "async_gradient_reduce_buffer": 100,
+ "base_model_name": "black-forest-labs/FLUX.2-klein-base-9B",
+ "output_dtype": "BFLOAT_16",
+ "output_model_format": "SAFETENSORS",
+ "output_model_destination": "models/fk9_[person_to_train]_v1_prodigy.safetensors",
+ "gradient_checkpointing": "ON",
+ "enable_async_offloading": true,
+ "enable_activation_offloading": true,
+ "layer_offload_fraction": 0.0,
+ "force_circular_padding": false,
+ "compile": true,
+ "concept_file_name": "training_concepts/concepts.json",
+ "concepts": [
+ {
+ "__version": 2,
+ "image": {
+ "__version": 0,
+ "enable_crop_jitter": true,
+ "enable_random_flip": false,
+ "enable_fixed_flip": false,
+ "enable_random_rotate": false,
+ "enable_fixed_rotate": false,
+ "random_rotate_max_angle": 0.0,
+ "enable_random_brightness": false,
+ "enable_fixed_brightness": false,
+ "random_brightness_max_strength": 0.0,
+ "enable_random_contrast": false,
+ "enable_fixed_contrast": false,
+ "random_contrast_max_strength": 0.0,
+ "enable_random_saturation": false,
+ "enable_fixed_saturation": false,
+ "random_saturation_max_strength": 0.0,
+ "enable_random_hue": false,
+ "enable_fixed_hue": false,
+ "random_hue_max_strength": 0.0,
+ "enable_resolution_override": false,
+ "resolution_override": "512",
+ "enable_random_circular_mask_shrink": false,
+ "enable_random_mask_rotate_crop": false
+ },
+ "text": {
+ "__version": 0,
+ "prompt_source": "sample",
+ "prompt_path": "",
+ "enable_tag_shuffling": false,
+ "tag_delimiter": ",",
+ "keep_tags_count": 1,
+ "tag_dropout_enable": false,
+ "tag_dropout_mode": "FULL",
+ "tag_dropout_probability": 0.0,
+ "tag_dropout_special_tags_mode": "NONE",
+ "tag_dropout_special_tags": "",
+ "tag_dropout_special_tags_regex": false,
+ "caps_randomize_enable": false,
+ "caps_randomize_mode": "capslock, title, first, random",
+ "caps_randomize_probability": 0.0,
+ "caps_randomize_lowercase": false
+ },
+ "name": "",
+ "path": "C:/Development/ai-toolkit/datasets/[person_to_train]",
+ "seed": -469069486,
+ "enabled": true,
+ "type": "STANDARD",
+ "include_subdirectories": false,
+ "image_variations": 1,
+ "text_variations": 1,
+ "balancing": 1.0,
+ "balancing_strategy": "REPEATS",
+ "loss_weight": 1.0,
+ "concept_stats": {}
+ }
+ ],
+ "aspect_ratio_bucketing": true,
+ "latent_caching": true,
+ "clear_cache_before_training": true,
+ "learning_rate_scheduler": "CONSTANT",
+ "custom_learning_rate_scheduler": null,
+ "scheduler_params": [],
+ "learning_rate": 1.0,
+ "learning_rate_warmup_steps": 200.0,
+ "learning_rate_cycles": 1.0,
+ "learning_rate_min_factor": 0.0,
+ "epochs": 100,
+ "batch_size": 2,
+ "gradient_accumulation_steps": 1,
+ "ema": "OFF",
+ "ema_decay": 0.999,
+ "ema_update_step_interval": 5,
+ "dataloader_threads": 1,
+ "train_device": "cuda",
+ "temp_device": "cpu",
+ "train_dtype": "BFLOAT_16",
+ "fallback_train_dtype": "BFLOAT_16",
+ "enable_autocast_cache": true,
+ "only_cache": false,
+ "resolution": "512",
+ "frames": "25",
+ "mse_strength": 1.0,
+ "mae_strength": 0.0,
+ "log_cosh_strength": 0.0,
+ "huber_strength": 0.0,
+ "huber_delta": 1.0,
+ "vb_loss_strength": 1.0,
+ "loss_weight_fn": "CONSTANT",
+ "loss_weight_strength": 5.0,
+ "dropout_probability": 0.0,
+ "loss_scaler": "NONE",
+ "learning_rate_scaler": "NONE",
+ "clip_grad_norm": 1.0,
+ "offset_noise_weight": 0.0,
+ "generalized_offset_noise": false,
+ "perturbation_noise_weight": 0.0,
+ "rescale_noise_scheduler_to_zero_terminal_snr": false,
+ "force_v_prediction": false,
+ "force_epsilon_prediction": false,
+ "min_noising_strength": 0.0,
+ "max_noising_strength": 1.0,
+ "timestep_distribution": "LOGIT_NORMAL",
+ "noising_weight": 0.0,
+ "noising_bias": 0.0,
+ "timestep_shift": 1.0,
+ "dynamic_timestep_shifting": false,
+ "unet": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 0,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "prior": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 0,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "transformer": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 0,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_8",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "quantization": {
+ "__version": 0,
+ "layer_filter": "transformer_block",
+ "layer_filter_preset": "blocks",
+ "layer_filter_regex": false,
+ "svd_dtype": "NONE",
+ "svd_rank": 16,
+ "cache_dir": "workspace-cache/run/quantization"
+ },
+ "text_encoder": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": false,
+ "stop_training_after": 30,
+ "stop_training_after_unit": "EPOCH",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_8",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "text_encoder_layer_skip": 0,
+ "text_encoder_sequence_length": 512,
+ "text_encoder_2": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 30,
+ "stop_training_after_unit": "EPOCH",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "text_encoder_2_layer_skip": 0,
+ "text_encoder_2_sequence_length": 77,
+ "text_encoder_3": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 30,
+ "stop_training_after_unit": "EPOCH",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "text_encoder_3_layer_skip": 0,
+ "text_encoder_4": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 30,
+ "stop_training_after_unit": "EPOCH",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "text_encoder_4_layer_skip": 0,
+ "vae": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": null,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "effnet_encoder": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": null,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "decoder": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": null,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "decoder_text_encoder": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": null,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "decoder_vqgan": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": null,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "masked_training": false,
+ "unmasked_probability": 0.1,
+ "unmasked_weight": 0.1,
+ "normalize_masked_area_loss": false,
+ "masked_prior_preservation_weight": 0.0,
+ "custom_conditioning_image": false,
+ "layer_filter": "transformer_blocks",
+ "layer_filter_preset": "transformer_block",
+ "layer_filter_regex": false,
+ "embedding_learning_rate": null,
+ "preserve_embedding_norm": false,
+ "embedding": {
+ "__version": 0,
+ "uuid": "32cefb93-f7d2-4083-be65-f367c7ebccb5",
+ "model_name": "",
+ "placeholder": "<embedding>",
345
+ "train": true,
346
+ "stop_training_after": null,
347
+ "stop_training_after_unit": "NEVER",
348
+ "token_count": 1,
349
+ "initial_embedding_text": "*",
350
+ "is_output_embedding": false
351
+ },
352
+ "additional_embeddings": [],
353
+ "embedding_weight_dtype": "FLOAT_32",
354
+ "cloud": {
355
+ "__version": 0,
356
+ "enabled": false,
357
+ "type": "RUNPOD",
358
+ "file_sync": "NATIVE_SCP",
359
+ "create": true,
360
+ "name": "OneTrainer",
361
+ "tensorboard_tunnel": true,
362
+ "sub_type": "",
363
+ "gpu_type": "",
364
+ "volume_size": 100,
365
+ "min_download": 0,
366
+ "remote_dir": "/workspace",
367
+ "huggingface_cache_dir": "/workspace/huggingface_cache",
368
+ "onetrainer_dir": "/workspace/OneTrainer",
369
+ "install_cmd": "git clone https://github.com/Nerogar/OneTrainer",
370
+ "install_onetrainer": true,
371
+ "update_onetrainer": true,
372
+ "detach_trainer": false,
373
+ "run_id": "job1",
374
+ "download_samples": true,
375
+ "download_output_model": true,
376
+ "download_saves": true,
377
+ "download_backups": false,
378
+ "download_tensorboard": false,
379
+ "delete_workspace": false,
380
+ "on_finish": "NONE",
381
+ "on_error": "NONE",
382
+ "on_detached_finish": "NONE",
383
+ "on_detached_error": "NONE"
384
+ },
385
+ "peft_type": "LORA",
386
+ "lora_model_name": "",
387
+ "lora_rank": 16,
388
+ "lora_alpha": 1.0,
389
+ "lora_decompose": false,
390
+ "lora_decompose_norm_epsilon": true,
391
+ "lora_decompose_output_axis": false,
392
+ "lora_weight_dtype": "FLOAT_32",
393
+ "bundle_additional_embeddings": true,
394
+ "oft_block_size": 32,
395
+ "oft_coft": false,
396
+ "coft_eps": 0.0001,
397
+ "oft_block_share": false,
398
+ "optimizer": {
399
+ "__version": 0,
400
+ "optimizer": "PRODIGY_ADV",
401
+ "adam_w_mode": false,
402
+ "alpha": 5.0,
403
+ "amsgrad": false,
404
+ "beta1": 0.9,
405
+ "beta2": 0.99,
406
+ "beta3": null,
407
+ "bias_correction": false,
408
+ "block_wise": false,
409
+ "capturable": false,
410
+ "centered": false,
411
+ "clip_threshold": null,
412
+ "d0": 1e-6,
413
+ "d_coef": 1.0,
414
+ "dampening": null,
415
+ "decay_rate": null,
416
+ "decouple": false,
417
+ "differentiable": false,
418
+ "eps": 1e-8,
419
+ "eps2": null,
420
+ "foreach": false,
421
+ "fsdp_in_use": false,
422
+ "fused": false,
423
+ "fused_back_pass": false,
424
+ "growth_rate": "inf",
425
+ "initial_accumulator_value": null,
426
+ "initial_accumulator": null,
427
+ "is_paged": false,
428
+ "log_every": null,
429
+ "lr_decay": null,
430
+ "max_unorm": null,
431
+ "maximize": false,
432
+ "min_8bit_size": null,
433
+ "quant_block_size": null,
434
+ "momentum": null,
435
+ "nesterov": false,
436
+ "no_prox": false,
437
+ "optim_bits": null,
438
+ "percentile_clipping": null,
439
+ "r": null,
440
+ "relative_step": false,
441
+ "safeguard_warmup": false,
442
+ "scale_parameter": false,
443
+ "stochastic_rounding": true,
444
+ "use_bias_correction": false,
445
+ "use_triton": false,
446
+ "warmup_init": false,
447
+ "weight_decay": 0.0,
448
+ "weight_lr_power": null,
449
+ "decoupled_decay": false,
450
+ "fixed_decay": false,
451
+ "rectify": false,
452
+ "degenerated_to_sgd": false,
453
+ "k": null,
454
+ "xi": null,
455
+ "n_sma_threshold": null,
456
+ "ams_bound": false,
457
+ "adanorm": false,
458
+ "adam_debias": false,
459
+ "slice_p": 11,
460
+ "cautious": false,
461
+ "weight_decay_by_lr": true,
462
+ "prodigy_steps": 0,
463
+ "use_speed": false,
464
+ "split_groups": true,
465
+ "split_groups_mean": true,
466
+ "factored": true,
467
+ "factored_fp32": true,
468
+ "use_stableadamw": true,
469
+ "use_cautious": false,
470
+ "use_grams": false,
471
+ "use_adopt": false,
472
+ "d_limiter": false,
473
+ "use_schedulefree": true,
474
+ "use_orthograd": false,
475
+ "nnmf_factor": false,
476
+ "orthogonal_gradient": false,
477
+ "use_atan2": false,
478
+ "use_AdEMAMix": false,
479
+ "beta3_ema": 0.9999,
480
+ "alpha_grad": 100.0,
481
+ "beta1_warmup": null,
482
+ "min_beta1": null,
483
+ "Simplified_AdEMAMix": false,
484
+ "cautious_mask": false,
485
+ "grams_moment": false,
486
+ "kourkoutas_beta": false,
487
+ "k_warmup_steps": null,
488
+ "schedulefree_c": null,
489
+ "ns_steps": null,
490
+ "MuonWithAuxAdam": false,
491
+ "muon_hidden_layers": null,
492
+ "muon_adam_regex": false,
493
+ "muon_adam_lr": null,
494
+ "muon_te1_adam_lr": null,
495
+ "muon_te2_adam_lr": null,
496
+ "muon_adam_config": null,
497
+ "rms_rescaling": true,
498
+ "normuon_variant": false,
499
+ "beta2_normuon": null,
500
+ "normuon_eps": null,
501
+ "low_rank_ortho": false,
502
+ "ortho_rank": null,
503
+ "accelerated_ns": false,
504
+ "cautious_wd": false,
505
+ "approx_mars": false,
506
+ "kappa_p": null,
507
+ "auto_kappa_p": false,
508
+ "compile": false
509
+ },
510
+ "optimizer_defaults": {},
511
+ "sample_definition_file_name": "training_samples/samples.json",
512
+ "samples": null,
513
+ "sample_after": 10,
514
+ "sample_after_unit": "MINUTE",
515
+ "sample_skip_first": 0,
516
+ "sample_image_format": "JPG",
517
+ "sample_video_format": "MP4",
518
+ "sample_audio_format": "MP3",
519
+ "samples_to_tensorboard": true,
520
+ "non_ema_sampling": true,
521
+ "backup_after": 30,
522
+ "backup_after_unit": "MINUTE",
523
+ "rolling_backup": true,
524
+ "rolling_backup_count": 2,
525
+ "backup_before_save": true,
526
+ "save_every": 0,
527
+ "save_every_unit": "NEVER",
528
+ "save_skip_first": 0,
529
+ "save_filename_prefix": ""
530
+ }
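
These OneTrainer templates run to several hundred lines, so it is easy to break one while hand-editing. As a quick sanity check, here is a minimal, illustrative Python sketch (not part of this commit; the template path is a placeholder for whichever template you are editing) that confirms the file still parses as JSON and prints the knobs that typically change between runs:

# check_template.py -- illustrative sketch only; point the path at the template you are editing.
import json
from pathlib import Path

template_path = Path("training-scripts/onetrainer/template.json")  # placeholder path, not a file in this commit

config = json.loads(template_path.read_text(encoding="utf-8"))

# Settings most likely to be tuned per run, as they appear in these templates.
print("optimizer     :", config["optimizer"]["optimizer"])
print("learning_rate :", config["learning_rate"])
print("lora_rank     :", config["lora_rank"])
print("epochs        :", config["epochs"])
print("output model  :", config["output_model_destination"])
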
training-scripts/onetrainer/fk9_template.json ADDED
@@ -0,0 +1,530 @@
+ {
+ "__version": 10,
+ "training_method": "LORA",
+ "model_type": "FLUX_2",
+ "debug_mode": false,
+ "debug_dir": "debug",
+ "workspace_dir": "workspace/run",
+ "cache_dir": "workspace-cache/run",
+ "tensorboard": true,
+ "tensorboard_expose": false,
+ "tensorboard_always_on": false,
+ "tensorboard_port": 6006,
+ "validation": false,
+ "validate_after": 1,
+ "validate_after_unit": "EPOCH",
+ "continue_last_backup": false,
+ "prevent_overwrites": false,
+ "include_train_config": "NONE",
+ "multi_gpu": false,
+ "device_indexes": "",
+ "gradient_reduce_precision": "FLOAT_32_STOCHASTIC",
+ "fused_gradient_reduce": true,
+ "async_gradient_reduce": true,
+ "async_gradient_reduce_buffer": 100,
+ "base_model_name": "black-forest-labs/FLUX.2-klein-base-9B",
+ "output_dtype": "BFLOAT_16",
+ "output_model_format": "SAFETENSORS",
+ "output_model_destination": "models/fk9_[person_to_train]_v2_onetrainer.safetensors",
+ "gradient_checkpointing": "ON",
+ "enable_async_offloading": true,
+ "enable_activation_offloading": true,
+ "layer_offload_fraction": 0.0,
+ "force_circular_padding": false,
+ "compile": true,
+ "concept_file_name": "training_concepts/concepts.json",
+ "concepts": [
+ {
+ "__version": 2,
+ "image": {
+ "__version": 0,
+ "enable_crop_jitter": true,
+ "enable_random_flip": false,
+ "enable_fixed_flip": false,
+ "enable_random_rotate": false,
+ "enable_fixed_rotate": false,
+ "random_rotate_max_angle": 0.0,
+ "enable_random_brightness": false,
+ "enable_fixed_brightness": false,
+ "random_brightness_max_strength": 0.0,
+ "enable_random_contrast": false,
+ "enable_fixed_contrast": false,
+ "random_contrast_max_strength": 0.0,
+ "enable_random_saturation": false,
+ "enable_fixed_saturation": false,
+ "random_saturation_max_strength": 0.0,
+ "enable_random_hue": false,
+ "enable_fixed_hue": false,
+ "random_hue_max_strength": 0.0,
+ "enable_resolution_override": false,
+ "resolution_override": "512",
+ "enable_random_circular_mask_shrink": false,
+ "enable_random_mask_rotate_crop": false
+ },
+ "text": {
+ "__version": 0,
+ "prompt_source": "sample",
+ "prompt_path": "",
+ "enable_tag_shuffling": false,
+ "tag_delimiter": ",",
+ "keep_tags_count": 1,
+ "tag_dropout_enable": false,
+ "tag_dropout_mode": "FULL",
+ "tag_dropout_probability": 0.0,
+ "tag_dropout_special_tags_mode": "NONE",
+ "tag_dropout_special_tags": "",
+ "tag_dropout_special_tags_regex": false,
+ "caps_randomize_enable": false,
+ "caps_randomize_mode": "capslock, title, first, random",
+ "caps_randomize_probability": 0.0,
+ "caps_randomize_lowercase": false
+ },
+ "name": "",
+ "path": "C:/Development/ai-toolkit/datasets/[person_to_train]",
+ "seed": -469069486,
+ "enabled": true,
+ "type": "STANDARD",
+ "include_subdirectories": false,
+ "image_variations": 1,
+ "text_variations": 1,
+ "balancing": 1.0,
+ "balancing_strategy": "REPEATS",
+ "loss_weight": 1.0,
+ "concept_stats": {}
+ }
+ ],
+ "aspect_ratio_bucketing": true,
+ "latent_caching": true,
+ "clear_cache_before_training": true,
+ "learning_rate_scheduler": "CONSTANT",
+ "custom_learning_rate_scheduler": null,
+ "scheduler_params": [],
+ "learning_rate": 3e-5,
+ "learning_rate_warmup_steps": 200.0,
+ "learning_rate_cycles": 1.0,
+ "learning_rate_min_factor": 0.0,
+ "epochs": 100,
+ "batch_size": 2,
+ "gradient_accumulation_steps": 1,
+ "ema": "OFF",
+ "ema_decay": 0.999,
+ "ema_update_step_interval": 5,
+ "dataloader_threads": 1,
+ "train_device": "cuda",
+ "temp_device": "cpu",
+ "train_dtype": "BFLOAT_16",
+ "fallback_train_dtype": "BFLOAT_16",
+ "enable_autocast_cache": true,
+ "only_cache": false,
+ "resolution": "512",
+ "frames": "25",
+ "mse_strength": 1.0,
+ "mae_strength": 0.0,
+ "log_cosh_strength": 0.0,
+ "huber_strength": 0.0,
+ "huber_delta": 1.0,
+ "vb_loss_strength": 1.0,
+ "loss_weight_fn": "CONSTANT",
+ "loss_weight_strength": 5.0,
+ "dropout_probability": 0.0,
+ "loss_scaler": "NONE",
+ "learning_rate_scaler": "NONE",
+ "clip_grad_norm": 1.0,
+ "offset_noise_weight": 0.0,
+ "generalized_offset_noise": false,
+ "perturbation_noise_weight": 0.0,
+ "rescale_noise_scheduler_to_zero_terminal_snr": false,
+ "force_v_prediction": false,
+ "force_epsilon_prediction": false,
+ "min_noising_strength": 0.0,
+ "max_noising_strength": 1.0,
+ "timestep_distribution": "LOGIT_NORMAL",
+ "noising_weight": 0.0,
+ "noising_bias": 0.0,
+ "timestep_shift": 1.0,
+ "dynamic_timestep_shifting": false,
+ "unet": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 0,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "prior": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 0,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "transformer": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 0,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "INT_W8A8",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "quantization": {
+ "__version": 0,
+ "layer_filter": "transformer_block",
+ "layer_filter_preset": "blocks",
+ "layer_filter_regex": false,
+ "svd_dtype": "NONE",
+ "svd_rank": 16,
+ "cache_dir": "workspace-cache/run/quantization"
+ },
+ "text_encoder": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": false,
+ "stop_training_after": 30,
+ "stop_training_after_unit": "EPOCH",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_8",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "text_encoder_layer_skip": 0,
+ "text_encoder_sequence_length": 512,
+ "text_encoder_2": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 30,
+ "stop_training_after_unit": "EPOCH",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "text_encoder_2_layer_skip": 0,
+ "text_encoder_2_sequence_length": 77,
+ "text_encoder_3": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 30,
+ "stop_training_after_unit": "EPOCH",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "text_encoder_3_layer_skip": 0,
+ "text_encoder_4": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": 30,
+ "stop_training_after_unit": "EPOCH",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "text_encoder_4_layer_skip": 0,
+ "vae": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": null,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "effnet_encoder": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": null,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "decoder": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": null,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "decoder_text_encoder": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": null,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "decoder_vqgan": {
+ "__version": 0,
+ "model_name": "",
+ "include": true,
+ "train": true,
+ "stop_training_after": null,
+ "stop_training_after_unit": "NEVER",
+ "learning_rate": null,
+ "weight_dtype": "FLOAT_32",
+ "dropout_probability": 0.0,
+ "train_embedding": true,
+ "attention_mask": false,
+ "guidance_scale": 1.0
+ },
+ "masked_training": false,
+ "unmasked_probability": 0.1,
+ "unmasked_weight": 0.1,
+ "normalize_masked_area_loss": false,
+ "masked_prior_preservation_weight": 0.0,
+ "custom_conditioning_image": false,
+ "layer_filter": "transformer_blocks",
+ "layer_filter_preset": "transformer_block",
+ "layer_filter_regex": false,
+ "embedding_learning_rate": null,
+ "preserve_embedding_norm": false,
+ "embedding": {
+ "__version": 0,
+ "uuid": "32cefb93-f7d2-4083-be65-f367c7ebccb5",
+ "model_name": "",
+ "placeholder": "<embedding>",
+ "train": true,
+ "stop_training_after": null,
+ "stop_training_after_unit": "NEVER",
+ "token_count": 1,
+ "initial_embedding_text": "*",
+ "is_output_embedding": false
+ },
+ "additional_embeddings": [],
+ "embedding_weight_dtype": "FLOAT_32",
+ "cloud": {
+ "__version": 0,
+ "enabled": false,
+ "type": "RUNPOD",
+ "file_sync": "NATIVE_SCP",
+ "create": true,
+ "name": "OneTrainer",
+ "tensorboard_tunnel": true,
+ "sub_type": "",
+ "gpu_type": "",
+ "volume_size": 100,
+ "min_download": 0,
+ "remote_dir": "/workspace",
+ "huggingface_cache_dir": "/workspace/huggingface_cache",
+ "onetrainer_dir": "/workspace/OneTrainer",
+ "install_cmd": "git clone https://github.com/Nerogar/OneTrainer",
+ "install_onetrainer": true,
+ "update_onetrainer": true,
+ "detach_trainer": false,
+ "run_id": "job1",
+ "download_samples": true,
+ "download_output_model": true,
+ "download_saves": true,
+ "download_backups": false,
+ "download_tensorboard": false,
+ "delete_workspace": false,
+ "on_finish": "NONE",
+ "on_error": "NONE",
+ "on_detached_finish": "NONE",
+ "on_detached_error": "NONE"
+ },
+ "peft_type": "LORA",
+ "lora_model_name": "",
+ "lora_rank": 16,
+ "lora_alpha": 1.0,
+ "lora_decompose": false,
+ "lora_decompose_norm_epsilon": true,
+ "lora_decompose_output_axis": false,
+ "lora_weight_dtype": "FLOAT_32",
+ "bundle_additional_embeddings": true,
+ "oft_block_size": 32,
+ "oft_coft": false,
+ "coft_eps": 0.0001,
+ "oft_block_share": false,
+ "optimizer": {
+ "__version": 0,
+ "optimizer": "ADAMW",
+ "adam_w_mode": false,
+ "alpha": null,
+ "amsgrad": false,
+ "beta1": 0.9,
+ "beta2": 0.999,
+ "beta3": null,
+ "bias_correction": false,
+ "block_wise": false,
+ "capturable": false,
+ "centered": false,
+ "clip_threshold": null,
+ "d0": null,
+ "d_coef": null,
+ "dampening": null,
+ "decay_rate": null,
+ "decouple": false,
+ "differentiable": false,
+ "eps": 1e-8,
+ "eps2": null,
+ "foreach": false,
+ "fsdp_in_use": false,
+ "fused": true,
+ "fused_back_pass": false,
+ "growth_rate": null,
+ "initial_accumulator_value": null,
+ "initial_accumulator": null,
+ "is_paged": false,
+ "log_every": null,
+ "lr_decay": null,
+ "max_unorm": null,
+ "maximize": false,
+ "min_8bit_size": null,
+ "quant_block_size": null,
+ "momentum": null,
+ "nesterov": false,
+ "no_prox": false,
+ "optim_bits": null,
+ "percentile_clipping": null,
+ "r": null,
+ "relative_step": false,
+ "safeguard_warmup": false,
+ "scale_parameter": false,
+ "stochastic_rounding": false,
+ "use_bias_correction": false,
+ "use_triton": false,
+ "warmup_init": false,
+ "weight_decay": 0.01,
+ "weight_lr_power": null,
+ "decoupled_decay": false,
+ "fixed_decay": false,
+ "rectify": false,
+ "degenerated_to_sgd": false,
+ "k": null,
+ "xi": null,
+ "n_sma_threshold": null,
+ "ams_bound": false,
+ "adanorm": false,
+ "adam_debias": false,
+ "slice_p": null,
+ "cautious": false,
+ "weight_decay_by_lr": true,
+ "prodigy_steps": null,
+ "use_speed": false,
+ "split_groups": true,
+ "split_groups_mean": true,
+ "factored": true,
+ "factored_fp32": true,
+ "use_stableadamw": true,
+ "use_cautious": false,
+ "use_grams": false,
+ "use_adopt": false,
+ "d_limiter": true,
+ "use_schedulefree": true,
+ "use_orthograd": false,
+ "nnmf_factor": false,
+ "orthogonal_gradient": false,
+ "use_atan2": false,
+ "use_AdEMAMix": false,
+ "beta3_ema": null,
+ "alpha_grad": null,
+ "beta1_warmup": null,
+ "min_beta1": null,
+ "Simplified_AdEMAMix": false,
+ "cautious_mask": false,
+ "grams_moment": false,
+ "kourkoutas_beta": false,
+ "k_warmup_steps": null,
+ "schedulefree_c": null,
+ "ns_steps": null,
+ "MuonWithAuxAdam": false,
+ "muon_hidden_layers": null,
+ "muon_adam_regex": false,
+ "muon_adam_lr": null,
+ "muon_te1_adam_lr": null,
+ "muon_te2_adam_lr": null,
+ "muon_adam_config": {},
+ "rms_rescaling": true,
+ "normuon_variant": false,
+ "beta2_normuon": null,
+ "normuon_eps": null,
+ "low_rank_ortho": false,
+ "ortho_rank": null,
+ "accelerated_ns": false,
+ "cautious_wd": false,
+ "approx_mars": false,
+ "kappa_p": null,
+ "auto_kappa_p": false,
+ "compile": false
+ },
+ "optimizer_defaults": {},
+ "sample_definition_file_name": "training_samples/samples.json",
+ "samples": null,
+ "sample_after": 10,
+ "sample_after_unit": "MINUTE",
+ "sample_skip_first": 0,
+ "sample_image_format": "JPG",
+ "sample_video_format": "MP4",
+ "sample_audio_format": "MP3",
+ "samples_to_tensorboard": true,
+ "non_ema_sampling": true,
+ "backup_after": 30,
+ "backup_after_unit": "MINUTE",
+ "rolling_backup": true,
+ "rolling_backup_count": 2,
+ "backup_before_save": true,
+ "save_every": 0,
+ "save_every_unit": "NEVER",
+ "save_skip_first": 0,
+ "save_filename_prefix": ""
+ }
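
Both the concept "path" and "output_model_destination" in fk9_template.json carry a [person_to_train] placeholder, so the template is meant to be stamped per dataset before a run. Below is a minimal sketch of how that substitution could be scripted; doing it from Python, the output filename, and the fallback person name are illustrative assumptions, not part of this commit:

# make_fk9_config.py -- illustrative sketch, not part of this commit.
# Replaces the [person_to_train] placeholder everywhere in the template and
# writes a per-dataset copy next to it (the output location is an assumption).
import json
import sys
from pathlib import Path

TEMPLATE = Path("training-scripts/onetrainer/fk9_template.json")

def make_config(person: str) -> Path:
    text = TEMPLATE.read_text(encoding="utf-8").replace("[person_to_train]", person)
    json.loads(text)  # fail fast if the substitution somehow broke the JSON
    out_path = TEMPLATE.with_name(f"fk9_{person}.json")  # assumed output name
    out_path.write_text(text, encoding="utf-8")
    return out_path

if __name__ == "__main__":
    person = sys.argv[1] if len(sys.argv) > 1 else "example_person"  # placeholder dataset name
    print(make_config(person))

The stamped copy can then be handed to OneTrainer as its training config; nothing else in the template needs to change between people apart from this substitution.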