more organization
Browse files- audiogen_medium β obsolete/audiogen_medium +0 -0
- papers_please β obsolete/papers_please +0 -0
- password β obsolete/password +0 -0
- steal_sdscripts_metadata β obsolete/steal_sdscripts_metadata +103 -103
- kade-horny-sample-prompts.txt β sample-prompts/kade-horny-sample-prompts.txt +0 -0
- kade-sample-prompts-sd35.txt β sample-prompts/kade-sample-prompts-sd35.txt +0 -0
- kade-sample-prompts.txt β sample-prompts/kade-sample-prompts.txt +0 -0
audiogen_medium → obsolete/audiogen_medium
RENAMED
File without changes
|
papers_please → obsolete/papers_please
RENAMED
File without changes
|
password → obsolete/password
RENAMED
File without changes
|
steal_sdscripts_metadata → obsolete/steal_sdscripts_metadata
RENAMED
@@ -1,103 +1,103 @@
|
|
1 |
-
#!/usr/bin/env python
|
2 |
-
# -*- coding: utf-8 -*-
|
3 |
-
|
4 |
-
"""
|
5 |
-
This script automates the process of updating a Stable Diffusion training
|
6 |
-
script with settings extracted from a LoRA model's JSON metadata.
|
7 |
-
|
8 |
-
It performs the following main tasks:
|
9 |
-
1. Reads a JSON file containing LoRA model metadata
|
10 |
-
2. Parses an existing Stable Diffusion training script
|
11 |
-
3. Maps metadata keys to corresponding script arguments
|
12 |
-
4. Updates the script with values from the metadata
|
13 |
-
5. Handles special cases and complex arguments (e.g., network_args)
|
14 |
-
6. Writes the updated script to a new file
|
15 |
-
|
16 |
-
Usage:
|
17 |
-
python steal_sdscripts_metadata <metadata_file> <script_file> <output_file>
|
18 |
-
|
19 |
-
This tool is particularly useful for replicating training conditions or
|
20 |
-
fine-tuning existing models based on successful previous runs.
|
21 |
-
"""
|
22 |
-
|
23 |
-
import json
|
24 |
-
import re
|
25 |
-
import argparse
|
26 |
-
|
27 |
-
# Parse command-line arguments
|
28 |
-
parser = argparse.ArgumentParser(
|
29 |
-
description='Update training script based on metadata.'
|
30 |
-
)
|
31 |
-
parser.add_argument(
|
32 |
-
'metadata_file', type=str, help='Path to the metadata JSON file'
|
33 |
-
)
|
34 |
-
parser.add_argument(
|
35 |
-
'script_file', type=str, help='Path to the training script file'
|
36 |
-
)
|
37 |
-
parser.add_argument(
|
38 |
-
'output_file', type=str, help='Path to save the updated training script'
|
39 |
-
)
|
40 |
-
args = parser.parse_args()
|
41 |
-
|
42 |
-
# Read the metadata JSON file
|
43 |
-
with open(args.metadata_file, 'r', encoding='utf-8') as f:
|
44 |
-
metadata = json.load(f)
|
45 |
-
|
46 |
-
# Read the training script
|
47 |
-
with open(args.script_file, 'r', encoding='utf-8') as f:
|
48 |
-
script_content = f.read()
|
49 |
-
|
50 |
-
# Define mappings between JSON keys and script arguments
|
51 |
-
mappings = {
|
52 |
-
'ss_network_dim': '--network_dim',
|
53 |
-
'ss_network_alpha': '--network_alpha',
|
54 |
-
'ss_learning_rate': '--learning_rate',
|
55 |
-
'ss_unet_lr': '--unet_lr',
|
56 |
-
'ss_text_encoder_lr': '--text_encoder_lr',
|
57 |
-
'ss_max_train_steps': '--max_train_steps',
|
58 |
-
'ss_train_batch_size': '--train_batch_size',
|
59 |
-
'ss_gradient_accumulation_steps': '--gradient_accumulation_steps',
|
60 |
-
'ss_mixed_precision': '--mixed_precision',
|
61 |
-
'ss_seed': '--seed',
|
62 |
-
'ss_resolution': '--resolution',
|
63 |
-
'ss_clip_skip': '--clip_skip',
|
64 |
-
'ss_lr_scheduler': '--lr_scheduler',
|
65 |
-
'ss_network_module': '--network_module',
|
66 |
-
}
|
67 |
-
|
68 |
-
# Update script content based on metadata
|
69 |
-
for json_key, script_arg in mappings.items():
|
70 |
-
if json_key in metadata:
|
71 |
-
value = metadata[json_key]
|
72 |
-
|
73 |
-
# Handle special cases
|
74 |
-
if json_key == 'ss_resolution':
|
75 |
-
value = f'"{value[1:-1]}"' # Remove parentheses and add quotes
|
76 |
-
elif isinstance(value, str):
|
77 |
-
value = f'"{value}"'
|
78 |
-
|
79 |
-
# Replace or add the argument in the script
|
80 |
-
pattern = f'{script_arg}=\\S+'
|
81 |
-
replacement = f'{script_arg}={value}'
|
82 |
-
if re.search(pattern, script_content):
|
83 |
-
script_content = re.sub(pattern, replacement, script_content)
|
84 |
-
else:
|
85 |
-
script_content = script_content.replace(
|
86 |
-
'args=(', f'args=(\n {replacement}'
|
87 |
-
)
|
88 |
-
|
89 |
-
# Handle network_args separately as it's more complex
|
90 |
-
if 'ss_network_args' in metadata:
|
91 |
-
network_args = metadata['ss_network_args']
|
92 |
-
NETWORK_ARGS_STR = ' '.join(
|
93 |
-
[f'"{k}={v}"' for k, v in network_args.items()]
|
94 |
-
)
|
95 |
-
PATTERN = r'--network_args(\s+".+")+'
|
96 |
-
replacement = f'--network_args\n {NETWORK_ARGS_STR}'
|
97 |
-
script_content = re.sub(PATTERN, replacement, script_content)
|
98 |
-
|
99 |
-
# Write the updated script
|
100 |
-
with open(args.output_file, 'w', encoding='utf-8') as f:
|
101 |
-
f.write(script_content)
|
102 |
-
|
103 |
-
print(f"Updated training script has been saved as '{args.output_file}'")
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
|
4 |
+
"""
|
5 |
+
This script automates the process of updating a Stable Diffusion training
|
6 |
+
script with settings extracted from a LoRA model's JSON metadata.
|
7 |
+
|
8 |
+
It performs the following main tasks:
|
9 |
+
1. Reads a JSON file containing LoRA model metadata
|
10 |
+
2. Parses an existing Stable Diffusion training script
|
11 |
+
3. Maps metadata keys to corresponding script arguments
|
12 |
+
4. Updates the script with values from the metadata
|
13 |
+
5. Handles special cases and complex arguments (e.g., network_args)
|
14 |
+
6. Writes the updated script to a new file
|
15 |
+
|
16 |
+
Usage:
|
17 |
+
python steal_sdscripts_metadata <metadata_file> <script_file> <output_file>
|
18 |
+
|
19 |
+
This tool is particularly useful for replicating training conditions or
|
20 |
+
fine-tuning existing models based on successful previous runs.
|
21 |
+
"""
|
22 |
+
|
23 |
+
import json
|
24 |
+
import re
|
25 |
+
import argparse
|
26 |
+
|
27 |
+
# Parse command-line arguments
|
28 |
+
parser = argparse.ArgumentParser(
|
29 |
+
description='Update training script based on metadata.'
|
30 |
+
)
|
31 |
+
parser.add_argument(
|
32 |
+
'metadata_file', type=str, help='Path to the metadata JSON file'
|
33 |
+
)
|
34 |
+
parser.add_argument(
|
35 |
+
'script_file', type=str, help='Path to the training script file'
|
36 |
+
)
|
37 |
+
parser.add_argument(
|
38 |
+
'output_file', type=str, help='Path to save the updated training script'
|
39 |
+
)
|
40 |
+
args = parser.parse_args()
|
41 |
+
|
42 |
+
# Read the metadata JSON file
|
43 |
+
with open(args.metadata_file, 'r', encoding='utf-8') as f:
|
44 |
+
metadata = json.load(f)
|
45 |
+
|
46 |
+
# Read the training script
|
47 |
+
with open(args.script_file, 'r', encoding='utf-8') as f:
|
48 |
+
script_content = f.read()
|
49 |
+
|
50 |
+
# Define mappings between JSON keys and script arguments
|
51 |
+
mappings = {
|
52 |
+
'ss_network_dim': '--network_dim',
|
53 |
+
'ss_network_alpha': '--network_alpha',
|
54 |
+
'ss_learning_rate': '--learning_rate',
|
55 |
+
'ss_unet_lr': '--unet_lr',
|
56 |
+
'ss_text_encoder_lr': '--text_encoder_lr',
|
57 |
+
'ss_max_train_steps': '--max_train_steps',
|
58 |
+
'ss_train_batch_size': '--train_batch_size',
|
59 |
+
'ss_gradient_accumulation_steps': '--gradient_accumulation_steps',
|
60 |
+
'ss_mixed_precision': '--mixed_precision',
|
61 |
+
'ss_seed': '--seed',
|
62 |
+
'ss_resolution': '--resolution',
|
63 |
+
'ss_clip_skip': '--clip_skip',
|
64 |
+
'ss_lr_scheduler': '--lr_scheduler',
|
65 |
+
'ss_network_module': '--network_module',
|
66 |
+
}
|
67 |
+
|
68 |
+
# Update script content based on metadata
|
69 |
+
for json_key, script_arg in mappings.items():
|
70 |
+
if json_key in metadata:
|
71 |
+
value = metadata[json_key]
|
72 |
+
|
73 |
+
# Handle special cases
|
74 |
+
if json_key == 'ss_resolution':
|
75 |
+
value = f'"{value[1:-1]}"' # Remove parentheses and add quotes
|
76 |
+
elif isinstance(value, str):
|
77 |
+
value = f'"{value}"'
|
78 |
+
|
79 |
+
# Replace or add the argument in the script
|
80 |
+
pattern = f'{script_arg}=\\S+'
|
81 |
+
replacement = f'{script_arg}={value}'
|
82 |
+
if re.search(pattern, script_content):
|
83 |
+
script_content = re.sub(pattern, replacement, script_content)
|
84 |
+
else:
|
85 |
+
script_content = script_content.replace(
|
86 |
+
'args=(', f'args=(\n {replacement}'
|
87 |
+
)
|
88 |
+
|
89 |
+
# Handle network_args separately as it's more complex
|
90 |
+
if 'ss_network_args' in metadata:
|
91 |
+
network_args = metadata['ss_network_args']
|
92 |
+
NETWORK_ARGS_STR = ' '.join(
|
93 |
+
[f'"{k}={v}"' for k, v in network_args.items()]
|
94 |
+
)
|
95 |
+
PATTERN = r'--network_args(\s+".+")+'
|
96 |
+
replacement = f'--network_args\n {NETWORK_ARGS_STR}'
|
97 |
+
script_content = re.sub(PATTERN, replacement, script_content)
|
98 |
+
|
99 |
+
# Write the updated script
|
100 |
+
with open(args.output_file, 'w', encoding='utf-8') as f:
|
101 |
+
f.write(script_content)
|
102 |
+
|
103 |
+
print(f"Updated training script has been saved as '{args.output_file}'")
|
kade-horny-sample-prompts.txt → sample-prompts/kade-horny-sample-prompts.txt
RENAMED
File without changes
|
kade-sample-prompts-sd35.txt → sample-prompts/kade-sample-prompts-sd35.txt
RENAMED
File without changes
|
kade-sample-prompts.txt → sample-prompts/kade-sample-prompts.txt
RENAMED
File without changes
|