End of training
- .gitattributes +1 -0
- README.md +20 -0
- image_0.png +3 -0
- logs/text2image-fine-tune/1704365635.431247/events.out.tfevents.1704365635.98a18e383ab0.6962.1 +3 -0
- logs/text2image-fine-tune/1704365635.4320903/hparams.yml +53 -0
- logs/text2image-fine-tune/1704366485.1579063/events.out.tfevents.1704366485.98a18e383ab0.14696.1 +3 -0
- logs/text2image-fine-tune/1704366485.1589582/hparams.yml +53 -0
- logs/text2image-fine-tune/1704366882.7081184/events.out.tfevents.1704366882.98a18e383ab0.18259.1 +3 -0
- logs/text2image-fine-tune/1704366882.7091117/hparams.yml +53 -0
- logs/text2image-fine-tune/1704367032.363612/events.out.tfevents.1704367032.98a18e383ab0.19727.1 +3 -0
- logs/text2image-fine-tune/1704367032.3646586/hparams.yml +53 -0
- logs/text2image-fine-tune/1704367550.8422213/events.out.tfevents.1704367550.98a18e383ab0.24490.1 +3 -0
- logs/text2image-fine-tune/1704367550.8432975/hparams.yml +53 -0
- logs/text2image-fine-tune/1704368031.0878558/events.out.tfevents.1704368031.98a18e383ab0.28733.1 +3 -0
- logs/text2image-fine-tune/1704368031.0885866/hparams.yml +53 -0
- logs/text2image-fine-tune/1704368183.4810724/events.out.tfevents.1704368183.98a18e383ab0.30280.1 +3 -0
- logs/text2image-fine-tune/1704368183.4820576/hparams.yml +52 -0
- logs/text2image-fine-tune/1704368304.9494638/events.out.tfevents.1704368304.98a18e383ab0.31446.1 +3 -0
- logs/text2image-fine-tune/1704368304.9504998/hparams.yml +52 -0
- logs/text2image-fine-tune/1704371682.5524082/events.out.tfevents.1704371682.98a18e383ab0.58899.1 +3 -0
- logs/text2image-fine-tune/1704371682.5534642/hparams.yml +52 -0
- logs/text2image-fine-tune/events.out.tfevents.1704365635.98a18e383ab0.6962.0 +3 -0
- logs/text2image-fine-tune/events.out.tfevents.1704366485.98a18e383ab0.14696.0 +3 -0
- logs/text2image-fine-tune/events.out.tfevents.1704366882.98a18e383ab0.18259.0 +3 -0
- logs/text2image-fine-tune/events.out.tfevents.1704367032.98a18e383ab0.19727.0 +3 -0
- logs/text2image-fine-tune/events.out.tfevents.1704367550.98a18e383ab0.24490.0 +3 -0
- logs/text2image-fine-tune/events.out.tfevents.1704368031.98a18e383ab0.28733.0 +3 -0
- logs/text2image-fine-tune/events.out.tfevents.1704368183.98a18e383ab0.30280.0 +3 -0
- logs/text2image-fine-tune/events.out.tfevents.1704368304.98a18e383ab0.31446.0 +3 -0
- logs/text2image-fine-tune/events.out.tfevents.1704371682.98a18e383ab0.58899.0 +3 -0
- pytorch_lora_weights.safetensors +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+image_0.png filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,20 @@
+
+---
+tags:
+- stable-diffusion-xl
+- stable-diffusion-xl-diffusers
+- text-to-image
+- diffusers
+- lora
+inference: true
+---
+
+# LoRA text2image fine-tuning - armhebb/lora_license-id_style-name
+
+These are LoRA adaption weights for /sdxl_j. The weights were fine-tuned on the None dataset. You can find some example images in the following.
+
+![img_0](./image_0.png)
+
+LoRA for the text encoder was enabled: False.
+
+Special VAE used for training: None.
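The model card above does not include usage code. Below is a minimal loading sketch, not part of the repository: it assumes the public SDXL base checkpoint stands in for the private `/sdxl_j` base model referenced in the card, and it reuses the validation prompt logged in the hparams.yml files further down.

```python
# A minimal usage sketch (assumptions: public SDXL base as a stand-in for
# "/sdxl_j", a CUDA GPU available, diffusers installed).
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # assumed stand-in base model
    torch_dtype=torch.float16,
).to("cuda")

# Load the LoRA adapter published in this commit (pytorch_lora_weights.safetensors).
pipe.load_lora_weights("armhebb/lora_license-id_style-name")

# Validation prompt taken from the hparams.yml logs below.
prompt = (
    "a beautiful woman wearing a cyberpunk dress, solid white color "
    "background, high fashion ad campaign"
)
image = pipe(prompt, num_inference_steps=30).images[0]
image.save("lora_sample.png")
```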
image_0.png ADDED (stored via Git LFS; image preview not included here)
logs/text2image-fine-tune/1704365635.431247/events.out.tfevents.1704365635.98a18e383ab0.6962.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ae482abc3f00e3895f8166c010d742dd904d11a67d3ff646b11b7b58090c9a8
+size 2520
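All of the log and weight files in this commit are stored as Git LFS pointers like the one above: three key/value lines giving the spec version, the sha256 oid, and the size in bytes. The sketch below shows one way to read such a pointer; the helper name and the chosen path are illustrative, not part of the repository.

```python
# Illustrative only: parse a Git LFS pointer file from this diff.
from pathlib import Path


def read_lfs_pointer(path: str) -> tuple[str, str, int]:
    """Return (spec version URL, sha256 hex digest, size in bytes)."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields["version"], fields["oid"].removeprefix("sha256:"), int(fields["size"])


version, digest, size = read_lfs_pointer(
    "logs/text2image-fine-tune/1704365635.431247/"
    "events.out.tfevents.1704365635.98a18e383ab0.6962.1"
)
print(digest, size)  # expected: 4ae482ab... 2520
```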
logs/text2image-fine-tune/1704365635.4320903/hparams.yml ADDED
@@ -0,0 +1,53 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+cache_dir: null
+caption_column: text
+center_crop: false
+checkpointing_steps: 2000
+checkpoints_total_limit: null
+dataloader_num_workers: 0
+dataset_config_name: null
+dataset_name: null
+enable_xformers_memory_efficient_attention: false
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+hub_model_id: lora_license-id_style-name
+hub_token: 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
+image_column: image
+learning_rate: 0.0001
+local_rank: -1
+logging_dir: logs
+lr_scheduler: constant
+lr_warmup_steps: 0
+max_grad_norm: 1.0
+max_train_samples: null
+max_train_steps: 300
+mixed_precision: fp16
+noise_offset: 0
+num_train_epochs: 60
+num_validation_images: 1
+output_dir: sample_lora_train
+prediction_type: null
+pretrained_model_name_or_path: /sdxl_j
+pretrained_vae_model_name_or_path: null
+push_to_hub: true
+random_flip: true
+rank: 4
+report_to: tensorboard
+resolution: 1024
+resume_from_checkpoint: null
+revision: null
+scale_lr: false
+seed: 42
+snr_gamma: null
+train_batch_size: 1
+train_data_dir: /sample_dataset
+train_text_encoder: false
+use_8bit_adam: false
+validation_epochs: 1000
+validation_prompt: a beautiful woman wearing a cyberpunk dress, solid white color
+  background, high fashion ad campaign
+variant: null
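Each hparams.yml in this commit is the argument dump logged by the training script's TensorBoard tracker. Assuming the run corresponds to diffusers' examples/text_to_image/train_text_to_image_lora_sdxl.py (an assumption; the script itself is not part of this commit), a roughly equivalent launch command can be rebuilt from the dump as sketched below.

```python
# A rough reconstruction sketch, not a command from the repository. Assumptions:
# the argparse flags of train_text_to_image_lora_sdxl.py match the dumped keys,
# PyYAML and accelerate are installed, and /sdxl_j plus /sample_dataset are
# whatever those paths pointed to on the training machine.
import shlex
import yaml

with open("logs/text2image-fine-tune/1704365635.4320903/hparams.yml") as f:
    hparams = yaml.safe_load(f)

cmd = ["accelerate", "launch", "train_text_to_image_lora_sdxl.py"]
for key, value in hparams.items():
    if value is None or key == "hub_token":  # drop unset options and the leaked token
        continue
    if value is True:        # boolean flags are store_true switches
        cmd.append(f"--{key}")
    elif value is False:
        continue
    else:
        cmd.extend([f"--{key}", str(value)])

print(shlex.join(cmd))  # review before actually launching a run
```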
logs/text2image-fine-tune/1704366485.1579063/events.out.tfevents.1704366485.98a18e383ab0.14696.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d2415eedff8c61cdfb3d7200372844d854a08ce9a1c7347ae4042889f7b87dc
+size 2520
logs/text2image-fine-tune/1704366485.1589582/hparams.yml ADDED
@@ -0,0 +1,53 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+cache_dir: null
+caption_column: text
+center_crop: false
+checkpointing_steps: 2000
+checkpoints_total_limit: null
+dataloader_num_workers: 0
+dataset_config_name: null
+dataset_name: null
+enable_xformers_memory_efficient_attention: false
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+hub_model_id: lora_license-id_style-name
+hub_token: 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
+image_column: image
+learning_rate: 0.0001
+local_rank: -1
+logging_dir: logs
+lr_scheduler: constant
+lr_warmup_steps: 0
+max_grad_norm: 1.0
+max_train_samples: null
+max_train_steps: 50
+mixed_precision: fp16
+noise_offset: 0
+num_train_epochs: 10
+num_validation_images: 1
+output_dir: sample_lora_train
+prediction_type: null
+pretrained_model_name_or_path: /sdxl_j
+pretrained_vae_model_name_or_path: null
+push_to_hub: true
+random_flip: true
+rank: 4
+report_to: tensorboard
+resolution: 1024
+resume_from_checkpoint: null
+revision: null
+scale_lr: false
+seed: 42
+snr_gamma: null
+train_batch_size: 1
+train_data_dir: /sample_dataset
+train_text_encoder: false
+use_8bit_adam: false
+validation_epochs: 1000
+validation_prompt: a beautiful woman wearing a cyberpunk dress, solid white color
+  background, high fashion ad campaign
+variant: null
logs/text2image-fine-tune/1704366882.7081184/events.out.tfevents.1704366882.98a18e383ab0.18259.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8fff2c5bbb92ea2ec1c861afdb7f2a310a5809f5f48210ed1844f70e5907f2d
+size 2528
logs/text2image-fine-tune/1704366882.7091117/hparams.yml ADDED
@@ -0,0 +1,53 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+cache_dir: null
+caption_column: text
+center_crop: false
+checkpointing_steps: 2000
+checkpoints_total_limit: null
+dataloader_num_workers: 0
+dataset_config_name: null
+dataset_name: null
+enable_xformers_memory_efficient_attention: false
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+hub_model_id: armhebb/lora_license-id_style-name
+hub_token: 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
+image_column: image
+learning_rate: 0.0001
+local_rank: -1
+logging_dir: logs
+lr_scheduler: constant
+lr_warmup_steps: 0
+max_grad_norm: 1.0
+max_train_samples: null
+max_train_steps: 50
+mixed_precision: fp16
+noise_offset: 0
+num_train_epochs: 10
+num_validation_images: 1
+output_dir: sample_lora_train
+prediction_type: null
+pretrained_model_name_or_path: /sdxl_j
+pretrained_vae_model_name_or_path: null
+push_to_hub: true
+random_flip: true
+rank: 4
+report_to: tensorboard
+resolution: 1024
+resume_from_checkpoint: null
+revision: null
+scale_lr: false
+seed: 42
+snr_gamma: null
+train_batch_size: 1
+train_data_dir: /sample_dataset
+train_text_encoder: false
+use_8bit_adam: false
+validation_epochs: 1000
+validation_prompt: a beautiful woman wearing a cyberpunk dress, solid white color
+  background, high fashion ad campaign
+variant: null
logs/text2image-fine-tune/1704367032.363612/events.out.tfevents.1704367032.98a18e383ab0.19727.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83a73e2a4d4619fb6ede2aebbf89a02cd142d7bf5f2e642a444feb920b8e51f7
+size 2520
logs/text2image-fine-tune/1704367032.3646586/hparams.yml ADDED
@@ -0,0 +1,53 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+cache_dir: null
+caption_column: text
+center_crop: false
+checkpointing_steps: 2000
+checkpoints_total_limit: null
+dataloader_num_workers: 0
+dataset_config_name: null
+dataset_name: null
+enable_xformers_memory_efficient_attention: false
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+hub_model_id: lora_license-id_style-name
+hub_token: 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
+image_column: image
+learning_rate: 0.0001
+local_rank: -1
+logging_dir: logs
+lr_scheduler: constant
+lr_warmup_steps: 0
+max_grad_norm: 1.0
+max_train_samples: null
+max_train_steps: 50
+mixed_precision: fp16
+noise_offset: 0
+num_train_epochs: 10
+num_validation_images: 1
+output_dir: sample_lora_train
+prediction_type: null
+pretrained_model_name_or_path: /sdxl_j
+pretrained_vae_model_name_or_path: null
+push_to_hub: true
+random_flip: true
+rank: 4
+report_to: tensorboard
+resolution: 1024
+resume_from_checkpoint: null
+revision: null
+scale_lr: false
+seed: 42
+snr_gamma: null
+train_batch_size: 1
+train_data_dir: /sample_dataset
+train_text_encoder: false
+use_8bit_adam: false
+validation_epochs: 1000
+validation_prompt: a beautiful woman wearing a cyberpunk dress, solid white color
+  background, high fashion ad campaign
+variant: null
logs/text2image-fine-tune/1704367550.8422213/events.out.tfevents.1704367550.98a18e383ab0.24490.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b3d521e4d8c7d797bff74bf3dfa7d1c5cfd15286f9ba77f6fb9288a532f962f
+size 2520
logs/text2image-fine-tune/1704367550.8432975/hparams.yml ADDED
@@ -0,0 +1,53 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+cache_dir: null
+caption_column: text
+center_crop: false
+checkpointing_steps: 2000
+checkpoints_total_limit: null
+dataloader_num_workers: 0
+dataset_config_name: null
+dataset_name: null
+enable_xformers_memory_efficient_attention: false
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+hub_model_id: lora_license-id_style-name
+hub_token: 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
+image_column: image
+learning_rate: 0.0001
+local_rank: -1
+logging_dir: logs
+lr_scheduler: constant
+lr_warmup_steps: 0
+max_grad_norm: 1.0
+max_train_samples: null
+max_train_steps: 50
+mixed_precision: fp16
+noise_offset: 0
+num_train_epochs: 10
+num_validation_images: 1
+output_dir: sample_lora_train
+prediction_type: null
+pretrained_model_name_or_path: /sdxl_j
+pretrained_vae_model_name_or_path: null
+push_to_hub: true
+random_flip: true
+rank: 4
+report_to: tensorboard
+resolution: 1024
+resume_from_checkpoint: null
+revision: null
+scale_lr: false
+seed: 42
+snr_gamma: null
+train_batch_size: 1
+train_data_dir: /sample_dataset
+train_text_encoder: false
+use_8bit_adam: false
+validation_epochs: 1000
+validation_prompt: a beautiful woman wearing a cyberpunk dress, solid white color
+  background, high fashion ad campaign
+variant: null
logs/text2image-fine-tune/1704368031.0878558/events.out.tfevents.1704368031.98a18e383ab0.28733.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abb8028702bb54541c822b352d5958296e7ab92b45f000ebadc7ec318f38600c
+size 2520
logs/text2image-fine-tune/1704368031.0885866/hparams.yml ADDED
@@ -0,0 +1,53 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+cache_dir: null
+caption_column: text
+center_crop: false
+checkpointing_steps: 2000
+checkpoints_total_limit: null
+dataloader_num_workers: 0
+dataset_config_name: null
+dataset_name: null
+enable_xformers_memory_efficient_attention: false
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+hub_model_id: lora_license-id_style-name
+hub_token: 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
+image_column: image
+learning_rate: 0.0001
+local_rank: -1
+logging_dir: logs
+lr_scheduler: constant
+lr_warmup_steps: 0
+max_grad_norm: 1.0
+max_train_samples: null
+max_train_steps: 50
+mixed_precision: fp16
+noise_offset: 0
+num_train_epochs: 10
+num_validation_images: 1
+output_dir: sample_lora_train
+prediction_type: null
+pretrained_model_name_or_path: /sdxl_j
+pretrained_vae_model_name_or_path: null
+push_to_hub: true
+random_flip: true
+rank: 4
+report_to: tensorboard
+resolution: 1024
+resume_from_checkpoint: null
+revision: null
+scale_lr: false
+seed: 42
+snr_gamma: null
+train_batch_size: 1
+train_data_dir: /sample_dataset
+train_text_encoder: false
+use_8bit_adam: false
+validation_epochs: 1000
+validation_prompt: a beautiful woman wearing a cyberpunk dress, solid white color
+  background, high fashion ad campaign
+variant: null
logs/text2image-fine-tune/1704368183.4810724/events.out.tfevents.1704368183.98a18e383ab0.30280.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:49cd0cc34a04179f86b45f1693b3553dce87317f596a8aacdf5b90d0343e63d7
+size 2373
logs/text2image-fine-tune/1704368183.4820576/hparams.yml ADDED
@@ -0,0 +1,52 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+cache_dir: null
+caption_column: text
+center_crop: false
+checkpointing_steps: 2000
+checkpoints_total_limit: null
+dataloader_num_workers: 0
+dataset_config_name: null
+dataset_name: null
+enable_xformers_memory_efficient_attention: false
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+hub_model_id: lora_license-id_style-name
+hub_token: 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
+image_column: image
+learning_rate: 0.0001
+local_rank: -1
+logging_dir: logs
+lr_scheduler: constant
+lr_warmup_steps: 0
+max_grad_norm: 1.0
+max_train_samples: null
+max_train_steps: 10
+mixed_precision: fp16
+noise_offset: 0
+num_train_epochs: 2
+num_validation_images: 1
+output_dir: sample_lora_train
+prediction_type: null
+pretrained_model_name_or_path: /sdxl_j
+pretrained_vae_model_name_or_path: null
+push_to_hub: true
+random_flip: true
+rank: 4
+report_to: tensorboard
+resolution: 1024
+resume_from_checkpoint: null
+revision: null
+scale_lr: false
+seed: 42
+snr_gamma: null
+train_batch_size: 1
+train_data_dir: /sample_dataset
+train_text_encoder: false
+use_8bit_adam: false
+validation_epochs: 1000
+validation_prompt: null
+variant: null
logs/text2image-fine-tune/1704368304.9494638/events.out.tfevents.1704368304.98a18e383ab0.31446.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1eaabfb7f69ec9795af49f4c0b2d9d1ddceda120252a51e86c3f480bb8d61b58
+size 2373
logs/text2image-fine-tune/1704368304.9504998/hparams.yml ADDED
@@ -0,0 +1,52 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+cache_dir: null
+caption_column: text
+center_crop: false
+checkpointing_steps: 2000
+checkpoints_total_limit: null
+dataloader_num_workers: 0
+dataset_config_name: null
+dataset_name: null
+enable_xformers_memory_efficient_attention: false
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+hub_model_id: lora_license-id_style-name
+hub_token: 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
+image_column: image
+learning_rate: 0.0001
+local_rank: -1
+logging_dir: logs
+lr_scheduler: constant
+lr_warmup_steps: 0
+max_grad_norm: 1.0
+max_train_samples: null
+max_train_steps: 10
+mixed_precision: fp16
+noise_offset: 0
+num_train_epochs: 2
+num_validation_images: 1
+output_dir: sample_lora_train
+prediction_type: null
+pretrained_model_name_or_path: /sdxl_j
+pretrained_vae_model_name_or_path: null
+push_to_hub: true
+random_flip: true
+rank: 4
+report_to: tensorboard
+resolution: 1024
+resume_from_checkpoint: null
+revision: null
+scale_lr: false
+seed: 42
+snr_gamma: null
+train_batch_size: 1
+train_data_dir: /sample_dataset
+train_text_encoder: false
+use_8bit_adam: false
+validation_epochs: 1000
+validation_prompt: null
+variant: null
logs/text2image-fine-tune/1704371682.5524082/events.out.tfevents.1704371682.98a18e383ab0.58899.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99a45b6563224113c3f03fd7e5f2ec0dda6e9864ed92b7a339b3d28f79aa3ef4
+size 2373
logs/text2image-fine-tune/1704371682.5534642/hparams.yml ADDED
@@ -0,0 +1,52 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+cache_dir: null
+caption_column: text
+center_crop: false
+checkpointing_steps: 2000
+checkpoints_total_limit: null
+dataloader_num_workers: 0
+dataset_config_name: null
+dataset_name: null
+enable_xformers_memory_efficient_attention: false
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+hub_model_id: lora_license-id_style-name
+hub_token: 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
+image_column: image
+learning_rate: 0.0001
+local_rank: -1
+logging_dir: logs
+lr_scheduler: constant
+lr_warmup_steps: 0
+max_grad_norm: 1.0
+max_train_samples: null
+max_train_steps: 10
+mixed_precision: fp16
+noise_offset: 0
+num_train_epochs: 2
+num_validation_images: 1
+output_dir: sample_lora_train
+prediction_type: null
+pretrained_model_name_or_path: /sdxl_j
+pretrained_vae_model_name_or_path: null
+push_to_hub: true
+random_flip: true
+rank: 4
+report_to: tensorboard
+resolution: 1024
+resume_from_checkpoint: null
+revision: null
+scale_lr: false
+seed: 42
+snr_gamma: null
+train_batch_size: 1
+train_data_dir: /sample_dataset
+train_text_encoder: false
+use_8bit_adam: false
+validation_epochs: 1000
+validation_prompt: null
+variant: null
logs/text2image-fine-tune/events.out.tfevents.1704365635.98a18e383ab0.6962.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6233fe628c58d5fdb2c98e590dbcc51c35d7d3ceaec84bf706b023cfd4b21d43
+size 653794
logs/text2image-fine-tune/events.out.tfevents.1704366485.98a18e383ab0.14696.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85f2f688efae54d4d07dff65c42e1cdc6f994035e12ff440a5f969aeac9ea365
+size 1100409
logs/text2image-fine-tune/events.out.tfevents.1704366882.98a18e383ab0.18259.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51f53e21f8df488a4c2f81674fc559bdf0084dd63d3bc22531105eba9856ae55
+size 1099311
logs/text2image-fine-tune/events.out.tfevents.1704367032.98a18e383ab0.19727.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97dc3d27558abd44a4f677be36e5151b9a4347e40e6c7529208434b9817da459
+size 1099727
logs/text2image-fine-tune/events.out.tfevents.1704367550.98a18e383ab0.24490.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a90a325e131aa8ab68af766460270ad80d7b500f2806895ec7dd15a11a27259
+size 1098857
logs/text2image-fine-tune/events.out.tfevents.1704368031.98a18e383ab0.28733.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37194b66a275f8c1eef66207a50a9b39488f851a1fa77054f01061263e0d4f2b
+size 1099679
logs/text2image-fine-tune/events.out.tfevents.1704368183.98a18e383ab0.30280.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f7f168cea2a177d117c02652106d49f268a60606c0c781868cf3ddad83c6314
+size 568
logs/text2image-fine-tune/events.out.tfevents.1704368304.98a18e383ab0.31446.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1afd5a2437e51d7d0d061f7df701eeee77e99cc604b9cda487fe27e9c3ae9be
+size 568
logs/text2image-fine-tune/events.out.tfevents.1704371682.98a18e383ab0.58899.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bcf26ce696c6dbd966f603dcdb9f8b2dcd622f6257ad3d09a90cf49c5056625
+size 568
pytorch_lora_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfd6199e1e87c46d594ef7edaf43b8edc7db824de5ecd972c40c3317d55da665
+size 23390424
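The weight file is likewise only an LFS pointer in the diff. Once the real file has been fetched (git lfs pull), the LoRA tensors it contains can be inspected as sketched below; this is an illustrative snippet, not part of the repository, and assumes the safetensors package is installed.

```python
# Inspection sketch: list a few of the LoRA tensors stored in the weight file above.
from safetensors import safe_open

with safe_open("pytorch_lora_weights.safetensors", framework="pt") as f:
    for name in sorted(f.keys())[:5]:  # show a handful of entries
        print(name, tuple(f.get_tensor(name).shape))
```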