AhmadMustafa committed
Commit 9ce76ba
Parent(s): 21d3a87
End of training

Files changed:
- .gitattributes +4 -0
- README.md +48 -0
- image_0.png +3 -0
- image_1.png +3 -0
- image_2.png +3 -0
- image_3.png +3 -0
- logs/text2image-fine-tune/1720977830.065001/events.out.tfevents.1720977830.95cb85c33385.8752.1 +3 -0
- logs/text2image-fine-tune/1720977830.0667355/hparams.yml +50 -0
- logs/text2image-fine-tune/1720978215.4643064/events.out.tfevents.1720978215.95cb85c33385.10676.1 +3 -0
- logs/text2image-fine-tune/1720978215.4662023/hparams.yml +54 -0
- logs/text2image-fine-tune/events.out.tfevents.1720977830.95cb85c33385.8752.0 +3 -0
- logs/text2image-fine-tune/events.out.tfevents.1720978215.95cb85c33385.10676.0 +3 -0
- pytorch_lora_weights.safetensors +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+image_0.png filter=lfs diff=lfs merge=lfs -text
+image_1.png filter=lfs diff=lfs merge=lfs -text
+image_2.png filter=lfs diff=lfs merge=lfs -text
+image_3.png filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,48 @@
---
base_model: stabilityai/stable-diffusion-xl-base-1.0
library_name: diffusers
license: creativeml-openrail-m
tags:
- stable-diffusion-xl
- stable-diffusion-xl-diffusers
- text-to-image
- diffusers
- diffusers-training
- lora
inference: true
---

<!-- This model card has been generated automatically according to the information the training script had access to. You
should probably proofread and complete it, then remove this comment. -->

# LoRA text2image fine-tuning - AhmadMustafa/sdxl-base-flowers-model-lora

These are LoRA adaptation weights for stabilityai/stable-diffusion-xl-base-1.0. The weights were fine-tuned on the AhmadMustafa/flower-captions-blip dataset. You can find some example images below.

![img_0](./image_0.png)
![img_1](./image_1.png)
![img_2](./image_2.png)
![img_3](./image_3.png)

LoRA for the text encoder was enabled: False.

Special VAE used for training: madebyollin/sdxl-vae-fp16-fix.

## Intended uses & limitations

#### How to use

```python
# TODO: add an example code snippet for running this diffusion pipeline
```
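
Until a snippet is added above, here is a minimal sketch of how these weights could be loaded with diffusers. The base model, VAE, adapter repository id, and prompt come from this card and the training config; the dtype and step count are illustrative assumptions:

```python
# Sketch only: load SDXL base + the fp16-fix VAE and apply this repo's LoRA weights.
import torch
from diffusers import AutoencoderKL, DiffusionPipeline

vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
)
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    vae=vae,
    torch_dtype=torch.float16,
).to("cuda")

# Load the LoRA adapter produced by this training run.
pipe.load_lora_weights("AhmadMustafa/sdxl-base-flowers-model-lora")

# The prompt below is the validation prompt used during training.
image = pipe(
    "a picture of a big black rose in a garden",
    num_inference_steps=30,
).images[0]
image.save("flower.png")
```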
41 |
+
|
42 |
+
#### Limitations and bias
|
43 |
+
|
44 |
+
[TODO: provide examples of latent issues and potential remediations]
|
45 |
+
|
46 |
+
## Training details
|
47 |
+
|
48 |
+
[TODO: describe the data used to train the model]
|
image_0.png
ADDED
Git LFS Details

image_1.png
ADDED
Git LFS Details

image_2.png
ADDED
Git LFS Details

image_3.png
ADDED
Git LFS Details
logs/text2image-fine-tune/1720977830.065001/events.out.tfevents.1720977830.95cb85c33385.8752.1
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6808f0cdb20848f1c1604f2994ec7da4e5e19e03cbd9c850eb5efa810103a92a
size 2282
logs/text2image-fine-tune/1720977830.0667355/hparams.yml
ADDED
@@ -0,0 +1,50 @@
adam_beta1: 0.9
adam_beta2: 0.999
adam_epsilon: 1.0e-08
adam_weight_decay: 0.01
allow_tf32: false
cache_dir: null
caption_column: text
center_crop: false
checkpointing_steps: 5000
checkpoints_total_limit: null
dataloader_num_workers: 0
dataset_config_name: null
dataset_name: AhmadMustafa/flower-captions-blip
enable_xformers_memory_efficient_attention: false
gradient_accumulation_steps: 1
gradient_checkpointing: false
hub_model_id: null
hub_token: null
image_column: image
learning_rate: 0.0001
local_rank: -1
logging_dir: logs
lr_scheduler: constant
lr_warmup_steps: 0
max_grad_norm: 1.0
max_train_samples: null
max_train_steps: 11000
mixed_precision: null
noise_offset: 0
num_train_epochs: 100
num_validation_images: 4
output_dir: sdxl-base-flowers-model-lora
prediction_type: null
pretrained_model_name_or_path: stabilityai/stable-diffusion-xl-base-1.0
push_to_hub: false
random_flip: true
rank: 4
report_to: tensorboard
resolution: 512
resume_from_checkpoint: null
revision: null
scale_lr: false
seed: 42
snr_gamma: null
train_batch_size: 1
train_data_dir: null
use_8bit_adam: false
validation_epochs: 1
validation_prompt: a picture of a big black rose in a garden
variant: null
logs/text2image-fine-tune/1720978215.4643064/events.out.tfevents.1720978215.95cb85c33385.10676.1
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8fc2469d86c52e1314e6a71d1d7df1d15a4869371d8aaa62eb23223f2c886b16
size 2610
logs/text2image-fine-tune/1720978215.4662023/hparams.yml
ADDED
@@ -0,0 +1,54 @@
adam_beta1: 0.9
adam_beta2: 0.999
adam_epsilon: 1.0e-08
adam_weight_decay: 0.01
allow_tf32: false
cache_dir: null
caption_column: text
center_crop: false
checkpointing_steps: 500
checkpoints_total_limit: null
dataloader_num_workers: 0
dataset_config_name: null
dataset_name: AhmadMustafa/flower-captions-blip
debug_loss: false
enable_npu_flash_attention: false
enable_xformers_memory_efficient_attention: false
gradient_accumulation_steps: 1
gradient_checkpointing: false
hub_model_id: null
hub_token: null
image_column: image
learning_rate: 0.0001
local_rank: -1
logging_dir: logs
lr_scheduler: constant
lr_warmup_steps: 0
max_grad_norm: 1.0
max_train_samples: null
max_train_steps: 220
mixed_precision: fp16
noise_offset: 0
num_train_epochs: 2
num_validation_images: 4
output_dir: sdxl-base-flowers-model-lora
prediction_type: null
pretrained_model_name_or_path: stabilityai/stable-diffusion-xl-base-1.0
pretrained_vae_model_name_or_path: madebyollin/sdxl-vae-fp16-fix
push_to_hub: true
random_flip: true
rank: 4
report_to: tensorboard
resolution: 1024
resume_from_checkpoint: null
revision: null
scale_lr: false
seed: 42
snr_gamma: null
train_batch_size: 1
train_data_dir: null
train_text_encoder: false
use_8bit_adam: false
validation_epochs: 1
validation_prompt: a picture of a big black rose in a garden
variant: null
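
The two hparams.yml files above capture two different runs (the first at resolution 512 without mixed precision, the second at 1024 with fp16 and the fp16-fix VAE). A small sketch, assuming a local clone containing both files, to list exactly which settings differ:

```python
# Sketch: diff the two logged hyperparameter files to see what changed between runs.
import yaml

with open("logs/text2image-fine-tune/1720977830.0667355/hparams.yml") as f:
    run_1 = yaml.safe_load(f)
with open("logs/text2image-fine-tune/1720978215.4662023/hparams.yml") as f:
    run_2 = yaml.safe_load(f)

for key in sorted(set(run_1) | set(run_2)):
    if run_1.get(key) != run_2.get(key):
        print(f"{key}: {run_1.get(key)!r} -> {run_2.get(key)!r}")
```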
logs/text2image-fine-tune/events.out.tfevents.1720977830.95cb85c33385.8752.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e37bd69490eca651a0de4634c08065c7dd994adf66876af83fbbbdfa33b22128
size 88
logs/text2image-fine-tune/events.out.tfevents.1720978215.95cb85c33385.10676.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8a6388ce42e263f440f6e9b4544d60b1b726286750643e185609fc24d2c3cc3f
size 15726796
pytorch_lora_weights.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:785df8ce84874b7ba3a8ea3f6378543df1559565a80582db96af91a35c59d6b5
size 23390424
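
The safetensors file above is stored as an LFS pointer; after `git lfs pull`, a short sketch (assuming the file sits in the repository root as committed) for inspecting the LoRA tensors it contains:

```python
# Sketch: list the LoRA tensor names, shapes, and dtypes stored in the checkpoint.
from safetensors import safe_open

with safe_open("pytorch_lora_weights.safetensors", framework="pt") as f:
    for name in f.keys():
        tensor = f.get_tensor(name)
        print(name, tuple(tensor.shape), tensor.dtype)
```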