End of training

- README.md +5 -0
- feature_extractor/preprocessor_config.json +1 -1
- logs/dreambooth/1726453675.3313568/events.out.tfevents.1726453675.cmpsrv02.473383.1 +3 -0
- logs/dreambooth/1726453675.3349614/hparams.yml +60 -0
- logs/dreambooth/events.out.tfevents.1726453675.cmpsrv02.473383.0 +3 -0
- model_index.json +2 -2
- safety_checker/config.json +1 -1
- text_encoder/config.json +2 -2
- unet/config.json +1 -1
- unet/diffusion_pytorch_model.safetensors +1 -1
- vae/config.json +1 -1
README.md CHANGED
@@ -8,6 +8,11 @@ tags:
 - diffusers-training
 - stable-diffusion
 - stable-diffusion-diffusers
+- text-to-image
+- dreambooth
+- diffusers-training
+- stable-diffusion
+- stable-diffusion-diffusers
 inference: true
 instance_prompt: a photo of sks dog
 ---
feature_extractor/preprocessor_config.json CHANGED
@@ -13,7 +13,7 @@
     0.4578275,
     0.40821073
   ],
-  "image_processor_type": "
+  "image_processor_type": "CLIPFeatureExtractor",
   "image_std": [
     0.26862954,
     0.26130258,
logs/dreambooth/1726453675.3313568/events.out.tfevents.1726453675.cmpsrv02.473383.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a69dc9ce41f62f9d566c7eb7e0f65fc85d3e7e09ddc746e0c50933f269cc70c7
+size 2742
logs/dreambooth/1726453675.3349614/hparams.yml ADDED
@@ -0,0 +1,60 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+center_crop: false
+checkpointing_steps: 500
+checkpoints_total_limit: null
+class_data_dir: null
+class_labels_conditioning: null
+class_prompt: null
+dataloader_num_workers: 0
+enable_xformers_memory_efficient_attention: false
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+hub_model_id: null
+hub_token: null
+instance_data_dir: ./samples
+instance_prompt: a photo of sks asian girl
+learning_rate: 5.0e-06
+local_rank: 0
+logging_dir: logs
+lr_num_cycles: 1
+lr_power: 1.0
+lr_scheduler: constant
+lr_warmup_steps: 0
+max_grad_norm: 1.0
+max_train_steps: 400
+mixed_precision: null
+num_class_images: 100
+num_train_epochs: 100
+num_validation_images: 4
+offset_noise: false
+output_dir: ./save_path
+pre_compute_text_embeddings: false
+pretrained_model_name_or_path: wyyadd/sd-1.5
+prior_generation_precision: null
+prior_loss_weight: 1.0
+push_to_hub: true
+report_to: tensorboard
+resolution: 512
+resume_from_checkpoint: null
+revision: null
+sample_batch_size: 4
+scale_lr: false
+seed: null
+set_grads_to_none: false
+skip_save_text_encoder: false
+snr_gamma: null
+text_encoder_use_attention_mask: false
+tokenizer_max_length: null
+tokenizer_name: null
+train_batch_size: 1
+train_text_encoder: false
+use_8bit_adam: false
+validation_prompt: null
+validation_scheduler: DPMSolverMultistepScheduler
+validation_steps: 100
+variant: null
+with_prior_preservation: false
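The hparams.yml above is plain YAML, so the run configuration can be read back programmatically. A minimal sketch, assuming PyYAML is installed and a local checkout of this repo:

```python
# Minimal sketch: inspect the DreamBooth hyperparameters logged in this commit.
# Assumes PyYAML is installed and the repo is checked out locally.
import yaml

with open("logs/dreambooth/1726453675.3349614/hparams.yml") as f:
    hparams = yaml.safe_load(f)

print(hparams["instance_prompt"])   # a photo of sks asian girl
print(hparams["learning_rate"])     # 5e-06
print(hparams["max_train_steps"])   # 400
```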
logs/dreambooth/events.out.tfevents.1726453675.cmpsrv02.473383.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:839cc56082576807f16208b4eae0d4814a9fe9a413c9017d35115dada8268ab4
+size 33434
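Both event files are stored as Git LFS pointers, so they need to be pulled before they can be parsed. A minimal sketch of reading the logged scalars with TensorBoard's EventAccumulator, assuming the `tensorboard` package is installed:

```python
# Minimal sketch: read the scalars (e.g. training loss) from the event
# files added above. Assumes `tensorboard` is installed and the LFS
# files have been pulled; the pointer files alone are not parseable.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("logs/dreambooth")  # directory containing the event files
acc.Reload()
for tag in acc.Tags()["scalars"]:
    events = acc.Scalars(tag)
    print(f"{tag}: {len(events)} points, last value {events[-1].value:.4f}")
```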
model_index.json CHANGED
@@ -1,10 +1,10 @@
 {
   "_class_name": "StableDiffusionPipeline",
   "_diffusers_version": "0.31.0.dev0",
-  "_name_or_path": "
+  "_name_or_path": "wyyadd/sd-1.5",
   "feature_extractor": [
     "transformers",
-    "
+    "CLIPFeatureExtractor"
   ],
   "image_encoder": [
     null,
safety_checker/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/u/home/tangp/.cache/huggingface/hub/models--
+  "_name_or_path": "/u/home/tangp/.cache/huggingface/hub/models--wyyadd--sd-1.5/snapshots/0abff5314012e69e9d1fb5be188cd1e340e0ae3f/safety_checker",
   "architectures": [
     "StableDiffusionSafetyChecker"
   ],
text_encoder/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "
+  "_name_or_path": "wyyadd/sd-1.5",
   "architectures": [
     "CLIPTextModel"
   ],
@@ -18,7 +18,7 @@
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
   "pad_token_id": 1,
-  "projection_dim":
+  "projection_dim": 768,
   "torch_dtype": "float16",
   "transformers_version": "4.44.2",
   "vocab_size": 49408
unet/config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "_class_name": "UNet2DConditionModel",
   "_diffusers_version": "0.31.0.dev0",
-  "_name_or_path": "
+  "_name_or_path": "wyyadd/sd-1.5",
   "act_fn": "silu",
   "addition_embed_type": null,
   "addition_embed_type_num_heads": 64,
unet/diffusion_pytorch_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:cb13af3736f736ade9b41531e11dd9d07375b3d000c59798d8cf831457e5a7ce
 size 3438167536
vae/config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "_class_name": "AutoencoderKL",
   "_diffusers_version": "0.31.0.dev0",
-  "_name_or_path": "/u/home/tangp/.cache/huggingface/hub/models--
+  "_name_or_path": "/u/home/tangp/.cache/huggingface/hub/models--wyyadd--sd-1.5/snapshots/0abff5314012e69e9d1fb5be188cd1e340e0ae3f/vae",
   "act_fn": "silu",
   "block_out_channels": [
     128,
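Taken together, the diff records a DreamBooth fine-tune of wyyadd/sd-1.5 saved as a standard StableDiffusionPipeline. A minimal sketch of sampling from the result, assuming a local checkout with the LFS weights pulled; float16 matches the torch_dtype in text_encoder/config.json, and the scheduler swap mirrors validation_scheduler in hparams.yml:

```python
# Minimal sketch: load the fine-tuned pipeline and sample with the
# training prompt. "." is a placeholder for a local checkout of this
# repo (or a hub repo id); requires a CUDA GPU for the .to("cuda") call.
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(".", torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

# Instance prompt from hparams.yml.
image = pipe("a photo of sks asian girl", num_inference_steps=25).images[0]
image.save("sample.png")
```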