EpsilonGreedy committed on
Commit
01a77f6
1 Parent(s): 48a0613

End of training

Browse files
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ images_0.png filter=lfs diff=lfs merge=lfs -text
37
+ images_1.png filter=lfs diff=lfs merge=lfs -text
38
+ images_2.png filter=lfs diff=lfs merge=lfs -text
39
+ images_3.png filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: creativeml-openrail-m
3
+ library_name: diffusers
4
+ tags:
5
+ - stable-diffusion
6
+ - stable-diffusion-diffusers
7
+ - text-to-image
8
+ - diffusers
9
+ - controlnet
10
+ - diffusers-training
11
+ base_model: runwayml/stable-diffusion-v1-5
12
+ inference: true
13
+ ---
14
+
15
+ <!-- This model card has been generated automatically according to the information the training script had access to. You
16
+ should probably proofread and complete it, then remove this comment. -->
17
+
18
+
19
+ # controlnet-EpsilonGreedy/Clothes2PersonUntrained4e-060
20
+
21
+ These are controlnet weights trained on runwayml/stable-diffusion-v1-5 with a new type of conditioning.
22
+ You can find some example images below.
23
+
24
+ prompt: C2PDress, a woman, refracted line and sparkles, bold graphics, suki, barcodes, gliter, round-cropped, flat texture, galactic, palladium, lower part of face, stands in front of a white background
25
+ ![images_0](./images_0.png)
26
+ prompt: C2PUpperBody, a woman wearing black leather skirt, anthro, minimalist photorealist, t-top, pelisse, a new, features between french, inspired by James Baynes, flat icon, oppai, cut out, very ahestetic, stands in front of a white background
27
+ ![images_1](./images_1.png)
28
+ prompt: C2PLowerBody, a woman in a black top with a black jacket, elegant cape, wearing a cropped top, 4 arms, posh, cloth sim, inspired by Rose ONeill, listing image, proportions off, bubblegum, lower part of face, stands in front of a white background
29
+ ![images_2](./images_2.png)
30
+ prompt: C2PUpperBody, a man in tan shorts standing in front of a white wall, discord moderator, round-cropped, summer vibrance, simplified, dividing it into nine quarters, sculls, scandinavian style, nbc, lower part of face, stands in front of a white background
31
+ ![images_3](./images_3.png)
32
+
33
+
34
+
35
+ ## Intended uses & limitations
36
+
37
+ #### How to use
38
+
39
+ ```python
40
+ # TODO: add an example code snippet for running this diffusion pipeline
41
+ ```
42
+
43
+ #### Limitations and bias
44
+
45
+ [TODO: provide examples of latent issues and potential remediations]
46
+
47
+ ## Training details
48
+
49
+ [TODO: describe the data used to train the model]
checkpoint-2/controlnet/config.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "ControlNetModel",
3
+ "_diffusers_version": "0.30.0.dev0",
4
+ "act_fn": "silu",
5
+ "addition_embed_type": null,
6
+ "addition_embed_type_num_heads": 64,
7
+ "addition_time_embed_dim": null,
8
+ "attention_head_dim": 8,
9
+ "block_out_channels": [
10
+ 320,
11
+ 640,
12
+ 1280,
13
+ 1280
14
+ ],
15
+ "class_embed_type": null,
16
+ "conditioning_channels": 3,
17
+ "conditioning_embedding_out_channels": [
18
+ 16,
19
+ 32,
20
+ 96,
21
+ 256
22
+ ],
23
+ "controlnet_conditioning_channel_order": "rgb",
24
+ "cross_attention_dim": 768,
25
+ "down_block_types": [
26
+ "CrossAttnDownBlock2D",
27
+ "CrossAttnDownBlock2D",
28
+ "CrossAttnDownBlock2D",
29
+ "DownBlock2D"
30
+ ],
31
+ "downsample_padding": 1,
32
+ "encoder_hid_dim": null,
33
+ "encoder_hid_dim_type": null,
34
+ "flip_sin_to_cos": true,
35
+ "freq_shift": 0,
36
+ "global_pool_conditions": false,
37
+ "in_channels": 4,
38
+ "layers_per_block": 2,
39
+ "mid_block_scale_factor": 1,
40
+ "mid_block_type": "UNetMidBlock2DCrossAttn",
41
+ "norm_eps": 1e-05,
42
+ "norm_num_groups": 32,
43
+ "num_attention_heads": null,
44
+ "num_class_embeds": null,
45
+ "only_cross_attention": false,
46
+ "projection_class_embeddings_input_dim": null,
47
+ "resnet_time_scale_shift": "default",
48
+ "transformer_layers_per_block": 1,
49
+ "upcast_attention": false,
50
+ "use_linear_projection": false
51
+ }
checkpoint-2/controlnet/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c797daf492cb9e618130d80e39ac9e4087a5b32c7c08e0d65f37c0fdcf21014
3
+ size 1445157120
checkpoint-2/optimizer.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c93c5d4daa47639aacab7778e9959baf17ee97786e384e1cfb6c67eaf03c70a
3
+ size 725253498
checkpoint-2/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9667421749d96c532897a0e876d2c6b3bb9f8433d4893d123de77aa862f81bda
3
+ size 14604
checkpoint-2/scheduler.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1bd8b951c51f7817b7131895e165e3c601ab4a6e8087a6b8d82837c799184824
3
+ size 1000
config.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "ControlNetModel",
3
+ "_diffusers_version": "0.30.0.dev0",
4
+ "act_fn": "silu",
5
+ "addition_embed_type": null,
6
+ "addition_embed_type_num_heads": 64,
7
+ "addition_time_embed_dim": null,
8
+ "attention_head_dim": 8,
9
+ "block_out_channels": [
10
+ 320,
11
+ 640,
12
+ 1280,
13
+ 1280
14
+ ],
15
+ "class_embed_type": null,
16
+ "conditioning_channels": 3,
17
+ "conditioning_embedding_out_channels": [
18
+ 16,
19
+ 32,
20
+ 96,
21
+ 256
22
+ ],
23
+ "controlnet_conditioning_channel_order": "rgb",
24
+ "cross_attention_dim": 768,
25
+ "down_block_types": [
26
+ "CrossAttnDownBlock2D",
27
+ "CrossAttnDownBlock2D",
28
+ "CrossAttnDownBlock2D",
29
+ "DownBlock2D"
30
+ ],
31
+ "downsample_padding": 1,
32
+ "encoder_hid_dim": null,
33
+ "encoder_hid_dim_type": null,
34
+ "flip_sin_to_cos": true,
35
+ "freq_shift": 0,
36
+ "global_pool_conditions": false,
37
+ "in_channels": 4,
38
+ "layers_per_block": 2,
39
+ "mid_block_scale_factor": 1,
40
+ "mid_block_type": "UNetMidBlock2DCrossAttn",
41
+ "norm_eps": 1e-05,
42
+ "norm_num_groups": 32,
43
+ "num_attention_heads": null,
44
+ "num_class_embeds": null,
45
+ "only_cross_attention": false,
46
+ "projection_class_embeddings_input_dim": null,
47
+ "resnet_time_scale_shift": "default",
48
+ "transformer_layers_per_block": 1,
49
+ "upcast_attention": false,
50
+ "use_linear_projection": false
51
+ }
diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c797daf492cb9e618130d80e39ac9e4087a5b32c7c08e0d65f37c0fdcf21014
3
+ size 1445157120
image_control.png ADDED
images_0.png ADDED

Git LFS Details

  • SHA256: 27cfb911de99b7ab37b9bff7600f3997a56786ccdb5747eaf27fb128a1d49352
  • Pointer size: 132 Bytes
  • Size of remote file: 2.97 MB
images_1.png ADDED

Git LFS Details

  • SHA256: 8b8e250de3e773eb99ef491ddbc69c783ba95b527230e963f8b10c420c40e253
  • Pointer size: 132 Bytes
  • Size of remote file: 2.35 MB
images_2.png ADDED

Git LFS Details

  • SHA256: f177ffcb47e1bc1179ad70ee9a3ba2026354857ac4d756364cb1bd8197ca9dd2
  • Pointer size: 132 Bytes
  • Size of remote file: 2.29 MB
images_3.png ADDED

Git LFS Details

  • SHA256: e0de4147b7078499acd9e894b6a59d8037a1797c77935de92f9560cc86c06134
  • Pointer size: 132 Bytes
  • Size of remote file: 2.56 MB
logs/train_controlnet/1720078310.8012874/events.out.tfevents.1720078310.915fcea12aba.110.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a74c3957e461ec20d361832dc508543bf19d048fc4287a24795fd76efa48db2
3
+ size 2472
logs/train_controlnet/1720078310.8037698/hparams.yml ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ adam_beta1: 0.9
2
+ adam_beta2: 0.999
3
+ adam_epsilon: 1.0e-08
4
+ adam_weight_decay: 0.01
5
+ allow_tf32: false
6
+ cache_dir: null
7
+ caption_column: text
8
+ checkpointing_steps: 2
9
+ checkpoints_total_limit: null
10
+ conditioning_image_column: conditioning_image
11
+ controlnet_model_name_or_path: null
12
+ dataloader_num_workers: 4
13
+ dataset_config_name: null
14
+ dataset_name: EpsilonGreedy/Clothes2Person
15
+ enable_xformers_memory_efficient_attention: false
16
+ gradient_accumulation_steps: 4
17
+ gradient_checkpointing: true
18
+ hub_model_id: EpsilonGreedy/Clothes2PersonUntrained4e-060
19
+ hub_token: null
20
+ image_column: image
21
+ learning_rate: 4.0e-06
22
+ logging_dir: logs
23
+ lr_num_cycles: 1
24
+ lr_power: 1.0
25
+ lr_scheduler: constant_with_warmup
26
+ lr_warmup_steps: 500
27
+ max_grad_norm: 1.0
28
+ max_train_samples: null
29
+ max_train_steps: 2
30
+ mixed_precision: null
31
+ num_train_epochs: 1
32
+ num_validation_images: 4
33
+ output_dir: /kaggle/working/output/Clothes2Person
34
+ pretrained_model_name_or_path: runwayml/stable-diffusion-v1-5
35
+ proportion_empty_prompts: 0.0
36
+ push_to_hub: true
37
+ report_to: tensorboard
38
+ resolution: 512
39
+ resume_from_checkpoint: latest
40
+ revision: null
41
+ scale_lr: false
42
+ seed: null
43
+ set_grads_to_none: false
44
+ tokenizer_name: null
45
+ tracker_project_name: train_controlnet
46
+ train_batch_size: 3
47
+ train_data_dir: null
48
+ use_8bit_adam: true
49
+ validation_steps: 2
50
+ variant: null
logs/train_controlnet/events.out.tfevents.1720078310.915fcea12aba.110.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a63b8a99b91cd013c97ac22fdf2aa7204af008fab81a80499cad816fb362ff53
3
+ size 20347949