EpsilonGreedy commited on
Commit
8dd90b0
1 Parent(s): e11acf7

End of training

Browse files
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ images_0.png filter=lfs diff=lfs merge=lfs -text
37
+ images_1.png filter=lfs diff=lfs merge=lfs -text
38
+ images_2.png filter=lfs diff=lfs merge=lfs -text
39
+ images_3.png filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: creativeml-openrail-m
3
+ library_name: diffusers
4
+ tags:
5
+ - stable-diffusion
6
+ - stable-diffusion-diffusers
7
+ - text-to-image
8
+ - diffusers
9
+ - controlnet
10
+ - diffusers-training
11
+ base_model: runwayml/stable-diffusion-v1-5
12
+ inference: true
13
+ ---
14
+
15
+ <!-- This model card has been generated automatically according to the information the training script had access to. You
16
+ should probably proofread and complete it, then remove this comment. -->
17
+
18
+
19
+ # controlnet-EpsilonGreedy/Clothes2Person5e-060.5
20
+
21
+ These are ControlNet weights trained on runwayml/stable-diffusion-v1-5 with a new type of conditioning.
22
+ You can find some example images below.
23
+
24
+ prompt: C2PDress, a woman, refracted line and sparkles, bold graphics, suki, barcodes, gliter, round-cropped, flat texture, galactic, palladium, lower part of face, stands in front of a white background
25
+ ![images_0](./images_0.png)
26
+ prompt: C2PUpperBody, a woman wearing black leather skirt, anthro, minimalist photorealist, t-top, pelisse, a new, features between french, inspired by James Baynes, flat icon, oppai, cut out, very ahestetic, stands in front of a white background
27
+ ![images_1](./images_1.png)
28
+ prompt: C2PLowerBody, a woman in a black top with a black jacket, elegant cape, wearing a cropped top, 4 arms, posh, cloth sim, inspired by Rose ONeill, listing image, proportions off, bubblegum, lower part of face, stands in front of a white background
29
+ ![images_2](./images_2.png)
30
+ prompt: C2PUpperBody, a man in tan shorts standing in front of a white wall, discord moderator, round-cropped, summer vibrance, simplified, dividing it into nine quarters, sculls, scandinavian style, nbc, lower part of face, stands in front of a white background
31
+ ![images_3](./images_3.png)
32
+
33
+
34
+
35
+ ## Intended uses & limitations
36
+
37
+ #### How to use
38
+
39
+ ```python
40
+ # TODO: add an example code snippet for running this diffusion pipeline
41
+ ```
42
+
43
+ #### Limitations and bias
44
+
45
+ [TODO: provide examples of latent issues and potential remediations]
46
+
47
+ ## Training details
48
+
49
+ [TODO: describe the data used to train the model]
checkpoint-2812/controlnet/config.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "ControlNetModel",
3
+ "_diffusers_version": "0.30.0.dev0",
4
+ "act_fn": "silu",
5
+ "addition_embed_type": null,
6
+ "addition_embed_type_num_heads": 64,
7
+ "addition_time_embed_dim": null,
8
+ "attention_head_dim": 8,
9
+ "block_out_channels": [
10
+ 320,
11
+ 640,
12
+ 1280,
13
+ 1280
14
+ ],
15
+ "class_embed_type": null,
16
+ "conditioning_channels": 3,
17
+ "conditioning_embedding_out_channels": [
18
+ 16,
19
+ 32,
20
+ 96,
21
+ 256
22
+ ],
23
+ "controlnet_conditioning_channel_order": "rgb",
24
+ "cross_attention_dim": 768,
25
+ "down_block_types": [
26
+ "CrossAttnDownBlock2D",
27
+ "CrossAttnDownBlock2D",
28
+ "CrossAttnDownBlock2D",
29
+ "DownBlock2D"
30
+ ],
31
+ "downsample_padding": 1,
32
+ "encoder_hid_dim": null,
33
+ "encoder_hid_dim_type": null,
34
+ "flip_sin_to_cos": true,
35
+ "freq_shift": 0,
36
+ "global_pool_conditions": false,
37
+ "in_channels": 4,
38
+ "layers_per_block": 2,
39
+ "mid_block_scale_factor": 1,
40
+ "mid_block_type": "UNetMidBlock2DCrossAttn",
41
+ "norm_eps": 1e-05,
42
+ "norm_num_groups": 32,
43
+ "num_attention_heads": null,
44
+ "num_class_embeds": null,
45
+ "only_cross_attention": false,
46
+ "projection_class_embeddings_input_dim": null,
47
+ "resnet_time_scale_shift": "default",
48
+ "transformer_layers_per_block": 1,
49
+ "upcast_attention": false,
50
+ "use_linear_projection": false
51
+ }
checkpoint-2812/controlnet/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:996a72719fed6a8d8a6ca72e684aa465f05778b11980aa4d174e0e2a2a56d60a
3
+ size 1445157120
checkpoint-2812/optimizer.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca2a6cc56fe182f6b7ec73e5d7148b5dbec09ef07d3322e250317fdc4cab1267
3
+ size 725253818
checkpoint-2812/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79a4599609103fd5ef51a39251f881055d7b6dabf7c01e37eeb0c2f880938137
3
+ size 14604
checkpoint-2812/scheduler.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c63701b460d8a42c46a58df1b4eae030a10b19724b54a4b11f8f4b5161090fb7
3
+ size 1000
config.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "ControlNetModel",
3
+ "_diffusers_version": "0.30.0.dev0",
4
+ "act_fn": "silu",
5
+ "addition_embed_type": null,
6
+ "addition_embed_type_num_heads": 64,
7
+ "addition_time_embed_dim": null,
8
+ "attention_head_dim": 8,
9
+ "block_out_channels": [
10
+ 320,
11
+ 640,
12
+ 1280,
13
+ 1280
14
+ ],
15
+ "class_embed_type": null,
16
+ "conditioning_channels": 3,
17
+ "conditioning_embedding_out_channels": [
18
+ 16,
19
+ 32,
20
+ 96,
21
+ 256
22
+ ],
23
+ "controlnet_conditioning_channel_order": "rgb",
24
+ "cross_attention_dim": 768,
25
+ "down_block_types": [
26
+ "CrossAttnDownBlock2D",
27
+ "CrossAttnDownBlock2D",
28
+ "CrossAttnDownBlock2D",
29
+ "DownBlock2D"
30
+ ],
31
+ "downsample_padding": 1,
32
+ "encoder_hid_dim": null,
33
+ "encoder_hid_dim_type": null,
34
+ "flip_sin_to_cos": true,
35
+ "freq_shift": 0,
36
+ "global_pool_conditions": false,
37
+ "in_channels": 4,
38
+ "layers_per_block": 2,
39
+ "mid_block_scale_factor": 1,
40
+ "mid_block_type": "UNetMidBlock2DCrossAttn",
41
+ "norm_eps": 1e-05,
42
+ "norm_num_groups": 32,
43
+ "num_attention_heads": null,
44
+ "num_class_embeds": null,
45
+ "only_cross_attention": false,
46
+ "projection_class_embeddings_input_dim": null,
47
+ "resnet_time_scale_shift": "default",
48
+ "transformer_layers_per_block": 1,
49
+ "upcast_attention": false,
50
+ "use_linear_projection": false
51
+ }
diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:996a72719fed6a8d8a6ca72e684aa465f05778b11980aa4d174e0e2a2a56d60a
3
+ size 1445157120
image_control.png ADDED
images_0.png ADDED

Git LFS Details

  • SHA256: 7bc94a7560c6eb85db7c58f97d81f7a32a8f18b6eadc4ebe0a7cb048c277c334
  • Pointer size: 132 Bytes
  • Size of remote file: 2.52 MB
images_1.png ADDED

Git LFS Details

  • SHA256: 22b28fc68bac65de7bf095131eb1ce755949f854e6ebef6ead07d5dd27cf0d52
  • Pointer size: 132 Bytes
  • Size of remote file: 2.06 MB
images_2.png ADDED

Git LFS Details

  • SHA256: 2eef94abc1024a228bfc8bb5e0010028e96a5d5f6989e7682e21d3907aa36e9d
  • Pointer size: 132 Bytes
  • Size of remote file: 2.01 MB
images_3.png ADDED

Git LFS Details

  • SHA256: e1eadbe99829eeac3c72061588728e20ce6dcccea6376f403b93c5743f4c4c82
  • Pointer size: 132 Bytes
  • Size of remote file: 1.67 MB
logs/train_controlnet/1719649876.2860854/events.out.tfevents.1719649876.5ff2e31abbe4.110.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c19f7c066894553de3236172748e56ba3885d2a96e8723b4e6e17776353ebde
3
+ size 2465
logs/train_controlnet/1719649876.2886508/hparams.yml ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ adam_beta1: 0.9
2
+ adam_beta2: 0.999
3
+ adam_epsilon: 1.0e-08
4
+ adam_weight_decay: 0.01
5
+ allow_tf32: false
6
+ cache_dir: null
7
+ caption_column: text
8
+ checkpointing_steps: 2812
9
+ checkpoints_total_limit: null
10
+ conditioning_image_column: conditioning_image
11
+ controlnet_model_name_or_path: null
12
+ dataloader_num_workers: 4
13
+ dataset_config_name: null
14
+ dataset_name: EpsilonGreedy/Clothes2Person
15
+ enable_xformers_memory_efficient_attention: false
16
+ gradient_accumulation_steps: 4
17
+ gradient_checkpointing: true
18
+ hub_model_id: EpsilonGreedy/Clothes2Person5e-060.5
19
+ hub_token: null
20
+ image_column: image
21
+ learning_rate: 5.0e-06
22
+ logging_dir: logs
23
+ lr_num_cycles: 1
24
+ lr_power: 1.0
25
+ lr_scheduler: constant_with_warmup
26
+ lr_warmup_steps: 500
27
+ max_grad_norm: 1.0
28
+ max_train_samples: null
29
+ max_train_steps: 2812
30
+ mixed_precision: null
31
+ num_train_epochs: 1
32
+ num_validation_images: 4
33
+ output_dir: /kaggle/working/output/Clothes2Person
34
+ pretrained_model_name_or_path: runwayml/stable-diffusion-v1-5
35
+ proportion_empty_prompts: 0.5
36
+ push_to_hub: true
37
+ report_to: tensorboard
38
+ resolution: 512
39
+ resume_from_checkpoint: latest
40
+ revision: null
41
+ scale_lr: false
42
+ seed: null
43
+ set_grads_to_none: false
44
+ tokenizer_name: null
45
+ tracker_project_name: train_controlnet
46
+ train_batch_size: 3
47
+ train_data_dir: null
48
+ use_8bit_adam: true
49
+ validation_steps: 2812
50
+ variant: null
logs/train_controlnet/events.out.tfevents.1719649876.5ff2e31abbe4.110.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:78ff572af2eaf6db043480bf75d178a3eb42bc392e57ab548c1558b19158128f
3
+ size 17197146