harshitaskh commited on
Commit
81eeba4
1 Parent(s): b3cbe8a

Upload folder using huggingface_hub

Browse files
Files changed (32) hide show
  1. .gitattributes +2 -0
  2. concept_images/README.md +15 -0
  3. concept_images/pexels-pavel-danilyuk-8638764.jpg +3 -0
  4. concept_images/pexels-puwadon-sangngern-13419211.jpg +3 -0
  5. concept_images/sd-concept-output/feature_extractor/preprocessor_config.json +28 -0
  6. concept_images/sd-concept-output/learned_embeds-step-1000.bin +3 -0
  7. concept_images/sd-concept-output/learned_embeds-step-1250.bin +3 -0
  8. concept_images/sd-concept-output/learned_embeds-step-1500.bin +3 -0
  9. concept_images/sd-concept-output/learned_embeds-step-1750.bin +3 -0
  10. concept_images/sd-concept-output/learned_embeds-step-2000.bin +3 -0
  11. concept_images/sd-concept-output/learned_embeds-step-250.bin +3 -0
  12. concept_images/sd-concept-output/learned_embeds-step-500.bin +3 -0
  13. concept_images/sd-concept-output/learned_embeds-step-750.bin +3 -0
  14. concept_images/sd-concept-output/learned_embeds.bin +3 -0
  15. concept_images/sd-concept-output/model_index.json +34 -0
  16. concept_images/sd-concept-output/scheduler/scheduler_config.json +20 -0
  17. concept_images/sd-concept-output/text_encoder/config.json +25 -0
  18. concept_images/sd-concept-output/text_encoder/model.safetensors +3 -0
  19. concept_images/sd-concept-output/tokenizer/added_tokens.json +3 -0
  20. concept_images/sd-concept-output/tokenizer/merges.txt +0 -0
  21. concept_images/sd-concept-output/tokenizer/special_tokens_map.json +24 -0
  22. concept_images/sd-concept-output/tokenizer/tokenizer_config.json +33 -0
  23. concept_images/sd-concept-output/tokenizer/vocab.json +0 -0
  24. concept_images/sd-concept-output/unet/config.json +71 -0
  25. concept_images/sd-concept-output/unet/diffusion_pytorch_model.safetensors +3 -0
  26. concept_images/sd-concept-output/vae/config.json +32 -0
  27. concept_images/sd-concept-output/vae/diffusion_pytorch_model.safetensors +3 -0
  28. concept_images/thumbsUp11.png +0 -0
  29. concept_images/thumbsUp13.png +0 -0
  30. concept_images/thumbsUp9.png +0 -0
  31. concept_images/token_identifier.txt +1 -0
  32. concept_images/type_of_concept.txt +1 -0
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ concept_images/pexels-pavel-danilyuk-8638764.jpg filter=lfs diff=lfs merge=lfs -text
37
+ concept_images/pexels-puwadon-sangngern-13419211.jpg filter=lfs diff=lfs merge=lfs -text
concept_images/README.md ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ base_model: stabilityai/stable-diffusion-2-1
4
+ ---
5
+ ### thumbsUp_v2 on Stable Diffusion
6
+ This is the `<thumbs-up>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb).
7
+
8
+ Here is the new concept you will be able to use as an `object`:
9
+ ![<thumbs-up> 0](https://huggingface.co/sd-concepts-library/thumbsup-v2/resolve/main/concept_images/thumbsUp9.png)
10
+ ![<thumbs-up> 1](https://huggingface.co/sd-concepts-library/thumbsup-v2/resolve/main/concept_images/thumbsUp11.png)
11
+ ![<thumbs-up> 2](https://huggingface.co/sd-concepts-library/thumbsup-v2/resolve/main/concept_images/thumbsUp13.png)
12
+ ![<thumbs-up> 3](https://huggingface.co/sd-concepts-library/thumbsup-v2/resolve/main/concept_images/pexels-puwadon-sangngern-13419211.jpg)
13
+ ![<thumbs-up> 4](https://huggingface.co/sd-concepts-library/thumbsup-v2/resolve/main/concept_images/pexels-pavel-danilyuk-8638764.jpg)
14
+ The trained textual-inversion output (learned embeddings and pipeline files) is in [`concept_images/sd-concept-output`](https://huggingface.co/sd-concepts-library/thumbsup-v2/tree/main/concept_images/sd-concept-output).
15
+
concept_images/pexels-pavel-danilyuk-8638764.jpg ADDED

Git LFS Details

  • SHA256: ff937d69c77d8806e6aeb70e15192183cec38b51a5e22a4e84da00950a1d3d65
  • Pointer size: 132 Bytes
  • Size of remote file: 1.51 MB
concept_images/pexels-puwadon-sangngern-13419211.jpg ADDED

Git LFS Details

  • SHA256: e2544fa0e452736850c17d47835b42522525e7fe5ea842bdfe62fe10798ca1e1
  • Pointer size: 132 Bytes
  • Size of remote file: 1.67 MB
concept_images/sd-concept-output/feature_extractor/preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "crop_size": {
3
+ "height": 224,
4
+ "width": 224
5
+ },
6
+ "do_center_crop": true,
7
+ "do_convert_rgb": true,
8
+ "do_normalize": true,
9
+ "do_rescale": true,
10
+ "do_resize": true,
11
+ "feature_extractor_type": "CLIPFeatureExtractor",
12
+ "image_mean": [
13
+ 0.48145466,
14
+ 0.4578275,
15
+ 0.40821073
16
+ ],
17
+ "image_processor_type": "CLIPImageProcessor",
18
+ "image_std": [
19
+ 0.26862954,
20
+ 0.26130258,
21
+ 0.27577711
22
+ ],
23
+ "resample": 3,
24
+ "rescale_factor": 0.00392156862745098,
25
+ "size": {
26
+ "shortest_edge": 224
27
+ }
28
+ }
concept_images/sd-concept-output/learned_embeds-step-1000.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe546781301c0050b74baf211ba64d43fba3e2ee54da860bd1a89e68ba576bad
3
+ size 4958
concept_images/sd-concept-output/learned_embeds-step-1250.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c50888d6c4d5ef25cb541559a709f334aa9270c17182b6f80fe8e01aa68cbc5f
3
+ size 4958
concept_images/sd-concept-output/learned_embeds-step-1500.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6b7207c210b9d4bb44aa66d79723916df614a1aff738c063c726578cc1e6bb7
3
+ size 4958
concept_images/sd-concept-output/learned_embeds-step-1750.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:29879539fbf401dddeebdb16889f3077deb518c3298bd0cabc43e04b2bc29a5f
3
+ size 4958
concept_images/sd-concept-output/learned_embeds-step-2000.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7317a5129200f897e0657de0844c4d523a0e845104a14549292b5e40521f308
3
+ size 4958
concept_images/sd-concept-output/learned_embeds-step-250.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb2bcfa4cb3a7c57ade2606a424241ad914c687eda13203720296841a0eda1ba
3
+ size 4955
concept_images/sd-concept-output/learned_embeds-step-500.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af8399ec77f4463621197e3a2c492272bf4b07ecb8cf6541b967802411ace97b
3
+ size 4955
concept_images/sd-concept-output/learned_embeds-step-750.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7cf1ca876bc4ce4af8086100afb53a7907d5ad763424584de23ddf010e4c3ab7
3
+ size 4955
concept_images/sd-concept-output/learned_embeds.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:900699ca30b09f0453ce0584933fc180f060f73950fc0d05caac78964a0c285e
3
+ size 4864
concept_images/sd-concept-output/model_index.json ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "StableDiffusionPipeline",
3
+ "_diffusers_version": "0.21.0.dev0",
4
+ "_name_or_path": "stabilityai/stable-diffusion-2-1",
5
+ "feature_extractor": [
6
+ "transformers",
7
+ "CLIPImageProcessor"
8
+ ],
9
+ "requires_safety_checker": false,
10
+ "safety_checker": [
11
+ null,
12
+ null
13
+ ],
14
+ "scheduler": [
15
+ "diffusers",
16
+ "DDIMScheduler"
17
+ ],
18
+ "text_encoder": [
19
+ "transformers",
20
+ "CLIPTextModel"
21
+ ],
22
+ "tokenizer": [
23
+ "transformers",
24
+ "CLIPTokenizer"
25
+ ],
26
+ "unet": [
27
+ "diffusers",
28
+ "UNet2DConditionModel"
29
+ ],
30
+ "vae": [
31
+ "diffusers",
32
+ "AutoencoderKL"
33
+ ]
34
+ }
concept_images/sd-concept-output/scheduler/scheduler_config.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "DDIMScheduler",
3
+ "_diffusers_version": "0.21.0.dev0",
4
+ "beta_end": 0.012,
5
+ "beta_schedule": "scaled_linear",
6
+ "beta_start": 0.00085,
7
+ "clip_sample": false,
8
+ "clip_sample_range": 1.0,
9
+ "dynamic_thresholding_ratio": 0.995,
10
+ "num_train_timesteps": 1000,
11
+ "prediction_type": "v_prediction",
12
+ "rescale_betas_zero_snr": false,
13
+ "sample_max_value": 1.0,
14
+ "set_alpha_to_one": false,
15
+ "skip_prk_steps": true,
16
+ "steps_offset": 1,
17
+ "thresholding": false,
18
+ "timestep_spacing": "leading",
19
+ "trained_betas": null
20
+ }
concept_images/sd-concept-output/text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "stabilityai/stable-diffusion-2-1",
3
+ "architectures": [
4
+ "CLIPTextModel"
5
+ ],
6
+ "attention_dropout": 0.0,
7
+ "bos_token_id": 0,
8
+ "dropout": 0.0,
9
+ "eos_token_id": 2,
10
+ "hidden_act": "gelu",
11
+ "hidden_size": 1024,
12
+ "initializer_factor": 1.0,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 4096,
15
+ "layer_norm_eps": 1e-05,
16
+ "max_position_embeddings": 77,
17
+ "model_type": "clip_text_model",
18
+ "num_attention_heads": 16,
19
+ "num_hidden_layers": 23,
20
+ "pad_token_id": 1,
21
+ "projection_dim": 512,
22
+ "torch_dtype": "float32",
23
+ "transformers_version": "4.32.0",
24
+ "vocab_size": 49409
25
+ }
concept_images/sd-concept-output/text_encoder/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b014276651034c74804aeee7596ca8a5beefafc4bea8dd006eed8cf83090e03
3
+ size 1361600400
concept_images/sd-concept-output/tokenizer/added_tokens.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "<thumbs-up>": 49408
3
+ }
concept_images/sd-concept-output/tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
concept_images/sd-concept-output/tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<|startoftext|>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "!",
17
+ "unk_token": {
18
+ "content": "<|endoftext|>",
19
+ "lstrip": false,
20
+ "normalized": true,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
concept_images/sd-concept-output/tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "bos_token": {
4
+ "__type": "AddedToken",
5
+ "content": "<|startoftext|>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false
10
+ },
11
+ "clean_up_tokenization_spaces": true,
12
+ "do_lower_case": true,
13
+ "eos_token": {
14
+ "__type": "AddedToken",
15
+ "content": "<|endoftext|>",
16
+ "lstrip": false,
17
+ "normalized": true,
18
+ "rstrip": false,
19
+ "single_word": false
20
+ },
21
+ "errors": "replace",
22
+ "model_max_length": 77,
23
+ "pad_token": "<|endoftext|>",
24
+ "tokenizer_class": "CLIPTokenizer",
25
+ "unk_token": {
26
+ "__type": "AddedToken",
27
+ "content": "<|endoftext|>",
28
+ "lstrip": false,
29
+ "normalized": true,
30
+ "rstrip": false,
31
+ "single_word": false
32
+ }
33
+ }
concept_images/sd-concept-output/tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
concept_images/sd-concept-output/unet/config.json ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "UNet2DConditionModel",
3
+ "_diffusers_version": "0.21.0.dev0",
4
+ "_name_or_path": "stabilityai/stable-diffusion-2-1",
5
+ "act_fn": "silu",
6
+ "addition_embed_type": null,
7
+ "addition_embed_type_num_heads": 64,
8
+ "addition_time_embed_dim": null,
9
+ "attention_head_dim": [
10
+ 5,
11
+ 10,
12
+ 20,
13
+ 20
14
+ ],
15
+ "attention_type": "default",
16
+ "block_out_channels": [
17
+ 320,
18
+ 640,
19
+ 1280,
20
+ 1280
21
+ ],
22
+ "center_input_sample": false,
23
+ "class_embed_type": null,
24
+ "class_embeddings_concat": false,
25
+ "conv_in_kernel": 3,
26
+ "conv_out_kernel": 3,
27
+ "cross_attention_dim": 1024,
28
+ "cross_attention_norm": null,
29
+ "down_block_types": [
30
+ "CrossAttnDownBlock2D",
31
+ "CrossAttnDownBlock2D",
32
+ "CrossAttnDownBlock2D",
33
+ "DownBlock2D"
34
+ ],
35
+ "downsample_padding": 1,
36
+ "dual_cross_attention": false,
37
+ "encoder_hid_dim": null,
38
+ "encoder_hid_dim_type": null,
39
+ "flip_sin_to_cos": true,
40
+ "freq_shift": 0,
41
+ "in_channels": 4,
42
+ "layers_per_block": 2,
43
+ "mid_block_only_cross_attention": null,
44
+ "mid_block_scale_factor": 1,
45
+ "mid_block_type": "UNetMidBlock2DCrossAttn",
46
+ "norm_eps": 1e-05,
47
+ "norm_num_groups": 32,
48
+ "num_attention_heads": null,
49
+ "num_class_embeds": null,
50
+ "only_cross_attention": false,
51
+ "out_channels": 4,
52
+ "projection_class_embeddings_input_dim": null,
53
+ "resnet_out_scale_factor": 1.0,
54
+ "resnet_skip_time_act": false,
55
+ "resnet_time_scale_shift": "default",
56
+ "sample_size": 96,
57
+ "time_cond_proj_dim": null,
58
+ "time_embedding_act_fn": null,
59
+ "time_embedding_dim": null,
60
+ "time_embedding_type": "positional",
61
+ "timestep_post_act": null,
62
+ "transformer_layers_per_block": 1,
63
+ "up_block_types": [
64
+ "UpBlock2D",
65
+ "CrossAttnUpBlock2D",
66
+ "CrossAttnUpBlock2D",
67
+ "CrossAttnUpBlock2D"
68
+ ],
69
+ "upcast_attention": true,
70
+ "use_linear_projection": true
71
+ }
concept_images/sd-concept-output/unet/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a3a4d7978884c5e4ef00b62641b1b544b257be2f6715d984188610ad6475ad2
3
+ size 1731904736
concept_images/sd-concept-output/vae/config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "AutoencoderKL",
3
+ "_diffusers_version": "0.21.0.dev0",
4
+ "_name_or_path": "stabilityai/stable-diffusion-2-1",
5
+ "act_fn": "silu",
6
+ "block_out_channels": [
7
+ 128,
8
+ 256,
9
+ 512,
10
+ 512
11
+ ],
12
+ "down_block_types": [
13
+ "DownEncoderBlock2D",
14
+ "DownEncoderBlock2D",
15
+ "DownEncoderBlock2D",
16
+ "DownEncoderBlock2D"
17
+ ],
18
+ "force_upcast": true,
19
+ "in_channels": 3,
20
+ "latent_channels": 4,
21
+ "layers_per_block": 2,
22
+ "norm_num_groups": 32,
23
+ "out_channels": 3,
24
+ "sample_size": 768,
25
+ "scaling_factor": 0.18215,
26
+ "up_block_types": [
27
+ "UpDecoderBlock2D",
28
+ "UpDecoderBlock2D",
29
+ "UpDecoderBlock2D",
30
+ "UpDecoderBlock2D"
31
+ ]
32
+ }
concept_images/sd-concept-output/vae/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3e4c08995484ee61270175e9e7a072b66a6e4eeb5f0c266667fe1f45b90daf9a
3
+ size 167335342
concept_images/thumbsUp11.png ADDED
concept_images/thumbsUp13.png ADDED
concept_images/thumbsUp9.png ADDED
concept_images/token_identifier.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ <thumbs-up>
concept_images/type_of_concept.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ object