kirisame committed on
Commit
44d0d4d
1 Parent(s): aa37d5b

add v1-2 fp16 weights

Browse files
model_index.json CHANGED
@@ -9,14 +9,14 @@
9
  "stable_diffusion",
10
  "StableDiffusionSafetyChecker"
11
  ],
12
- "text_encoder": [
13
- "transformers",
14
- "CLIPTextModel"
15
- ],
16
  "scheduler": [
17
  "diffusers",
18
  "DDIMScheduler"
19
  ],
 
 
 
 
20
  "tokenizer": [
21
  "transformers",
22
  "CLIPTokenizer"
 
9
  "stable_diffusion",
10
  "StableDiffusionSafetyChecker"
11
  ],
 
 
 
 
12
  "scheduler": [
13
  "diffusers",
14
  "DDIMScheduler"
15
  ],
16
+ "text_encoder": [
17
+ "transformers",
18
+ "CLIPTextModel"
19
+ ],
20
  "tokenizer": [
21
  "transformers",
22
  "CLIPTokenizer"
safety_checker/config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "./safety_module",
3
  "architectures": [
4
  "StableDiffusionSafetyChecker"
5
  ],
@@ -68,6 +68,7 @@
68
  "sep_token_id": null,
69
  "task_specific_params": null,
70
  "temperature": 1.0,
 
71
  "tie_encoder_decoder": false,
72
  "tie_word_embeddings": true,
73
  "tokenizer_class": null,
@@ -75,7 +76,7 @@
75
  "top_p": 1.0,
76
  "torch_dtype": null,
77
  "torchscript": false,
78
- "transformers_version": "4.21.0.dev0",
79
  "typical_p": 1.0,
80
  "use_bfloat16": false,
81
  "vocab_size": 49408
@@ -86,7 +87,7 @@
86
  "num_attention_heads": 12,
87
  "num_hidden_layers": 12
88
  },
89
- "torch_dtype": "float32",
90
  "transformers_version": null,
91
  "vision_config": {
92
  "_name_or_path": "",
@@ -133,6 +134,7 @@
133
  "num_attention_heads": 16,
134
  "num_beam_groups": 1,
135
  "num_beams": 1,
 
136
  "num_hidden_layers": 24,
137
  "num_return_sequences": 1,
138
  "output_attentions": false,
@@ -150,6 +152,7 @@
150
  "sep_token_id": null,
151
  "task_specific_params": null,
152
  "temperature": 1.0,
 
153
  "tie_encoder_decoder": false,
154
  "tie_word_embeddings": true,
155
  "tokenizer_class": null,
@@ -157,7 +160,7 @@
157
  "top_p": 1.0,
158
  "torch_dtype": null,
159
  "torchscript": false,
160
- "transformers_version": "4.21.0.dev0",
161
  "typical_p": 1.0,
162
  "use_bfloat16": false
163
  },
 
1
  {
2
+ "_name_or_path": "waifu-diffusion/safety_checker",
3
  "architectures": [
4
  "StableDiffusionSafetyChecker"
5
  ],
 
68
  "sep_token_id": null,
69
  "task_specific_params": null,
70
  "temperature": 1.0,
71
+ "tf_legacy_loss": false,
72
  "tie_encoder_decoder": false,
73
  "tie_word_embeddings": true,
74
  "tokenizer_class": null,
 
76
  "top_p": 1.0,
77
  "torch_dtype": null,
78
  "torchscript": false,
79
+ "transformers_version": "4.21.3",
80
  "typical_p": 1.0,
81
  "use_bfloat16": false,
82
  "vocab_size": 49408
 
87
  "num_attention_heads": 12,
88
  "num_hidden_layers": 12
89
  },
90
+ "torch_dtype": "float16",
91
  "transformers_version": null,
92
  "vision_config": {
93
  "_name_or_path": "",
 
134
  "num_attention_heads": 16,
135
  "num_beam_groups": 1,
136
  "num_beams": 1,
137
+ "num_channels": 3,
138
  "num_hidden_layers": 24,
139
  "num_return_sequences": 1,
140
  "output_attentions": false,
 
152
  "sep_token_id": null,
153
  "task_specific_params": null,
154
  "temperature": 1.0,
155
+ "tf_legacy_loss": false,
156
  "tie_encoder_decoder": false,
157
  "tie_word_embeddings": true,
158
  "tokenizer_class": null,
 
160
  "top_p": 1.0,
161
  "torch_dtype": null,
162
  "torchscript": false,
163
+ "transformers_version": "4.21.3",
164
  "typical_p": 1.0,
165
  "use_bfloat16": false
166
  },
text_encoder/config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "openai/clip-vit-large-patch14",
3
  "architectures": [
4
  "CLIPTextModel"
5
  ],
@@ -18,7 +18,7 @@
18
  "num_attention_heads": 12,
19
  "num_hidden_layers": 12,
20
  "pad_token_id": 1,
21
- "torch_dtype": "float32",
22
- "transformers_version": "4.21.2",
23
  "vocab_size": 49408
24
  }
 
1
  {
2
+ "_name_or_path": "waifu-diffusion/text_encoder",
3
  "architectures": [
4
  "CLIPTextModel"
5
  ],
 
18
  "num_attention_heads": 12,
19
  "num_hidden_layers": 12,
20
  "pad_token_id": 1,
21
+ "torch_dtype": "float16",
22
+ "transformers_version": "4.21.3",
23
  "vocab_size": 49408
24
  }
tokenizer/tokenizer_config.json CHANGED
@@ -19,7 +19,7 @@
19
  },
20
  "errors": "replace",
21
  "model_max_length": 77,
22
- "name_or_path": "openai/clip-vit-large-patch14",
23
  "pad_token": "<|endoftext|>",
24
  "special_tokens_map_file": "./special_tokens_map.json",
25
  "tokenizer_class": "CLIPTokenizer",
 
19
  },
20
  "errors": "replace",
21
  "model_max_length": 77,
22
+ "name_or_path": "waifu-diffusion/tokenizer",
23
  "pad_token": "<|endoftext|>",
24
  "special_tokens_map_file": "./special_tokens_map.json",
25
  "tokenizer_class": "CLIPTokenizer",
unet/config.json CHANGED
@@ -1,6 +1,7 @@
1
  {
2
  "_class_name": "UNet2DConditionModel",
3
  "_diffusers_version": "0.2.4",
 
4
  "act_fn": "silu",
5
  "attention_head_dim": 8,
6
  "block_out_channels": [
 
1
  {
2
  "_class_name": "UNet2DConditionModel",
3
  "_diffusers_version": "0.2.4",
4
+ "_name_or_path": "waifu-diffusion/unet",
5
  "act_fn": "silu",
6
  "attention_head_dim": 8,
7
  "block_out_channels": [
unet/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5a0975433af275427f2feca4b2658803871443141efd54450323c72f5e824a34
3
  size 1719322405
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e560ff047c2f0227b5188a7325e59301ec61507200cc00543c2a71440fdb8da0
3
  size 1719322405
vae/config.json CHANGED
@@ -1,6 +1,7 @@
1
  {
2
  "_class_name": "AutoencoderKL",
3
  "_diffusers_version": "0.2.4",
 
4
  "act_fn": "silu",
5
  "block_out_channels": [
6
  128,
 
1
  {
2
  "_class_name": "AutoencoderKL",
3
  "_diffusers_version": "0.2.4",
4
+ "_name_or_path": "waifu-diffusion/vae",
5
  "act_fn": "silu",
6
  "block_out_channels": [
7
  128,