valhalla committed
Commit c67c05b
1 Parent(s): d2924f3

fp16 weights

model_index.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_class_name": "StableDiffusionPipeline",
+  "_class_name": "StableDiffusionInstructPix2PixPipeline",
   "_diffusers_version": "0.12.0.dev0",
   "feature_extractor": [
     "transformers",
safety_checker/config.json CHANGED
@@ -1,6 +1,6 @@
 {
-  "_commit_hash": "cb41f3a270d63d454d385fc2e4f571c487c253c5",
-  "_name_or_path": "CompVis/stable-diffusion-safety-checker",
+  "_commit_hash": null,
+  "_name_or_path": "./instruct-pix2pix/safety_checker",
   "architectures": [
     "StableDiffusionSafetyChecker"
   ],
@@ -80,7 +80,7 @@
     "top_p": 1.0,
     "torch_dtype": null,
     "torchscript": false,
-    "transformers_version": "4.26.0.dev0",
+    "transformers_version": "4.25.1",
     "typical_p": 1.0,
     "use_bfloat16": false,
     "vocab_size": 49408
@@ -91,7 +91,7 @@
     "num_attention_heads": 12,
     "num_hidden_layers": 12
   },
-  "torch_dtype": "float32",
+  "torch_dtype": "float16",
   "transformers_version": null,
   "vision_config": {
     "_name_or_path": "",
@@ -167,7 +167,7 @@
     "top_p": 1.0,
     "torch_dtype": null,
     "torchscript": false,
-    "transformers_version": "4.26.0.dev0",
+    "transformers_version": "4.25.1",
     "typical_p": 1.0,
     "use_bfloat16": false
   },
safety_checker/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:16d28f2b37109f222cdc33620fdd262102ac32112be0352a7f77e9614b35a394
-size 1216064769
+oid sha256:70b4ace4d3096de0a0c6d3574377bbaa17c6338daa9c728ea32f9cfd80a8907c
+size 608101569
text_encoder/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "openai/clip-vit-large-patch14",
+  "_name_or_path": "./instruct-pix2pix/text_encoder",
   "architectures": [
     "CLIPTextModel"
   ],
@@ -19,7 +19,7 @@
   "num_hidden_layers": 12,
   "pad_token_id": 1,
   "projection_dim": 768,
-  "torch_dtype": "float32",
-  "transformers_version": "4.26.0.dev0",
+  "torch_dtype": "float16",
+  "transformers_version": "4.25.1",
   "vocab_size": 49408
 }
text_encoder/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:98124f3d5663b2f14ff08d4c29db93800622b4fcfa3d952bb6f9112f5d6dadd7
-size 492307041
+oid sha256:a371c4c8b166f660e8a97f5e618f27fb1b21d56dc2d171df1973e09e988d003c
+size 246186081
tokenizer/tokenizer_config.json CHANGED
@@ -19,7 +19,7 @@
   },
   "errors": "replace",
   "model_max_length": 77,
-  "name_or_path": "openai/clip-vit-large-patch14",
+  "name_or_path": "./instruct-pix2pix/tokenizer",
   "pad_token": "<|endoftext|>",
   "special_tokens_map_file": "./special_tokens_map.json",
   "tokenizer_class": "CLIPTokenizer",
unet/config.json CHANGED
@@ -1,6 +1,7 @@
 {
   "_class_name": "UNet2DConditionModel",
   "_diffusers_version": "0.12.0.dev0",
+  "_name_or_path": "./instruct-pix2pix/unet",
   "act_fn": "silu",
   "attention_head_dim": 8,
   "block_out_channels": [
unet/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b83d6c874bc598364d113910c8d5a521eb83550fcecc0b4ddd2472c6de4323db
-size 3438412453
+oid sha256:d5c12f3ea6e5173c706fcd6052698b8e5029f2185c329225f57f7c071e37e916
+size 1719347493
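
The UNet pointer size drops from 3,438,412,453 to 1,719,347,493 bytes, which is consistent with a straight float32 to float16 conversion (2 bytes per parameter instead of 4). A quick back-of-the-envelope check, using only the sizes recorded in these LFS pointers:

fp32_bytes = 3_438_412_453  # old diffusion_pytorch_model.bin (float32)
fp16_bytes = 1_719_347_493  # new diffusion_pytorch_model.bin (float16)

# 4 bytes/param in float32 vs 2 bytes/param in float16; the small remainder
# is serialization overhead, so both estimates land near the same count.
print(round(fp32_bytes / 4 / 1e6), "M params implied by the fp32 file")
print(round(fp16_bytes / 2 / 1e6), "M params implied by the fp16 file")
print(round(fp16_bytes / fp32_bytes, 3))  # ~0.5
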
vae/config.json CHANGED
@@ -1,6 +1,7 @@
 {
   "_class_name": "AutoencoderKL",
   "_diffusers_version": "0.12.0.dev0",
+  "_name_or_path": "./instruct-pix2pix/vae",
   "act_fn": "silu",
   "block_out_channels": [
     128,
vae/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:af27ea858349760ebe3311953e0bfe8d6fd257dc9537ae0b2b938c262132a2c6
-size 334711857
+oid sha256:f6e817ec7c5c6641c0000a1a7fc7893f6625251131f33f7bb034c55e5d949455
+size 167404145
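
Since the configs above now record torch_dtype float16 for every weight-bearing component, one way to confirm that a loaded pipeline actually carries half-precision parameters is to inspect each component's dtype. A small sketch, reusing the hypothetical pipe object from the loading example earlier:

for name, component in pipe.components.items():
    # Tokenizer, scheduler and feature extractor carry no parameters, so skip them.
    dtype = getattr(component, "dtype", None)
    if dtype is not None:
        print(f"{name}: {dtype}")  # expected torch.float16 for unet, vae, text_encoder, safety_checker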