anton-l (HF staff) committed
Commit eb82621 • 1 Parent(s): 245f52e

ONNX weights
model_index.json CHANGED
@@ -1,21 +1,21 @@
 {
-  "_class_name": "StableDiffusionPipeline",
+  "_class_name": "OnnxStableDiffusionPipeline",
   "_diffusers_version": "0.6.0",
   "feature_extractor": [
     "transformers",
     "CLIPFeatureExtractor"
   ],
   "safety_checker": [
-    "stable_diffusion",
-    "StableDiffusionSafetyChecker"
+    "diffusers",
+    "OnnxRuntimeModel"
   ],
   "scheduler": [
     "diffusers",
     "PNDMScheduler"
   ],
   "text_encoder": [
-    "transformers",
-    "CLIPTextModel"
+    "diffusers",
+    "OnnxRuntimeModel"
   ],
   "tokenizer": [
     "transformers",
@@ -23,10 +23,14 @@
   ],
   "unet": [
     "diffusers",
-    "UNet2DConditionModel"
+    "OnnxRuntimeModel"
+  ],
+  "vae_decoder": [
+    "diffusers",
+    "OnnxRuntimeModel"
   ],
-  "vae": [
+  "vae_encoder": [
     "diffusers",
-    "AutoencoderKL"
+    "OnnxRuntimeModel"
   ]
 }
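After this change every learned component is wired through diffusers' OnnxRuntimeModel, and the pipeline class becomes OnnxStableDiffusionPipeline (the _class_name above). A minimal loading sketch, assuming diffusers >= 0.6.0 with onnxruntime installed; the repo id is inferred from the snapshot path visible in the tokenizer_config diff below, and the revision name "onnx" is an assumption, not stated in this commit:

from diffusers import OnnxStableDiffusionPipeline

# Load the ONNX export of Stable Diffusion v1-5 (repo id and revision
# are assumptions, not part of this commit).
pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    revision="onnx",
    provider="CPUExecutionProvider",
)
image = pipe("a photo of an astronaut riding a horse").images[0]
image.save("astronaut.png")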
safety_checker/config.json DELETED
@@ -1,175 +0,0 @@
-{
-  "_commit_hash": "4bb648a606ef040e7685bde262611766a5fdd67b",
-  "_name_or_path": "CompVis/stable-diffusion-safety-checker",
-  "architectures": [
-    "StableDiffusionSafetyChecker"
-  ],
-  "initializer_factor": 1.0,
-  "logit_scale_init_value": 2.6592,
-  "model_type": "clip",
-  "projection_dim": 768,
-  "text_config": {
-    "_name_or_path": "",
-    "add_cross_attention": false,
-    "architectures": null,
-    "attention_dropout": 0.0,
-    "bad_words_ids": null,
-    "bos_token_id": 0,
-    "chunk_size_feed_forward": 0,
-    "cross_attention_hidden_size": null,
-    "decoder_start_token_id": null,
-    "diversity_penalty": 0.0,
-    "do_sample": false,
-    "dropout": 0.0,
-    "early_stopping": false,
-    "encoder_no_repeat_ngram_size": 0,
-    "eos_token_id": 2,
-    "exponential_decay_length_penalty": null,
-    "finetuning_task": null,
-    "forced_bos_token_id": null,
-    "forced_eos_token_id": null,
-    "hidden_act": "quick_gelu",
-    "hidden_size": 768,
-    "id2label": {
-      "0": "LABEL_0",
-      "1": "LABEL_1"
-    },
-    "initializer_factor": 1.0,
-    "initializer_range": 0.02,
-    "intermediate_size": 3072,
-    "is_decoder": false,
-    "is_encoder_decoder": false,
-    "label2id": {
-      "LABEL_0": 0,
-      "LABEL_1": 1
-    },
-    "layer_norm_eps": 1e-05,
-    "length_penalty": 1.0,
-    "max_length": 20,
-    "max_position_embeddings": 77,
-    "min_length": 0,
-    "model_type": "clip_text_model",
-    "no_repeat_ngram_size": 0,
-    "num_attention_heads": 12,
-    "num_beam_groups": 1,
-    "num_beams": 1,
-    "num_hidden_layers": 12,
-    "num_return_sequences": 1,
-    "output_attentions": false,
-    "output_hidden_states": false,
-    "output_scores": false,
-    "pad_token_id": 1,
-    "prefix": null,
-    "problem_type": null,
-    "pruned_heads": {},
-    "remove_invalid_values": false,
-    "repetition_penalty": 1.0,
-    "return_dict": true,
-    "return_dict_in_generate": false,
-    "sep_token_id": null,
-    "task_specific_params": null,
-    "temperature": 1.0,
-    "tf_legacy_loss": false,
-    "tie_encoder_decoder": false,
-    "tie_word_embeddings": true,
-    "tokenizer_class": null,
-    "top_k": 50,
-    "top_p": 1.0,
-    "torch_dtype": null,
-    "torchscript": false,
-    "transformers_version": "4.22.0.dev0",
-    "typical_p": 1.0,
-    "use_bfloat16": false,
-    "vocab_size": 49408
-  },
-  "text_config_dict": {
-    "hidden_size": 768,
-    "intermediate_size": 3072,
-    "num_attention_heads": 12,
-    "num_hidden_layers": 12
-  },
-  "torch_dtype": "float32",
-  "transformers_version": null,
-  "vision_config": {
-    "_name_or_path": "",
-    "add_cross_attention": false,
-    "architectures": null,
-    "attention_dropout": 0.0,
-    "bad_words_ids": null,
-    "bos_token_id": null,
-    "chunk_size_feed_forward": 0,
-    "cross_attention_hidden_size": null,
-    "decoder_start_token_id": null,
-    "diversity_penalty": 0.0,
-    "do_sample": false,
-    "dropout": 0.0,
-    "early_stopping": false,
-    "encoder_no_repeat_ngram_size": 0,
-    "eos_token_id": null,
-    "exponential_decay_length_penalty": null,
-    "finetuning_task": null,
-    "forced_bos_token_id": null,
-    "forced_eos_token_id": null,
-    "hidden_act": "quick_gelu",
-    "hidden_size": 1024,
-    "id2label": {
-      "0": "LABEL_0",
-      "1": "LABEL_1"
-    },
-    "image_size": 224,
-    "initializer_factor": 1.0,
-    "initializer_range": 0.02,
-    "intermediate_size": 4096,
-    "is_decoder": false,
-    "is_encoder_decoder": false,
-    "label2id": {
-      "LABEL_0": 0,
-      "LABEL_1": 1
-    },
-    "layer_norm_eps": 1e-05,
-    "length_penalty": 1.0,
-    "max_length": 20,
-    "min_length": 0,
-    "model_type": "clip_vision_model",
-    "no_repeat_ngram_size": 0,
-    "num_attention_heads": 16,
-    "num_beam_groups": 1,
-    "num_beams": 1,
-    "num_channels": 3,
-    "num_hidden_layers": 24,
-    "num_return_sequences": 1,
-    "output_attentions": false,
-    "output_hidden_states": false,
-    "output_scores": false,
-    "pad_token_id": null,
-    "patch_size": 14,
-    "prefix": null,
-    "problem_type": null,
-    "pruned_heads": {},
-    "remove_invalid_values": false,
-    "repetition_penalty": 1.0,
-    "return_dict": true,
-    "return_dict_in_generate": false,
-    "sep_token_id": null,
-    "task_specific_params": null,
-    "temperature": 1.0,
-    "tf_legacy_loss": false,
-    "tie_encoder_decoder": false,
-    "tie_word_embeddings": true,
-    "tokenizer_class": null,
-    "top_k": 50,
-    "top_p": 1.0,
-    "torch_dtype": null,
-    "torchscript": false,
-    "transformers_version": "4.22.0.dev0",
-    "typical_p": 1.0,
-    "use_bfloat16": false
-  },
-  "vision_config_dict": {
-    "hidden_size": 1024,
-    "intermediate_size": 4096,
-    "num_attention_heads": 16,
-    "num_hidden_layers": 24,
-    "patch_size": 14
-  }
-}
unet/diffusion_pytorch_model.bin → safety_checker/model.onnx RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c7da0e21ba7ea50637bee26e81c220844defdf01aafca02b2c42ecdadb813de4
-size 3438354725
+oid sha256:05f8d71f06e12ba35212ef104470973b74a79192f723a443f239152c32e23831
+size 1216193649
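All binaries in this commit are tracked by Git LFS, so each diff like the one above only touches the three-line pointer file (spec version, sha256 oid, byte size); the actual blob lives in LFS storage and is fetched on checkout. A small sketch of reading such a pointer, in case the format is unfamiliar (illustrative helper, not part of this repo):

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; split on the first space.
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:05f8d71f06e12ba35212ef104470973b74a79192f723a443f239152c32e23831\n"
    "size 1216193649"
)
meta = parse_lfs_pointer(pointer)
print(meta["oid"], int(meta["size"]))  # sha256:05f8... 1216193649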
text_encoder/config.json DELETED
@@ -1,25 +0,0 @@
-{
-  "_name_or_path": "openai/clip-vit-large-patch14",
-  "architectures": [
-    "CLIPTextModel"
-  ],
-  "attention_dropout": 0.0,
-  "bos_token_id": 0,
-  "dropout": 0.0,
-  "eos_token_id": 2,
-  "hidden_act": "quick_gelu",
-  "hidden_size": 768,
-  "initializer_factor": 1.0,
-  "initializer_range": 0.02,
-  "intermediate_size": 3072,
-  "layer_norm_eps": 1e-05,
-  "max_position_embeddings": 77,
-  "model_type": "clip_text_model",
-  "num_attention_heads": 12,
-  "num_hidden_layers": 12,
-  "pad_token_id": 1,
-  "projection_dim": 768,
-  "torch_dtype": "float32",
-  "transformers_version": "4.22.0.dev0",
-  "vocab_size": 49408
-}
vae/diffusion_pytorch_model.bin → text_encoder/model.onnx RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1b134cded8eb78b184aefb8805b6b572f36fa77b255c483665dda931fa0130c5
-size 334707217
+oid sha256:7bb471d9a2693995e994ade398b8d6e118c6754535233228523665e619c6cdc5
+size 492394398
tokenizer/tokenizer_config.json CHANGED
@@ -19,7 +19,7 @@
   },
   "errors": "replace",
   "model_max_length": 77,
-  "name_or_path": "openai/clip-vit-large-patch14",
+  "name_or_path": "/home/anton_huggingface_co/.cache/huggingface/diffusers/models--runwayml--stable-diffusion-v1-5/snapshots/245f52e962f4c0733f56daa14d2c85d3d2210e13/tokenizer",
   "pad_token": "<|endoftext|>",
   "special_tokens_map_file": "./special_tokens_map.json",
   "tokenizer_class": "CLIPTokenizer",
unet/config.json DELETED
@@ -1,36 +0,0 @@
-{
-  "_class_name": "UNet2DConditionModel",
-  "_diffusers_version": "0.6.0",
-  "act_fn": "silu",
-  "attention_head_dim": 8,
-  "block_out_channels": [
-    320,
-    640,
-    1280,
-    1280
-  ],
-  "center_input_sample": false,
-  "cross_attention_dim": 768,
-  "down_block_types": [
-    "CrossAttnDownBlock2D",
-    "CrossAttnDownBlock2D",
-    "CrossAttnDownBlock2D",
-    "DownBlock2D"
-  ],
-  "downsample_padding": 1,
-  "flip_sin_to_cos": true,
-  "freq_shift": 0,
-  "in_channels": 4,
-  "layers_per_block": 2,
-  "mid_block_scale_factor": 1,
-  "norm_eps": 1e-05,
-  "norm_num_groups": 32,
-  "out_channels": 4,
-  "sample_size": 32,
-  "up_block_types": [
-    "UpBlock2D",
-    "CrossAttnUpBlock2D",
-    "CrossAttnUpBlock2D",
-    "CrossAttnUpBlock2D"
-  ]
-}
text_encoder/pytorch_model.bin → unet/model.onnx RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:770a47a9ffdcfda0b05506a7888ed714d06131d60267e6cf52765d61cf59fd67
-size 492305335
+oid sha256:0407225aa6d5fda3754fcbc807938639449b982e98a5dc33d2c9c562fbd282c4
+size 775566
safety_checker/pytorch_model.bin → unet/weights.pb RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:193490b58ef62739077262e833bf091c66c29488058681ac25cf7df3d8190974
-size 1216061799
+oid sha256:339a499d1a3191b5879410ee7171c42ac15a3e116a570a95c3c93087e907de9d
+size 3438083840
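The sizes above suggest the UNet is stored as a small graph file (unet/model.onnx, about 776 KB) plus an external-data blob (unet/weights.pb, about 3.4 GB): ONNX's protobuf container is limited to 2 GB, so larger models spill their tensors into a side file. onnxruntime resolves the external data automatically as long as weights.pb sits next to model.onnx. A minimal sketch, assuming onnxruntime is installed and the files are checked out locally:

import onnxruntime as ort

# Loading unet/model.onnx also pulls in the external tensors from
# unet/weights.pb in the same directory.
sess = ort.InferenceSession("unet/model.onnx", providers=["CPUExecutionProvider"])
for inp in sess.get_inputs():
    print(inp.name, inp.shape, inp.type)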
v1-inference.yaml DELETED
@@ -1,70 +0,0 @@
-model:
-  base_learning_rate: 1.0e-04
-  target: ldm.models.diffusion.ddpm.LatentDiffusion
-  params:
-    linear_start: 0.00085
-    linear_end: 0.0120
-    num_timesteps_cond: 1
-    log_every_t: 200
-    timesteps: 1000
-    first_stage_key: "jpg"
-    cond_stage_key: "txt"
-    image_size: 64
-    channels: 4
-    cond_stage_trainable: false # Note: different from the one we trained before
-    conditioning_key: crossattn
-    monitor: val/loss_simple_ema
-    scale_factor: 0.18215
-    use_ema: False
-
-    scheduler_config: # 10000 warmup steps
-      target: ldm.lr_scheduler.LambdaLinearScheduler
-      params:
-        warm_up_steps: [ 10000 ]
-        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
-        f_start: [ 1.e-6 ]
-        f_max: [ 1. ]
-        f_min: [ 1. ]
-
-    unet_config:
-      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
-      params:
-        image_size: 32 # unused
-        in_channels: 4
-        out_channels: 4
-        model_channels: 320
-        attention_resolutions: [ 4, 2, 1 ]
-        num_res_blocks: 2
-        channel_mult: [ 1, 2, 4, 4 ]
-        num_heads: 8
-        use_spatial_transformer: True
-        transformer_depth: 1
-        context_dim: 768
-        use_checkpoint: True
-        legacy: False
-
-    first_stage_config:
-      target: ldm.models.autoencoder.AutoencoderKL
-      params:
-        embed_dim: 4
-        monitor: val/rec_loss
-        ddconfig:
-          double_z: true
-          z_channels: 4
-          resolution: 256
-          in_channels: 3
-          out_ch: 3
-          ch: 128
-          ch_mult:
-          - 1
-          - 2
-          - 4
-          - 4
-          num_res_blocks: 2
-          attn_resolutions: []
-          dropout: 0.0
-        lossconfig:
-          target: torch.nn.Identity
-
-    cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
vae/config.json DELETED
@@ -1,29 +0,0 @@
-{
-  "_class_name": "AutoencoderKL",
-  "_diffusers_version": "0.6.0",
-  "act_fn": "silu",
-  "block_out_channels": [
-    128,
-    256,
-    512,
-    512
-  ],
-  "down_block_types": [
-    "DownEncoderBlock2D",
-    "DownEncoderBlock2D",
-    "DownEncoderBlock2D",
-    "DownEncoderBlock2D"
-  ],
-  "in_channels": 3,
-  "latent_channels": 4,
-  "layers_per_block": 2,
-  "norm_num_groups": 32,
-  "out_channels": 3,
-  "sample_size": 256,
-  "up_block_types": [
-    "UpDecoderBlock2D",
-    "UpDecoderBlock2D",
-    "UpDecoderBlock2D",
-    "UpDecoderBlock2D"
-  ]
-}
v1-5-pruned-emaonly.ckpt → vae_decoder/model.onnx RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cc6cb27103417325ff94f52b7a5d2dde45a7515b25c255d8e396c90014281516
-size 4265380512
+oid sha256:500050d2dbd88e78f073d0a5bbd8100117886b1e2a7b160c29d373cd60fd08ce
+size 198021963
vae_encoder/model.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55f84641e6266f860391ceb11787585f5778335b9f8b63eb28658e54f4e194d1
+size 136709494
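Unlike the PyTorch pipeline's single AutoencoderKL, the ONNX export ships separate vae_encoder and vae_decoder graphs, since each export captures one forward path (encoding images for img2img-style inputs, decoding latents into images). A quick shape smoke test of the new decoder, as a sketch; the latent scaling the real pipeline applies before decoding is omitted, and the expected output shape assumes the SD v1-5 VAE at 512x512:

import numpy as np
import onnxruntime as ort

dec = ort.InferenceSession("vae_decoder/model.onnx", providers=["CPUExecutionProvider"])
latents = np.random.randn(1, 4, 64, 64).astype(np.float32)  # SD v1 latent shape for 512x512
(image,) = dec.run(None, {dec.get_inputs()[0].name: latents})
print(image.shape)  # expected (1, 3, 512, 512)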