bdbao committed
Commit f898bb8
1 Parent(s): 7a9e137

End of training

.gitattributes CHANGED
@@ -35,3 +35,9 @@ unet/diffusion_pytorch_model.fp16.safetensors filter=lfs diff=lfs merge=lfs -text
 text_encoder/model.fp16.safetensors filter=lfs diff=lfs merge=lfs -text
 vae/diffusion_pytorch_model.fp16.safetensors filter=lfs diff=lfs merge=lfs -text
 safety_checker/model.fp16.safetensors filter=lfs diff=lfs merge=lfs -text
+checkpoint-500/model.safetensors filter=lfs diff=lfs merge=lfs -text
+checkpoint-500/model_1.safetensors filter=lfs diff=lfs merge=lfs -text
+safety_checker/model.safetensors filter=lfs diff=lfs merge=lfs -text
+text_encoder/model.safetensors filter=lfs diff=lfs merge=lfs -text
+unet/diffusion_pytorch_model.safetensors filter=lfs diff=lfs merge=lfs -text
+vae/diffusion_pytorch_model.safetensors filter=lfs diff=lfs merge=lfs -text
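Each of these new entries routes a full-precision safetensors file through Git LFS, so the repository itself stores only small pointer files; the ADDED sections below show exactly that three-line pointer format (version, oid, size). A minimal sketch of inspecting such a pointer from a checkout that has not run `git lfs pull` (the path is just an example):

import pathlib

def read_lfs_pointer(path):
    # Parse the "key value" lines of a Git LFS pointer file.
    fields = {}
    for line in pathlib.Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

ptr = read_lfs_pointer("unet/diffusion_pytorch_model.safetensors")
print(ptr["oid"], ptr["size"])  # sha256:... 3438225152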
checkpoint-500/custom_checkpoint_0.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bf46c33758fb766e8d60d79128d00aef81c9aba1c41136209f0a085e098dc9d
+size 1040
checkpoint-500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e26fff15c2f39327cae9d65b96126ed14a9b23ec516ae368e118c3dab28b4b0
+size 3438225152
checkpoint-500/model_1.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5624a5012acdc9b7e7a19ebe0b7aacb388bef790683ac86d5eecad5c4c54e0f
+size 492265168
checkpoint-500/optimizer.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11c412a5959ec5d7c257aa248f6a28b7104c8bfe12ed6d9b06138058d3bfa7db
+size 7861518840
checkpoint-500/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c142cf5c390134038e3508e802868a3ba77c14f17d86b05fbf86684f30c33b14
+size 14344
checkpoint-500/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65115106fbd9463fa6c7133b543dece71c31fcba363ff15b8c73f8c7eb0739c5
+size 1000
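The checkpoint-500/ layout (model.safetensors, model_1.safetensors, optimizer.bin, scheduler.bin, random_states_0.pkl, custom_checkpoint_0.pkl) matches what Hugging Face Accelerate's save_state writes, which suggests the run can be resumed from step 500 with load_state once the same objects are prepared. A self-contained sketch of that save/load round trip with stand-in objects (the real script would call accelerator.load_state("checkpoint-500") on its actual models):

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 4)  # stand-in for the real UNet/text encoder
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)
model, optimizer = accelerator.prepare(model, optimizer)

# save_state writes model.safetensors, optimizer.bin, random_states_0.pkl
# (plus scheduler.bin when a LR scheduler is prepared), as in checkpoint-500/.
accelerator.save_state("demo-checkpoint")
accelerator.load_state("demo-checkpoint")  # resuming restores all of them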
feature_extractor/preprocessor_config.json CHANGED
@@ -1,8 +1,12 @@
 {
-  "crop_size": 224,
+  "crop_size": {
+    "height": 224,
+    "width": 224
+  },
   "do_center_crop": true,
   "do_convert_rgb": true,
   "do_normalize": true,
+  "do_rescale": true,
   "do_resize": true,
   "feature_extractor_type": "CLIPFeatureExtractor",
   "image_mean": [
@@ -10,11 +14,15 @@
     0.4578275,
     0.40821073
   ],
+  "image_processor_type": "CLIPImageProcessor",
   "image_std": [
     0.26862954,
     0.26130258,
     0.27577711
   ],
   "resample": 3,
-  "size": 224
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 224
+  }
 }
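The preprocessor config is migrated from the legacy scalar crop_size/size keys to the dict form, and the rescaling step becomes explicit: rescale_factor 0.00392156862745098 is 1/255. A quick sketch of loading it with transformers, assuming a local checkout of this repo in the working directory:

from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor.from_pretrained(".", subfolder="feature_extractor")
image = Image.new("RGB", (512, 512))  # placeholder input
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 224, 224]): shortest edge 224, center-crop 224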
model_index.json CHANGED
@@ -1,10 +1,16 @@
 {
-  "_class_name": "StableDiffusionInpaintPipeline",
-  "_diffusers_version": "0.6.0",
+  "_class_name": "StableDiffusionPipeline",
+  "_diffusers_version": "0.25.0",
+  "_name_or_path": "runwayml/stable-diffusion-inpainting",
   "feature_extractor": [
     "transformers",
     "CLIPImageProcessor"
   ],
+  "image_encoder": [
+    null,
+    null
+  ],
+  "requires_safety_checker": true,
   "safety_checker": [
     "stable_diffusion",
     "StableDiffusionSafetyChecker"
safety_checker/config.json CHANGED
@@ -1,6 +1,5 @@
 {
-  "_commit_hash": "4bb648a606ef040e7685bde262611766a5fdd67b",
-  "_name_or_path": "CompVis/stable-diffusion-safety-checker",
+  "_name_or_path": "/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-inpainting/snapshots/51388a731f57604945fddd703ecb5c50e8e7b49d/safety_checker",
   "architectures": [
     "StableDiffusionSafetyChecker"
   ],
@@ -9,167 +8,19 @@
   "model_type": "clip",
   "projection_dim": 768,
   "text_config": {
-    "_name_or_path": "",
-    "add_cross_attention": false,
-    "architectures": null,
-    "attention_dropout": 0.0,
-    "bad_words_ids": null,
-    "bos_token_id": 0,
-    "chunk_size_feed_forward": 0,
-    "cross_attention_hidden_size": null,
-    "decoder_start_token_id": null,
-    "diversity_penalty": 0.0,
-    "do_sample": false,
     "dropout": 0.0,
-    "early_stopping": false,
-    "encoder_no_repeat_ngram_size": 0,
-    "eos_token_id": 2,
-    "exponential_decay_length_penalty": null,
-    "finetuning_task": null,
-    "forced_bos_token_id": null,
-    "forced_eos_token_id": null,
-    "hidden_act": "quick_gelu",
     "hidden_size": 768,
-    "id2label": {
-      "0": "LABEL_0",
-      "1": "LABEL_1"
-    },
-    "initializer_factor": 1.0,
-    "initializer_range": 0.02,
     "intermediate_size": 3072,
-    "is_decoder": false,
-    "is_encoder_decoder": false,
-    "label2id": {
-      "LABEL_0": 0,
-      "LABEL_1": 1
-    },
-    "layer_norm_eps": 1e-05,
-    "length_penalty": 1.0,
-    "max_length": 20,
-    "max_position_embeddings": 77,
-    "min_length": 0,
     "model_type": "clip_text_model",
-    "no_repeat_ngram_size": 0,
-    "num_attention_heads": 12,
-    "num_beam_groups": 1,
-    "num_beams": 1,
-    "num_hidden_layers": 12,
-    "num_return_sequences": 1,
-    "output_attentions": false,
-    "output_hidden_states": false,
-    "output_scores": false,
-    "pad_token_id": 1,
-    "prefix": null,
-    "problem_type": null,
-    "projection_dim": 512,
-    "pruned_heads": {},
-    "remove_invalid_values": false,
-    "repetition_penalty": 1.0,
-    "return_dict": true,
-    "return_dict_in_generate": false,
-    "sep_token_id": null,
-    "task_specific_params": null,
-    "temperature": 1.0,
-    "tf_legacy_loss": false,
-    "tie_encoder_decoder": false,
-    "tie_word_embeddings": true,
-    "tokenizer_class": null,
-    "top_k": 50,
-    "top_p": 1.0,
-    "torch_dtype": null,
-    "torchscript": false,
-    "transformers_version": "4.22.0.dev0",
-    "typical_p": 1.0,
-    "use_bfloat16": false,
-    "vocab_size": 49408
-  },
-  "text_config_dict": {
-    "hidden_size": 768,
-    "intermediate_size": 3072,
-    "num_attention_heads": 12,
-    "num_hidden_layers": 12
+    "num_attention_heads": 12
   },
   "torch_dtype": "float32",
-  "transformers_version": null,
+  "transformers_version": "4.36.2",
   "vision_config": {
-    "_name_or_path": "",
-    "add_cross_attention": false,
-    "architectures": null,
-    "attention_dropout": 0.0,
-    "bad_words_ids": null,
-    "bos_token_id": null,
-    "chunk_size_feed_forward": 0,
-    "cross_attention_hidden_size": null,
-    "decoder_start_token_id": null,
-    "diversity_penalty": 0.0,
-    "do_sample": false,
     "dropout": 0.0,
-    "early_stopping": false,
-    "encoder_no_repeat_ngram_size": 0,
-    "eos_token_id": null,
-    "exponential_decay_length_penalty": null,
-    "finetuning_task": null,
-    "forced_bos_token_id": null,
-    "forced_eos_token_id": null,
-    "hidden_act": "quick_gelu",
     "hidden_size": 1024,
-    "id2label": {
-      "0": "LABEL_0",
-      "1": "LABEL_1"
-    },
-    "image_size": 224,
-    "initializer_factor": 1.0,
-    "initializer_range": 0.02,
     "intermediate_size": 4096,
-    "is_decoder": false,
-    "is_encoder_decoder": false,
-    "label2id": {
-      "LABEL_0": 0,
-      "LABEL_1": 1
-    },
-    "layer_norm_eps": 1e-05,
-    "length_penalty": 1.0,
-    "max_length": 20,
-    "min_length": 0,
     "model_type": "clip_vision_model",
-    "no_repeat_ngram_size": 0,
-    "num_attention_heads": 16,
-    "num_beam_groups": 1,
-    "num_beams": 1,
-    "num_channels": 3,
-    "num_hidden_layers": 24,
-    "num_return_sequences": 1,
-    "output_attentions": false,
-    "output_hidden_states": false,
-    "output_scores": false,
-    "pad_token_id": null,
-    "patch_size": 14,
-    "prefix": null,
-    "problem_type": null,
-    "projection_dim": 512,
-    "pruned_heads": {},
-    "remove_invalid_values": false,
-    "repetition_penalty": 1.0,
-    "return_dict": true,
-    "return_dict_in_generate": false,
-    "sep_token_id": null,
-    "task_specific_params": null,
-    "temperature": 1.0,
-    "tf_legacy_loss": false,
-    "tie_encoder_decoder": false,
-    "tie_word_embeddings": true,
-    "tokenizer_class": null,
-    "top_k": 50,
-    "top_p": 1.0,
-    "torch_dtype": null,
-    "torchscript": false,
-    "transformers_version": "4.22.0.dev0",
-    "typical_p": 1.0,
-    "use_bfloat16": false
-  },
-  "vision_config_dict": {
-    "hidden_size": 1024,
-    "intermediate_size": 4096,
     "num_attention_heads": 16,
     "num_hidden_layers": 24,
     "patch_size": 14
safety_checker/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb351a5ded815c3ff744968ad9c6b218d071b9d313d04f35e813b84b4c0ffde8
+size 1215979664
scheduler/scheduler_config.json CHANGED
@@ -1,13 +1,20 @@
 {
   "_class_name": "DDIMScheduler",
-  "_diffusers_version": "0.6.0.dev0",
+  "_diffusers_version": "0.25.0",
   "beta_end": 0.012,
   "beta_schedule": "scaled_linear",
   "beta_start": 0.00085,
   "clip_sample": false,
+  "clip_sample_range": 1.0,
+  "dynamic_thresholding_ratio": 0.995,
   "num_train_timesteps": 1000,
+  "prediction_type": "epsilon",
+  "rescale_betas_zero_snr": false,
+  "sample_max_value": 1.0,
   "set_alpha_to_one": false,
+  "skip_prk_steps": true,
   "steps_offset": 1,
-  "trained_betas": null,
-  "skip_prk_steps": true
+  "thresholding": false,
+  "timestep_spacing": "leading",
+  "trained_betas": null
 }
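The scheduler config gains the fields newer diffusers releases expect; in particular prediction_type "epsilon" and timestep_spacing "leading" are now explicit rather than implied defaults. A loading sketch, assuming a local checkout:

from diffusers import DDIMScheduler

scheduler = DDIMScheduler.from_pretrained(".", subfolder="scheduler")
scheduler.set_timesteps(50)
# "leading" spacing with steps_offset=1 starts at 981 and steps down by 20:
print(scheduler.timesteps[:3])  # tensor([981, 961, 941])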
text_encoder/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "openai/clip-vit-large-patch14",
+  "_name_or_path": "runwayml/stable-diffusion-inpainting",
   "architectures": [
     "CLIPTextModel"
   ],
@@ -20,6 +20,6 @@
   "pad_token_id": 1,
   "projection_dim": 768,
   "torch_dtype": "float32",
-  "transformers_version": "4.22.0.dev0",
+  "transformers_version": "4.36.2",
   "vocab_size": 49408
 }
text_encoder/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5624a5012acdc9b7e7a19ebe0b7aacb388bef790683ac86d5eecad5c4c54e0f
+size 492265168
tokenizer/tokenizer_config.json CHANGED
@@ -1,34 +1,30 @@
 {
   "add_prefix_space": false,
-  "bos_token": {
-    "__type": "AddedToken",
-    "content": "<|startoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
+  "added_tokens_decoder": {
+    "49406": {
+      "content": "<|startoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "49407": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
   },
+  "bos_token": "<|startoftext|>",
+  "clean_up_tokenization_spaces": true,
   "do_lower_case": true,
-  "eos_token": {
-    "__type": "AddedToken",
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "eos_token": "<|endoftext|>",
   "errors": "replace",
   "model_max_length": 77,
-  "name_or_path": "openai/clip-vit-large-patch14",
   "pad_token": "<|endoftext|>",
-  "special_tokens_map_file": "./special_tokens_map.json",
   "tokenizer_class": "CLIPTokenizer",
-  "unk_token": {
-    "__type": "AddedToken",
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
+  "unk_token": "<|endoftext|>"
 }
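The special tokens move into the newer added_tokens_decoder map, keyed by their vocabulary ids (49406 for <|startoftext|>, 49407 for <|endoftext|>), with bos/eos/unk collapsed to plain strings. A round-trip sketch, assuming a local checkout:

from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained(".", subfolder="tokenizer")
ids = tokenizer("a photo of a cat").input_ids
print(ids[0], ids[-1])  # 49406 49407 -- <|startoftext|> ... <|endoftext|>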
unet/config.json CHANGED
@@ -1,8 +1,13 @@
 {
   "_class_name": "UNet2DConditionModel",
-  "_diffusers_version": "0.6.0.dev0",
+  "_diffusers_version": "0.25.0",
+  "_name_or_path": "runwayml/stable-diffusion-inpainting",
   "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
   "attention_head_dim": 8,
+  "attention_type": "default",
   "block_out_channels": [
     320,
     640,
@@ -10,7 +15,12 @@
     1280
   ],
   "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
   "cross_attention_dim": 768,
+  "cross_attention_norm": null,
   "down_block_types": [
     "CrossAttnDownBlock2D",
     "CrossAttnDownBlock2D",
@@ -18,19 +28,41 @@
     "DownBlock2D"
   ],
   "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
   "flip_sin_to_cos": true,
   "freq_shift": 0,
   "in_channels": 9,
   "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
   "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
   "norm_eps": 1e-05,
   "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
   "out_channels": 4,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "reverse_transformer_layers_per_block": null,
   "sample_size": 64,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": 1,
   "up_block_types": [
     "UpBlock2D",
     "CrossAttnUpBlock2D",
     "CrossAttnUpBlock2D",
     "CrossAttnUpBlock2D"
-  ]
+  ],
+  "upcast_attention": false,
+  "use_linear_projection": false
 }
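in_channels stays at 9, the Stable Diffusion inpainting layout: 4 noisy-latent channels, 1 mask channel, and 4 masked-image-latent channels concatenated along the channel dimension. A forward-pass sketch, assuming a local checkout with the LFS weights pulled:

import torch
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained(".", subfolder="unet")
print(unet.config.in_channels)  # 9 = 4 latents + 1 mask + 4 masked-image latents

sample = torch.randn(1, 9, 64, 64)  # pre-concatenated inpainting input
text_emb = torch.randn(1, 77, 768)  # CLIP text encoder hidden states
with torch.no_grad():
    noise_pred = unet(sample, torch.tensor([10]), encoder_hidden_states=text_emb).sample
print(noise_pred.shape)  # torch.Size([1, 4, 64, 64])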
unet/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e26fff15c2f39327cae9d65b96126ed14a9b23ec516ae368e118c3dab28b4b0
3
+ size 3438225152
vae/config.json CHANGED
@@ -1,6 +1,7 @@
 {
   "_class_name": "AutoencoderKL",
-  "_diffusers_version": "0.6.0.dev0",
+  "_diffusers_version": "0.25.0",
+  "_name_or_path": "/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-inpainting/snapshots/51388a731f57604945fddd703ecb5c50e8e7b49d/vae",
   "act_fn": "silu",
   "block_out_channels": [
     128,
@@ -14,12 +15,14 @@
     "DownEncoderBlock2D",
     "DownEncoderBlock2D"
   ],
+  "force_upcast": true,
   "in_channels": 3,
   "latent_channels": 4,
   "layers_per_block": 2,
   "norm_num_groups": 32,
   "out_channels": 3,
   "sample_size": 256,
+  "scaling_factor": 0.18215,
   "up_block_types": [
     "UpDecoderBlock2D",
     "UpDecoderBlock2D",
vae/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4d2b5932bb4151e54e694fd31ccf51fca908223c9485bd56cd0e1d83ad94c49
+size 334643268