valhalla committed
Commit 200d326
1 parent: 96a761e

add weights
depth_estimator/config.json ADDED
@@ -0,0 +1,451 @@
+ {
+ "_commit_hash": null,
+ "_name_or_path": "./hf-models/stable-diffusion-2-depth/depth_estimator",
+ "architectures": [
+ "DPTForDepthEstimation"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "auxiliary_loss_weight": 0.4,
+ "backbone_config": {
+ "_name_or_path": "",
+ "add_cross_attention": false,
+ "architectures": null,
+ "bad_words_ids": null,
+ "begin_suppress_tokens": null,
+ "bos_token_id": null,
+ "chunk_size_feed_forward": 0,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "depths": [
+ 3,
+ 4,
+ 9
+ ],
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "drop_path_rate": 0.0,
+ "early_stopping": false,
+ "embedding_dynamic_padding": true,
+ "embedding_size": 64,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": null,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "global_padding": "SAME",
+ "hidden_act": "relu",
+ "hidden_sizes": [
+ 256,
+ 512,
+ 1024,
+ 2048
+ ],
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "is_decoder": false,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "layer_type": "bottleneck",
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "min_length": 0,
+ "model_type": "bit",
+ "no_repeat_ngram_size": 0,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_channels": 3,
+ "num_groups": 32,
+ "num_return_sequences": 1,
+ "out_features": [
+ "stage1",
+ "stage2",
+ "stage3"
+ ],
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "output_stride": 32,
+ "pad_token_id": null,
+ "prefix": null,
+ "problem_type": null,
+ "pruned_heads": {},
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "sep_token_id": null,
+ "stage_names": [
+ "stem",
+ "stage1",
+ "stage2",
+ "stage3"
+ ],
+ "suppress_tokens": null,
+ "task_specific_params": null,
+ "temperature": 1.0,
+ "tf_legacy_loss": false,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": true,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": null,
+ "torchscript": false,
+ "transformers_version": "4.26.0.dev0",
+ "typical_p": 1.0,
+ "use_bfloat16": false,
+ "width_factor": 1
+ },
+ "backbone_out_indices": [
+ 2,
+ 5,
+ 8,
+ 11
+ ],
+ "embedding_type": "vit_hybrid",
+ "fusion_hidden_size": 256,
+ "head_in_index": -1,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2",
+ "3": "LABEL_3",
+ "4": "LABEL_4",
+ "5": "LABEL_5",
+ "6": "LABEL_6",
+ "7": "LABEL_7",
+ "8": "LABEL_8",
+ "9": "LABEL_9",
+ "10": "LABEL_10",
+ "11": "LABEL_11",
+ "12": "LABEL_12",
+ "13": "LABEL_13",
+ "14": "LABEL_14",
+ "15": "LABEL_15",
+ "16": "LABEL_16",
+ "17": "LABEL_17",
+ "18": "LABEL_18",
+ "19": "LABEL_19",
+ "20": "LABEL_20",
+ "21": "LABEL_21",
+ "22": "LABEL_22",
+ "23": "LABEL_23",
+ "24": "LABEL_24",
+ "25": "LABEL_25",
+ "26": "LABEL_26",
+ "27": "LABEL_27",
+ "28": "LABEL_28",
+ "29": "LABEL_29",
+ "30": "LABEL_30",
+ "31": "LABEL_31",
+ "32": "LABEL_32",
+ "33": "LABEL_33",
+ "34": "LABEL_34",
+ "35": "LABEL_35",
+ "36": "LABEL_36",
+ "37": "LABEL_37",
+ "38": "LABEL_38",
+ "39": "LABEL_39",
+ "40": "LABEL_40",
+ "41": "LABEL_41",
+ "42": "LABEL_42",
+ "43": "LABEL_43",
+ "44": "LABEL_44",
+ "45": "LABEL_45",
+ "46": "LABEL_46",
+ "47": "LABEL_47",
+ "48": "LABEL_48",
+ "49": "LABEL_49",
+ "50": "LABEL_50",
+ "51": "LABEL_51",
+ "52": "LABEL_52",
+ "53": "LABEL_53",
+ "54": "LABEL_54",
+ "55": "LABEL_55",
+ "56": "LABEL_56",
+ "57": "LABEL_57",
+ "58": "LABEL_58",
+ "59": "LABEL_59",
+ "60": "LABEL_60",
+ "61": "LABEL_61",
+ "62": "LABEL_62",
+ "63": "LABEL_63",
+ "64": "LABEL_64",
+ "65": "LABEL_65",
+ "66": "LABEL_66",
+ "67": "LABEL_67",
+ "68": "LABEL_68",
+ "69": "LABEL_69",
+ "70": "LABEL_70",
+ "71": "LABEL_71",
+ "72": "LABEL_72",
+ "73": "LABEL_73",
+ "74": "LABEL_74",
+ "75": "LABEL_75",
+ "76": "LABEL_76",
+ "77": "LABEL_77",
+ "78": "LABEL_78",
+ "79": "LABEL_79",
+ "80": "LABEL_80",
+ "81": "LABEL_81",
+ "82": "LABEL_82",
+ "83": "LABEL_83",
+ "84": "LABEL_84",
+ "85": "LABEL_85",
+ "86": "LABEL_86",
+ "87": "LABEL_87",
+ "88": "LABEL_88",
+ "89": "LABEL_89",
+ "90": "LABEL_90",
+ "91": "LABEL_91",
+ "92": "LABEL_92",
+ "93": "LABEL_93",
+ "94": "LABEL_94",
+ "95": "LABEL_95",
+ "96": "LABEL_96",
+ "97": "LABEL_97",
+ "98": "LABEL_98",
+ "99": "LABEL_99",
+ "100": "LABEL_100",
+ "101": "LABEL_101",
+ "102": "LABEL_102",
+ "103": "LABEL_103",
+ "104": "LABEL_104",
+ "105": "LABEL_105",
+ "106": "LABEL_106",
+ "107": "LABEL_107",
+ "108": "LABEL_108",
+ "109": "LABEL_109",
+ "110": "LABEL_110",
+ "111": "LABEL_111",
+ "112": "LABEL_112",
+ "113": "LABEL_113",
+ "114": "LABEL_114",
+ "115": "LABEL_115",
+ "116": "LABEL_116",
+ "117": "LABEL_117",
+ "118": "LABEL_118",
+ "119": "LABEL_119",
+ "120": "LABEL_120",
+ "121": "LABEL_121",
+ "122": "LABEL_122",
+ "123": "LABEL_123",
+ "124": "LABEL_124",
+ "125": "LABEL_125",
+ "126": "LABEL_126",
+ "127": "LABEL_127",
+ "128": "LABEL_128",
+ "129": "LABEL_129",
+ "130": "LABEL_130",
+ "131": "LABEL_131",
+ "132": "LABEL_132",
+ "133": "LABEL_133",
+ "134": "LABEL_134",
+ "135": "LABEL_135",
+ "136": "LABEL_136",
+ "137": "LABEL_137",
+ "138": "LABEL_138",
+ "139": "LABEL_139",
+ "140": "LABEL_140",
+ "141": "LABEL_141",
+ "142": "LABEL_142",
+ "143": "LABEL_143",
+ "144": "LABEL_144",
+ "145": "LABEL_145",
+ "146": "LABEL_146",
+ "147": "LABEL_147",
+ "148": "LABEL_148",
+ "149": "LABEL_149"
+ },
+ "image_size": 384,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "is_hybrid": true,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_10": 10,
+ "LABEL_100": 100,
+ "LABEL_101": 101,
+ "LABEL_102": 102,
+ "LABEL_103": 103,
+ "LABEL_104": 104,
+ "LABEL_105": 105,
+ "LABEL_106": 106,
+ "LABEL_107": 107,
+ "LABEL_108": 108,
+ "LABEL_109": 109,
+ "LABEL_11": 11,
+ "LABEL_110": 110,
+ "LABEL_111": 111,
+ "LABEL_112": 112,
+ "LABEL_113": 113,
+ "LABEL_114": 114,
+ "LABEL_115": 115,
+ "LABEL_116": 116,
+ "LABEL_117": 117,
+ "LABEL_118": 118,
+ "LABEL_119": 119,
+ "LABEL_12": 12,
+ "LABEL_120": 120,
+ "LABEL_121": 121,
+ "LABEL_122": 122,
+ "LABEL_123": 123,
+ "LABEL_124": 124,
+ "LABEL_125": 125,
+ "LABEL_126": 126,
+ "LABEL_127": 127,
+ "LABEL_128": 128,
+ "LABEL_129": 129,
+ "LABEL_13": 13,
+ "LABEL_130": 130,
+ "LABEL_131": 131,
+ "LABEL_132": 132,
+ "LABEL_133": 133,
+ "LABEL_134": 134,
+ "LABEL_135": 135,
+ "LABEL_136": 136,
+ "LABEL_137": 137,
+ "LABEL_138": 138,
+ "LABEL_139": 139,
+ "LABEL_14": 14,
+ "LABEL_140": 140,
+ "LABEL_141": 141,
+ "LABEL_142": 142,
+ "LABEL_143": 143,
+ "LABEL_144": 144,
+ "LABEL_145": 145,
+ "LABEL_146": 146,
+ "LABEL_147": 147,
+ "LABEL_148": 148,
+ "LABEL_149": 149,
+ "LABEL_15": 15,
+ "LABEL_16": 16,
+ "LABEL_17": 17,
+ "LABEL_18": 18,
+ "LABEL_19": 19,
+ "LABEL_2": 2,
+ "LABEL_20": 20,
+ "LABEL_21": 21,
+ "LABEL_22": 22,
+ "LABEL_23": 23,
+ "LABEL_24": 24,
+ "LABEL_25": 25,
+ "LABEL_26": 26,
+ "LABEL_27": 27,
+ "LABEL_28": 28,
+ "LABEL_29": 29,
+ "LABEL_3": 3,
+ "LABEL_30": 30,
+ "LABEL_31": 31,
+ "LABEL_32": 32,
+ "LABEL_33": 33,
+ "LABEL_34": 34,
+ "LABEL_35": 35,
+ "LABEL_36": 36,
+ "LABEL_37": 37,
+ "LABEL_38": 38,
+ "LABEL_39": 39,
+ "LABEL_4": 4,
+ "LABEL_40": 40,
+ "LABEL_41": 41,
+ "LABEL_42": 42,
+ "LABEL_43": 43,
+ "LABEL_44": 44,
+ "LABEL_45": 45,
+ "LABEL_46": 46,
+ "LABEL_47": 47,
+ "LABEL_48": 48,
+ "LABEL_49": 49,
+ "LABEL_5": 5,
+ "LABEL_50": 50,
+ "LABEL_51": 51,
+ "LABEL_52": 52,
+ "LABEL_53": 53,
+ "LABEL_54": 54,
+ "LABEL_55": 55,
+ "LABEL_56": 56,
+ "LABEL_57": 57,
+ "LABEL_58": 58,
+ "LABEL_59": 59,
+ "LABEL_6": 6,
+ "LABEL_60": 60,
+ "LABEL_61": 61,
+ "LABEL_62": 62,
+ "LABEL_63": 63,
+ "LABEL_64": 64,
+ "LABEL_65": 65,
+ "LABEL_66": 66,
+ "LABEL_67": 67,
+ "LABEL_68": 68,
+ "LABEL_69": 69,
+ "LABEL_7": 7,
+ "LABEL_70": 70,
+ "LABEL_71": 71,
+ "LABEL_72": 72,
+ "LABEL_73": 73,
+ "LABEL_74": 74,
+ "LABEL_75": 75,
+ "LABEL_76": 76,
+ "LABEL_77": 77,
+ "LABEL_78": 78,
+ "LABEL_79": 79,
+ "LABEL_8": 8,
+ "LABEL_80": 80,
+ "LABEL_81": 81,
+ "LABEL_82": 82,
+ "LABEL_83": 83,
+ "LABEL_84": 84,
+ "LABEL_85": 85,
+ "LABEL_86": 86,
+ "LABEL_87": 87,
+ "LABEL_88": 88,
+ "LABEL_89": 89,
+ "LABEL_9": 9,
+ "LABEL_90": 90,
+ "LABEL_91": 91,
+ "LABEL_92": 92,
+ "LABEL_93": 93,
+ "LABEL_94": 94,
+ "LABEL_95": 95,
+ "LABEL_96": 96,
+ "LABEL_97": 97,
+ "LABEL_98": 98,
+ "LABEL_99": 99
+ },
+ "layer_norm_eps": 1e-12,
+ "model_type": "dpt",
+ "neck_hidden_sizes": [
+ 256,
+ 512,
+ 768,
+ 768
+ ],
+ "num_attention_heads": 12,
+ "num_channels": 3,
+ "num_hidden_layers": 12,
+ "patch_size": 16,
+ "qkv_bias": true,
+ "readout_type": "project",
+ "reassemble_factors": [
+ 1,
+ 1,
+ 1,
+ 0.5
+ ],
+ "semantic_classifier_dropout": 0.1,
+ "semantic_loss_ignore_index": 255,
+ "torch_dtype": "float32",
+ "transformers_version": null,
+ "use_auxiliary_head": true,
+ "use_batch_norm_in_fusion_residual": false
+ }
depth_estimator/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ef8e608ccc456c76b813f349b2011024d1a490034c1960884208fc9fb67865d
+ size 489657285
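For reference, the depth estimator added above can be loaded on its own with transformers. A minimal sketch, assuming this repo is published under the id "stabilityai/stable-diffusion-2-depth":

from transformers import DPTForDepthEstimation

# Load only the DPT depth model from the depth_estimator/ subfolder.
# The repo id is an assumption; substitute the actual one.
depth_estimator = DPTForDepthEstimation.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", subfolder="depth_estimator"
)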
feature_extractor/preprocessor_config.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "ensure_multiple_of": 1,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_processor_type": "DPTImageProcessor",
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "keep_aspect_ratio": false,
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "height": 384,
+ "width": 384
+ }
+ }
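Per this config, the feature extractor resizes inputs to 384x384, rescales pixel values by 1/255, and normalizes each channel with mean and std 0.5. A usage sketch under the same assumed repo id ("input.png" is a hypothetical file):

from PIL import Image
from transformers import DPTImageProcessor

processor = DPTImageProcessor.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", subfolder="feature_extractor"
)
# Produces a float tensor of shape (1, 3, 384, 384), ready for the DPT model.
pixel_values = processor(images=Image.open("input.png"), return_tensors="pt").pixel_values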
model_index.json ADDED
@@ -0,0 +1,32 @@
+ {
+ "_class_name": "StableDiffusionDepth2ImgPipeline",
+ "_diffusers_version": "0.10.0.dev0",
+ "depth_estimator": [
+ "transformers",
+ "DPTForDepthEstimation"
+ ],
+ "feature_extractor": [
+ "transformers",
+ "DPTImageProcessor"
+ ],
+ "scheduler": [
+ "diffusers",
+ "PNDMScheduler"
+ ],
+ "text_encoder": [
+ "transformers",
+ "CLIPTextModel"
+ ],
+ "tokenizer": [
+ "transformers",
+ "CLIPTokenizer"
+ ],
+ "unet": [
+ "diffusers",
+ "UNet2DConditionModel"
+ ],
+ "vae": [
+ "diffusers",
+ "AutoencoderKL"
+ ]
+ }
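model_index.json maps each pipeline component to the library and class used to load it, so diffusers can assemble the whole depth-to-image pipeline from the subfolders in this commit. A minimal end-to-end sketch, assuming the repo id "stabilityai/stable-diffusion-2-depth" and a CUDA device:

import torch
from PIL import Image
from diffusers import StableDiffusionDepth2ImgPipeline

# All components (unet, vae, text encoder, depth estimator, ...) are
# pulled from their subfolders according to model_index.json.
pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16
).to("cuda")

init_image = Image.open("input.png")  # hypothetical input image
result = pipe(prompt="two tigers", image=init_image, strength=0.7).images[0]
result.save("output.png")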
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "_class_name": "PNDMScheduler",
+ "_diffusers_version": "0.10.0.dev0",
+ "beta_end": 0.012,
+ "beta_schedule": "scaled_linear",
+ "beta_start": 0.00085,
+ "clip_sample": false,
+ "num_train_timesteps": 1000,
+ "prediction_type": "epsilon",
+ "set_alpha_to_one": false,
+ "skip_prk_steps": true,
+ "steps_offset": 1,
+ "trained_betas": null
+ }
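The scheduler is rebuilt from this config at pipeline load time; it can also be instantiated on its own, as in this sketch (same assumed repo id):

from diffusers import PNDMScheduler

scheduler = PNDMScheduler.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", subfolder="scheduler"
)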
text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "_name_or_path": "./hf-models/stable-diffusion-2-depth/text_encoder",
+ "architectures": [
+ "CLIPTextModel"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 0,
+ "dropout": 0.0,
+ "eos_token_id": 2,
+ "hidden_act": "gelu",
+ "hidden_size": 1024,
+ "initializer_factor": 1.0,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 77,
+ "model_type": "clip_text_model",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 23,
+ "pad_token_id": 1,
+ "projection_dim": 512,
+ "torch_dtype": "float32",
+ "transformers_version": "4.26.0.dev0",
+ "vocab_size": 49408
+ }
text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9c787e9388134c1a25dc69934a51a32a2683b38b8a9b017e1f3a692b8ed6b98
+ size 1361679905
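The text encoder is a CLIPTextModel with hidden size 1024 and a 77-token context, providing the cross-attention conditioning for the UNet. A loading sketch (repo id assumed as before):

from transformers import CLIPTextModel

text_encoder = CLIPTextModel.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", subfolder="text_encoder"
)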
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "bos_token": {
+ "content": "<|startoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "!",
+ "unk_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "add_prefix_space": false,
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "<|startoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "do_lower_case": true,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "errors": "replace",
+ "model_max_length": 77,
+ "name_or_path": "./hf-models/stable-diffusion-2-depth/tokenizer",
+ "pad_token": "<|endoftext|>",
+ "special_tokens_map_file": "./special_tokens_map.json",
+ "tokenizer_class": "CLIPTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
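Tokenization pads every prompt to the fixed 77-token context the text encoder expects; note the pad token here is "<|endoftext|>". A sketch, assuming the same repo id:

from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", subfolder="tokenizer"
)
input_ids = tokenizer(
    "two tigers",
    padding="max_length",
    max_length=tokenizer.model_max_length,  # 77, per the config above
    truncation=True,
    return_tensors="pt",
).input_ids  # shape (1, 77)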
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
unet/config.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "_class_name": "UNet2DConditionModel",
+ "_diffusers_version": "0.10.0.dev0",
+ "_name_or_path": "./hf-models/stable-diffusion-2-depth/unet",
+ "act_fn": "silu",
+ "attention_head_dim": [
+ 5,
+ 10,
+ 20,
+ 20
+ ],
+ "block_out_channels": [
+ 320,
+ 640,
+ 1280,
+ 1280
+ ],
+ "center_input_sample": false,
+ "cross_attention_dim": 1024,
+ "down_block_types": [
+ "CrossAttnDownBlock2D",
+ "CrossAttnDownBlock2D",
+ "CrossAttnDownBlock2D",
+ "DownBlock2D"
+ ],
+ "downsample_padding": 1,
+ "dual_cross_attention": false,
+ "flip_sin_to_cos": true,
+ "freq_shift": 0,
+ "in_channels": 5,
+ "layers_per_block": 2,
+ "mid_block_scale_factor": 1,
+ "norm_eps": 1e-05,
+ "norm_num_groups": 32,
+ "num_class_embeds": null,
+ "only_cross_attention": false,
+ "out_channels": 4,
+ "sample_size": 32,
+ "up_block_types": [
+ "UpBlock2D",
+ "CrossAttnUpBlock2D",
+ "CrossAttnUpBlock2D",
+ "CrossAttnUpBlock2D"
+ ],
+ "use_linear_projection": true
+ }
unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8564f86e7f6c919cd53e5782c8920c3720cdeb44e12c8e38dd956825183c959c
+ size 3463946213
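Unlike the base text-to-image UNet, this one has in_channels=5: the 4 latent channels concatenated with a 1-channel depth map at latent resolution. A shape-level sketch (repo id assumed; shapes illustrative, e.g. 64x64 latents for a 512x512 image):

import torch
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", subfolder="unet"
)
latents = torch.randn(1, 4, 64, 64)   # noisy image latents
depth = torch.randn(1, 1, 64, 64)     # depth map resized to latent resolution
text_emb = torch.randn(1, 77, 1024)   # text embeddings (cross_attention_dim=1024)
noise_pred = unet(
    torch.cat([latents, depth], dim=1),  # 5-channel input
    timestep=10,
    encoder_hidden_states=text_emb,
).sample  # predicted noise, shape (1, 4, 64, 64)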
vae/config.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "_class_name": "AutoencoderKL",
+ "_diffusers_version": "0.10.0.dev0",
+ "_name_or_path": "./hf-models/stable-diffusion-2-depth/vae",
+ "act_fn": "silu",
+ "block_out_channels": [
+ 128,
+ 256,
+ 512,
+ 512
+ ],
+ "down_block_types": [
+ "DownEncoderBlock2D",
+ "DownEncoderBlock2D",
+ "DownEncoderBlock2D",
+ "DownEncoderBlock2D"
+ ],
+ "in_channels": 3,
+ "latent_channels": 4,
+ "layers_per_block": 2,
+ "norm_num_groups": 32,
+ "out_channels": 3,
+ "sample_size": 256,
+ "up_block_types": [
+ "UpDecoderBlock2D",
+ "UpDecoderBlock2D",
+ "UpDecoderBlock2D",
+ "UpDecoderBlock2D"
+ ]
+ }
vae/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4302e1efa25f3a47ceb7536bc335715ad9d1f203e90c2d25507600d74006e89
+ size 334715313
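The VAE decodes 4-channel latents back to 3-channel RGB at 8x spatial upsampling. A decoding sketch (repo id assumed; the 1/0.18215 latent scaling is the usual Stable Diffusion convention, not stored in this config):

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", subfolder="vae"
)
latents = torch.randn(1, 4, 64, 64)          # illustrative latents
image = vae.decode(latents / 0.18215).sample  # shape (1, 3, 512, 512)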