pengHTYX committed
Commit ecd72f1
1 Parent(s): cbf3afe
feature_extractor/preprocessor_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": true,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_processor_type": "CLIPImageProcessor",
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 224
+   }
+ }
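This is the standard CLIP preprocessing config: resize the shortest edge to 224, center-crop to 224x224, rescale by 1/255, and normalize with the CLIP mean/std. A minimal loading sketch with transformers, assuming a local checkout of this repository at `.` (the path is an assumption, not part of the commit):

```python
from transformers import CLIPImageProcessor

# Hypothetical local path to a checkout of this repository.
feature_extractor = CLIPImageProcessor.from_pretrained(
    ".", subfolder="feature_extractor"
)
# Resizes the shortest edge to 224, center-crops to 224x224,
# rescales pixel values by 1/255, and normalizes with the CLIP mean/std above.
```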
image_encoder/config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "_name_or_path": "stabilityai/stable-diffusion-2-1-unclip",
+   "architectures": [
+     "CLIPVisionModelWithProjection"
+   ],
+   "attention_dropout": 0.0,
+   "dropout": 0.0,
+   "hidden_act": "gelu",
+   "hidden_size": 1280,
+   "image_size": 224,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 5120,
+   "layer_norm_eps": 1e-05,
+   "model_type": "clip_vision_model",
+   "num_attention_heads": 16,
+   "num_channels": 3,
+   "num_hidden_layers": 32,
+   "patch_size": 14,
+   "projection_dim": 1024,
+   "torch_dtype": "float16",
+   "transformers_version": "4.37.2"
+ }
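The config describes the CLIP ViT-H/14 vision tower taken from stable-diffusion-2-1-unclip (32 layers, hidden size 1280) projecting to 1024-dim image embeddings. A loading sketch, again assuming a hypothetical local checkout:

```python
import torch
from transformers import CLIPVisionModelWithProjection

# Hypothetical local path; weights are stored in float16 per "torch_dtype".
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    ".", subfolder="image_encoder", torch_dtype=torch.float16
)
```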
image_encoder/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae616c24393dd1854372b0639e5541666f7521cbe219669255e865cb7f89466a
+ size 1264217240
image_noising_scheduler/scheduler_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "_class_name": "DDPMScheduler",
+   "_diffusers_version": "0.26.0",
+   "beta_end": 0.02,
+   "beta_schedule": "squaredcos_cap_v2",
+   "beta_start": 0.0001,
+   "clip_sample": true,
+   "clip_sample_range": 1.0,
+   "dynamic_thresholding_ratio": 0.995,
+   "num_train_timesteps": 1000,
+   "prediction_type": "epsilon",
+   "rescale_betas_zero_snr": false,
+   "sample_max_value": 1.0,
+   "steps_offset": 0,
+   "thresholding": false,
+   "timestep_spacing": "leading",
+   "trained_betas": null,
+   "variance_type": "fixed_small"
+ }
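As in Stable unCLIP, this DDPMScheduler performs noise augmentation on the CLIP image embedding rather than denoising latents. A loading sketch (local path assumed):

```python
from diffusers import DDPMScheduler

# Adds a controlled amount of noise to the image embedding (unCLIP noise
# augmentation) using the squaredcos_cap_v2 beta schedule defined above.
image_noising_scheduler = DDPMScheduler.from_pretrained(
    ".", subfolder="image_noising_scheduler"
)
```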
image_normalizer/config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_class_name": "StableUnCLIPImageNormalizer",
+   "_diffusers_version": "0.26.0",
+   "_name_or_path": "stabilityai/stable-diffusion-2-1-unclip",
+   "embedding_dim": 1024
+ }
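The normalizer scales the 1024-dim CLIP image embedding before and after noise augmentation. A sketch, assuming the class resolves from diffusers' stable_diffusion pipeline module as declared in model_index.json below:

```python
from diffusers.pipelines.stable_diffusion import StableUnCLIPImageNormalizer

# Hypothetical local path; holds the mean/std used to (de)normalize the
# 1024-dim image embedding around the noising step.
image_normalizer = StableUnCLIPImageNormalizer.from_pretrained(
    ".", subfolder="image_normalizer"
)
```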
image_normalizer/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7772cf09639cea0c65639a3bfc88004a66d42259090d03fa8e15efdc255f240a
+ size 4272
model_index.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "_class_name": "StableUnCLIPImg2ImgPipeline",
+   "_diffusers_version": "0.26.0",
+   "feature_extractor": [
+     "transformers",
+     "CLIPImageProcessor"
+   ],
+   "image_encoder": [
+     "transformers",
+     "CLIPVisionModelWithProjection"
+   ],
+   "image_noising_scheduler": [
+     "diffusers",
+     "DDPMScheduler"
+   ],
+   "image_normalizer": [
+     "stable_diffusion",
+     "StableUnCLIPImageNormalizer"
+   ],
+   "scheduler": [
+     "diffusers",
+     "DDIMScheduler"
+   ],
+   "text_encoder": [
+     "transformers",
+     "CLIPTextModel"
+   ],
+   "tokenizer": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "unet": [
+     "mvdiffusion.models.unet_mv2d_condition",
+     "UNetMV2DConditionModel"
+   ],
+   "vae": [
+     "diffusers",
+     "AutoencoderKL"
+   ]
+ }
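model_index.json maps each pipeline component to its (library, class) pair and subfolder. Note that `unet` points at the project-specific `mvdiffusion.models.unet_mv2d_condition.UNetMV2DConditionModel` rather than a stock diffusers class, so loading the full pipeline requires the project's `mvdiffusion` package on the Python path. A hedged sketch, not the project's official entry point:

```python
import torch
from diffusers import DiffusionPipeline

# Sketch only: assumes the project's `mvdiffusion` package is importable so
# diffusers can resolve "mvdiffusion.models.unet_mv2d_condition" from model_index.json.
pipe = DiffusionPipeline.from_pretrained(".", torch_dtype=torch.float16)
pipe.to("cuda")
```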
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "_class_name": "DDIMScheduler",
+   "_diffusers_version": "0.26.0",
+   "beta_end": 0.012,
+   "beta_schedule": "linear",
+   "beta_start": 0.00085,
+   "clip_sample": false,
+   "clip_sample_range": 1.0,
+   "dynamic_thresholding_ratio": 0.995,
+   "num_train_timesteps": 1000,
+   "prediction_type": "v_prediction",
+   "rescale_betas_zero_snr": false,
+   "sample_max_value": 1.0,
+   "set_alpha_to_one": false,
+   "skip_prk_steps": true,
+   "steps_offset": 1,
+   "thresholding": false,
+   "timestep_spacing": "leading",
+   "trained_betas": null
+ }
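The main denoising scheduler is a DDIMScheduler with `"prediction_type": "v_prediction"`, matching the SD 2.1-unclip parameterization. A loading sketch (local path assumed):

```python
from diffusers import DDIMScheduler

scheduler = DDIMScheduler.from_pretrained(".", subfolder="scheduler")
scheduler.set_timesteps(50)  # e.g. 50 inference steps; "leading" timestep spacing
```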
text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "stabilityai/stable-diffusion-2-1-unclip",
+   "architectures": [
+     "CLIPTextModel"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_size": 1024,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip_text_model",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 23,
+   "pad_token_id": 1,
+   "projection_dim": 512,
+   "torch_dtype": "float16",
+   "transformers_version": "4.37.2",
+   "vocab_size": 49408
+ }
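This is the SD 2.1 CLIP text encoder (23 hidden layers, hidden size 1024, 77-token context). A loading sketch under the same local-checkout assumption:

```python
import torch
from transformers import CLIPTextModel

# Hypothetical local path; float16 per "torch_dtype".
text_encoder = CLIPTextModel.from_pretrained(
    ".", subfolder="text_encoder", torch_dtype=torch.float16
)
```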
text_encoder/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc1827c465450322616f06dea41596eac7d493f4e95904dcb51f0fc745c4e13f
+ size 680820392
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "!",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "!",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49406": {
+       "content": "<|startoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49407": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|startoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": true,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 77,
+   "pad_token": "!",
+   "tokenizer_class": "CLIPTokenizer",
+   "unk_token": "<|endoftext|>"
+ }
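Together with special_tokens_map.json above, this configures a standard CLIPTokenizer with a 77-token context and `!` (id 0) as the pad token. A usage sketch with a hypothetical prompt:

```python
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained(".", subfolder="tokenizer")
ids = tokenizer(
    "a photo of a chair",                   # hypothetical prompt
    padding="max_length",
    max_length=tokenizer.model_max_length,  # 77
    truncation=True,
    return_tensors="pt",
).input_ids
```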
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
unet/config.json ADDED
@@ -0,0 +1,95 @@
+ {
+   "_class_name": "UNetMV2DConditionModel",
+   "_diffusers_version": "0.26.0",
+   "_name_or_path": "../era3d_checkpoint_backup/output/unit-unclip-512-6view-8w-selfcd-rowwisemv-linear-steplr-onlyortho/checkpoint-21250/unet_ema",
+   "act_fn": "silu",
+   "addition_channels": [
+     1280,
+     1280,
+     1280
+   ],
+   "addition_downsample": false,
+   "addition_embed_type": null,
+   "addition_embed_type_num_heads": 64,
+   "addition_time_embed_dim": null,
+   "attention_head_dim": [
+     5,
+     10,
+     20,
+     20
+   ],
+   "block_out_channels": [
+     320,
+     640,
+     1280,
+     1280
+   ],
+   "cd_attention_last": false,
+   "cd_attention_mid": false,
+   "center_input_sample": false,
+   "class_embed_type": "projection",
+   "class_embeddings_concat": false,
+   "conv_in_kernel": 3,
+   "conv_out_kernel": 3,
+   "cross_attention_dim": 1024,
+   "cross_attention_norm": null,
+   "decay": 0.9999,
+   "down_block_types": [
+     "CrossAttnDownBlockMV2D",
+     "CrossAttnDownBlockMV2D",
+     "CrossAttnDownBlockMV2D",
+     "DownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dual_cross_attention": false,
+   "encoder_hid_dim": null,
+   "encoder_hid_dim_type": null,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 8,
+   "inv_gamma": 1.0,
+   "layers_per_block": 2,
+   "mid_block_only_cross_attention": null,
+   "mid_block_scale_factor": 1,
+   "mid_block_type": "UNetMidBlockMV2DCrossAttn",
+   "min_decay": 0.0,
+   "multiview_attention": true,
+   "mvcd_attention": true,
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "num_attention_heads": null,
+   "num_class_embeds": null,
+   "num_regress_blocks": 3,
+   "num_views": 6,
+   "only_cross_attention": false,
+   "optimization_step": 6250,
+   "out_channels": 4,
+   "power": 0.6666666666666666,
+   "projection_camera_embeddings_input_dim": 4,
+   "projection_class_embeddings_input_dim": 2048,
+   "regress_elevation": false,
+   "regress_focal_length": false,
+   "resnet_out_scale_factor": 1.0,
+   "resnet_skip_time_act": false,
+   "resnet_time_scale_shift": "default",
+   "sample_size": 64,
+   "selfattn_block": "self_rowwise",
+   "sparse_mv_attention": true,
+   "time_cond_proj_dim": null,
+   "time_embedding_act_fn": null,
+   "time_embedding_dim": null,
+   "time_embedding_type": "positional",
+   "timestep_post_act": null,
+   "transformer_layers_per_block": 1,
+   "up_block_types": [
+     "UpBlock2D",
+     "CrossAttnUpBlockMV2D",
+     "CrossAttnUpBlockMV2D",
+     "CrossAttnUpBlockMV2D"
+   ],
+   "upcast_attention": true,
+   "update_after_step": 0,
+   "use_dino": false,
+   "use_ema_warmup": false,
+   "use_linear_projection": true
+ }
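UNetMV2DConditionModel is the project's multiview UNet: 6 views (`num_views`), row-wise multiview self-attention (`selfattn_block: "self_rowwise"`), 8 input channels (vs. 4 in the base SD UNet), and cross-attention dim 1024. It is not part of diffusers, so loading it requires the project's source. A hedged sketch, assuming the `mvdiffusion` package from the codebase is importable:

```python
import torch
# Sketch only: UNetMV2DConditionModel is defined in the project's code (see the
# module path in model_index.json), not in diffusers; it must be on sys.path.
from mvdiffusion.models.unet_mv2d_condition import UNetMV2DConditionModel

unet = UNetMV2DConditionModel.from_pretrained(
    ".", subfolder="unet", torch_dtype=torch.float16
)
```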
unet/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:591420f475c26575f3d7e57efe4709272c4ad13fafcbff607cd3682d85dfb83c
+ size 3679234408
vae/config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_class_name": "AutoencoderKL",
+   "_diffusers_version": "0.26.0",
+   "_name_or_path": "stabilityai/stable-diffusion-2-1-unclip",
+   "act_fn": "silu",
+   "block_out_channels": [
+     128,
+     256,
+     512,
+     512
+   ],
+   "down_block_types": [
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D"
+   ],
+   "force_upcast": true,
+   "in_channels": 3,
+   "latent_channels": 4,
+   "layers_per_block": 2,
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "sample_size": 768,
+   "scaling_factor": 0.18215,
+   "up_block_types": [
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D"
+   ]
+ }
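The VAE is the stock AutoencoderKL from stable-diffusion-2-1-unclip with scaling factor 0.18215. A loading and latent-scaling sketch (local path assumed):

```python
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained(".", subfolder="vae", torch_dtype=torch.float16)

# Typical usage: encode an image tensor in [-1, 1] to latents, multiply by
# vae.config.scaling_factor before the UNet, and divide by it before decoding.
# latents = vae.encode(image).latent_dist.sample() * vae.config.scaling_factor
# image   = vae.decode(latents / vae.config.scaling_factor).sample
```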
vae/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3e4c08995484ee61270175e9e7a072b66a6e4eeb5f0c266667fe1f45b90daf9a
3
+ size 167335342