Nadda committed
Commit: 903ffa8
Parent: b5fb996

Upload StableDiffusionXLPipeline
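This commit swaps the repo's contents from a Stable Diffusion inpainting setup to the stock SDXL base components. Below is a minimal sketch of how an upload like this is typically produced with diffusers; the target repo id is a placeholder, and that this exact call created the commit is an assumption.

```python
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
# push_to_hub serializes model_index.json plus each component's config and
# safetensors weights, the same file layout this commit touches.
pipe.push_to_hub("user/repo")  # placeholder repo id
```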
model_index.json CHANGED
@@ -1,32 +1,36 @@
 {
-  "_class_name": "StableDiffusionInpaintPipeline",
+  "_class_name": "StableDiffusionXLPipeline",
   "_diffusers_version": "0.27.2",
-  "_name_or_path": "/content/Orion/Logs",
+  "_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0",
   "feature_extractor": [
-    "transformers",
-    "CLIPImageProcessor"
-  ],
-  "image_encoder": [
     null,
     null
   ],
-  "requires_safety_checker": false,
-  "safety_checker": [
+  "force_zeros_for_empty_prompt": true,
+  "image_encoder": [
     null,
     null
   ],
   "scheduler": [
     "diffusers",
-    "PNDMScheduler"
+    "EulerDiscreteScheduler"
   ],
   "text_encoder": [
     "transformers",
     "CLIPTextModel"
   ],
+  "text_encoder_2": [
+    "transformers",
+    "CLIPTextModelWithProjection"
+  ],
   "tokenizer": [
     "transformers",
     "CLIPTokenizer"
   ],
+  "tokenizer_2": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
   "unet": [
     "diffusers",
     "UNet2DConditionModel"
scheduler/scheduler_config.json CHANGED
@@ -1,15 +1,22 @@
 {
-  "_class_name": "PNDMScheduler",
+  "_class_name": "EulerDiscreteScheduler",
   "_diffusers_version": "0.27.2",
   "beta_end": 0.012,
   "beta_schedule": "scaled_linear",
   "beta_start": 0.00085,
   "clip_sample": false,
+  "interpolation_type": "linear",
   "num_train_timesteps": 1000,
   "prediction_type": "epsilon",
+  "rescale_betas_zero_snr": false,
+  "sample_max_value": 1.0,
   "set_alpha_to_one": false,
+  "sigma_max": null,
+  "sigma_min": null,
   "skip_prk_steps": true,
   "steps_offset": 1,
   "timestep_spacing": "leading",
-  "trained_betas": null
+  "timestep_type": "discrete",
+  "trained_betas": null,
+  "use_karras_sigmas": false
 }
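The PNDM-to-Euler switch keeps the shared noise-schedule keys (betas, timesteps, spacing) and adds Euler-specific defaults. The same result is usually achieved in code by rebuilding the scheduler from its config; a sketch, assuming the upstream base repo:

```python
from diffusers import DiffusionPipeline, EulerDiscreteScheduler

pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
# from_config carries over compatible keys and fills scheduler-specific
# ones (sigma_min, use_karras_sigmas, ...) with defaults, as in this diff.
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
```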
text_encoder/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/content/Orion/Logs/text_encoder",
+  "_name_or_path": "/root/.cache/huggingface/hub/models--stabilityai--stable-diffusion-xl-base-1.0/snapshots/462165984030d82259a11f4367a4eed129e94a7b/text_encoder",
   "architectures": [
     "CLIPTextModel"
   ],
@@ -7,19 +7,19 @@
   "bos_token_id": 0,
   "dropout": 0.0,
   "eos_token_id": 2,
-  "hidden_act": "gelu",
-  "hidden_size": 1024,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 768,
   "initializer_factor": 1.0,
   "initializer_range": 0.02,
-  "intermediate_size": 4096,
+  "intermediate_size": 3072,
   "layer_norm_eps": 1e-05,
   "max_position_embeddings": 77,
   "model_type": "clip_text_model",
-  "num_attention_heads": 16,
-  "num_hidden_layers": 23,
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
   "pad_token_id": 1,
-  "projection_dim": 512,
+  "projection_dim": 768,
   "torch_dtype": "float16",
-  "transformers_version": "4.41.1",
+  "transformers_version": "4.41.2",
   "vocab_size": 49408
 }
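The new dimensions (hidden_size 768, 12 layers, 12 heads, quick_gelu) are those of CLIP ViT-L/14, SDXL's first text encoder; the old values matched the larger SD2-style OpenCLIP encoder the inpainting pipeline used. A quick sanity check against the upstream weights:

```python
from transformers import CLIPTextModel

enc = CLIPTextModel.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="text_encoder"
)
print(enc.config.hidden_size, enc.config.num_hidden_layers)  # 768 12
```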
text_encoder/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bc1827c465450322616f06dea41596eac7d493f4e95904dcb51f0fc745c4e13f
-size 680820392
+oid sha256:660c6f5b1abae9dc498ac2d21e1347d2abdb0cf6c0c0c8576cd796491d9a6cdd
+size 246144152
text_encoder_2/config.json ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "/root/.cache/huggingface/hub/models--stabilityai--stable-diffusion-xl-base-1.0/snapshots/462165984030d82259a11f4367a4eed129e94a7b/text_encoder_2",
+  "architectures": [
+    "CLIPTextModelWithProjection"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_size": 1280,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 5120,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 20,
+  "num_hidden_layers": 32,
+  "pad_token_id": 1,
+  "projection_dim": 1280,
+  "torch_dtype": "float16",
+  "transformers_version": "4.41.2",
+  "vocab_size": 49408
+}
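text_encoder_2 is the wider OpenCLIP-style encoder (hidden_size 1280, 32 layers, projection_dim 1280). In the standard SDXL layout (an assumption here, not stated by the diff), the two encoders' hidden states are concatenated along the feature axis before cross-attention, which is why the widths sum to the UNet's cross_attention_dim later in this commit:

```python
hidden_1 = 768    # text_encoder hidden_size (previous file)
hidden_2 = 1280   # text_encoder_2 hidden_size (this file)
# SDXL feeds cat([h1, h2], dim=-1) into the UNet's cross-attention.
print(hidden_1 + hidden_2)  # 2048 == unet cross_attention_dim below
```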
text_encoder_2/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec310df2af79c318e24d20511b601a591ca8cd4f1fce1d8dff822a356bcdb1f4
+size 1389382176
tokenizer/special_tokens_map.json CHANGED
@@ -13,13 +13,7 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": {
-    "content": "!",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
+  "pad_token": "<|endoftext|>",
   "unk_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
tokenizer/tokenizer_config.json CHANGED
@@ -1,14 +1,6 @@
 {
   "add_prefix_space": false,
   "added_tokens_decoder": {
-    "0": {
-      "content": "!",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
     "49406": {
       "content": "<|startoftext|>",
       "lstrip": false,
@@ -32,7 +24,7 @@
   "eos_token": "<|endoftext|>",
   "errors": "replace",
   "model_max_length": 77,
-  "pad_token": "!",
+  "pad_token": "<|endoftext|>",
   "tokenizer_class": "CLIPTokenizer",
   "unk_token": "<|endoftext|>"
 }
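Both tokenizer files make the same change: padding moves from the ad-hoc "!" (token id 0) to <|endoftext|>, matching the stock SDXL first tokenizer. A quick check against the upstream files:

```python
from transformers import CLIPTokenizer

tok = CLIPTokenizer.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="tokenizer"
)
print(tok.pad_token, tok.pad_token_id)  # <|endoftext|> 49407
```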
tokenizer_2/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_2/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "!",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer_2/tokenizer_config.json ADDED
@@ -0,0 +1,38 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "!",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "49406": {
+      "content": "<|startoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "49407": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|startoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "do_lower_case": true,
+  "eos_token": "<|endoftext|>",
+  "errors": "replace",
+  "model_max_length": 77,
+  "pad_token": "!",
+  "tokenizer_class": "CLIPTokenizer",
+  "unk_token": "<|endoftext|>"
+}
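Note the asymmetry with the first tokenizer: tokenizer_2 pads with "!" (id 0), which SDXL appears to have inherited from the OpenCLIP training convention (an assumption about provenance; the value itself is right there in the config). Checking:

```python
from transformers import CLIPTokenizer

tok2 = CLIPTokenizer.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="tokenizer_2"
)
print(tok2.pad_token, tok2.pad_token_id)  # ! 0
```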
tokenizer_2/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
unet/config.json CHANGED
@@ -1,22 +1,20 @@
 {
   "_class_name": "UNet2DConditionModel",
   "_diffusers_version": "0.27.2",
-  "_name_or_path": "/content/Orion/Logs/unet",
+  "_name_or_path": "/root/.cache/huggingface/hub/models--stabilityai--stable-diffusion-xl-base-1.0/snapshots/462165984030d82259a11f4367a4eed129e94a7b/unet",
   "act_fn": "silu",
-  "addition_embed_type": null,
+  "addition_embed_type": "text_time",
   "addition_embed_type_num_heads": 64,
-  "addition_time_embed_dim": null,
+  "addition_time_embed_dim": 256,
   "attention_head_dim": [
     5,
     10,
-    20,
     20
   ],
   "attention_type": "default",
   "block_out_channels": [
     320,
     640,
-    1280,
     1280
   ],
   "center_input_sample": false,
@@ -24,13 +22,12 @@
   "class_embeddings_concat": false,
   "conv_in_kernel": 3,
   "conv_out_kernel": 3,
-  "cross_attention_dim": 1024,
+  "cross_attention_dim": 2048,
   "cross_attention_norm": null,
   "down_block_types": [
+    "DownBlock2D",
     "CrossAttnDownBlock2D",
-    "CrossAttnDownBlock2D",
-    "CrossAttnDownBlock2D",
-    "DownBlock2D"
+    "CrossAttnDownBlock2D"
   ],
   "downsample_padding": 1,
   "dropout": 0.0,
@@ -39,7 +36,7 @@
   "encoder_hid_dim_type": null,
   "flip_sin_to_cos": true,
   "freq_shift": 0,
-  "in_channels": 9,
+  "in_channels": 4,
   "layers_per_block": 2,
   "mid_block_only_cross_attention": null,
   "mid_block_scale_factor": 1,
@@ -50,24 +47,27 @@
   "num_class_embeds": null,
   "only_cross_attention": false,
   "out_channels": 4,
-  "projection_class_embeddings_input_dim": null,
+  "projection_class_embeddings_input_dim": 2816,
   "resnet_out_scale_factor": 1.0,
   "resnet_skip_time_act": false,
   "resnet_time_scale_shift": "default",
   "reverse_transformer_layers_per_block": null,
-  "sample_size": 64,
+  "sample_size": 128,
   "time_cond_proj_dim": null,
   "time_embedding_act_fn": null,
   "time_embedding_dim": null,
   "time_embedding_type": "positional",
   "timestep_post_act": null,
-  "transformer_layers_per_block": 1,
+  "transformer_layers_per_block": [
+    1,
+    2,
+    10
+  ],
   "up_block_types": [
-    "UpBlock2D",
     "CrossAttnUpBlock2D",
     "CrossAttnUpBlock2D",
-    "CrossAttnUpBlock2D"
+    "UpBlock2D"
   ],
-  "upcast_attention": false,
+  "upcast_attention": null,
   "use_linear_projection": true
 }
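The UNet changes tie the earlier files together: in_channels drops from 9 (inpainting concatenates mask and masked-image latents onto the 4 noise channels) to 4, cross_attention_dim 2048 is the two text-encoder widths concatenated, and projection_class_embeddings_input_dim 2816 is the pooled prompt embedding plus six micro-conditioning scalars each embedded at addition_time_embed_dim. A worked check, assuming the standard SDXL "text_time" conditioning layout:

```python
pooled_prompt = 1280  # text_encoder_2 projection_dim
micro_conds = 6       # original size (2) + crop coords (2) + target size (2)
time_embed = 256      # addition_time_embed_dim above
print(pooled_prompt + micro_conds * time_embed)  # 1280 + 1536 = 2816
```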
unet/diffusion_pytorch_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7ece4bb68f71960f1d47d334620e1460d181c45dfe5ac17a08351cd2844de701
-size 1731933536
+oid sha256:de8a229d07acb65d8edf22925337c687b4089172f2279835edaf6bc0c33e284d
+size 5146944008
vae/config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "_class_name": "AutoencoderKL",
   "_diffusers_version": "0.27.2",
-  "_name_or_path": "/content/Orion/Logs/vae",
+  "_name_or_path": "/root/.cache/huggingface/hub/models--stabilityai--stable-diffusion-xl-base-1.0/snapshots/462165984030d82259a11f4367a4eed129e94a7b/vae",
   "act_fn": "silu",
   "block_out_channels": [
     128,
@@ -23,8 +23,8 @@
   "layers_per_block": 2,
   "norm_num_groups": 32,
   "out_channels": 3,
-  "sample_size": 512,
-  "scaling_factor": 0.18215,
+  "sample_size": 1024,
+  "scaling_factor": 0.13025,
   "up_block_types": [
     "UpDecoderBlock2D",
     "UpDecoderBlock2D",
vae/diffusion_pytorch_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3e4c08995484ee61270175e9e7a072b66a6e4eeb5f0c266667fe1f45b90daf9a
+oid sha256:bcb60880a46b63dea58e9bc591abe15f8350bde47b405f9c38f4be70c6161e68
 size 167335342