saddam213 commited on
Commit
421fe98
1 Parent(s): e3f5a54

Initial Upload

Browse files
.gitattributes CHANGED
@@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ controlnet/model.onnx.data filter=lfs diff=lfs merge=lfs -text
37
+ Icon.png filter=lfs diff=lfs merge=lfs -text
38
+ Sample.png filter=lfs diff=lfs merge=lfs -text
39
+ Sample2.png filter=lfs diff=lfs merge=lfs -text
40
+ Sample3.png filter=lfs diff=lfs merge=lfs -text
41
+ Sample4.png filter=lfs diff=lfs merge=lfs -text
42
+ unet/model.onnx.data filter=lfs diff=lfs merge=lfs -text
Icon.png ADDED

Git LFS Details

  • SHA256: b2fe26c150596ab2ed1b7da72ca5b380ac8ce5de470ec3c080da84e663f83cd0
  • Pointer size: 132 Bytes
  • Size of remote file: 1.57 MB
README.md ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Midgard Pony v3 - Onnx Olive DirectML Optimized
2
+
3
+ ## Original Model
4
+ https://civitai.com/models/470287?modelVersionId=561310
5
+
6
+ ## C# Inference Demo
7
+ https://github.com/TensorStack-AI/OnnxStack
8
+
9
+ ```csharp
10
+ // Create Pipeline
11
+ var pipeline = StableDiffusionXLPipeline.CreatePipeline("D:\\Models\\MidgardPony-XL");
12
+
13
+ // Prompt
14
+ var promptOptions = new PromptOptions
15
+ {
16
+ Prompt = "Craft an image of a gallant furry prince, with a charming smile and a sword at his side, ready to embark on a quest"
17
+ };
18
+
19
+ // Run pipeline
20
+ var result = await pipeline.GenerateImageAsync(promptOptions);
21
+
22
+ // Save Image Result
23
+ await result.SaveAsync("Result.png");
24
+ ```
25
+ ## Inference Result
26
+ ![Intro Image](Sample.png)
Sample.png ADDED

Git LFS Details

  • SHA256: b96caf002ea6ee88a0e5011631cdfe3dbab732430d954550857dfae06a6db9b2
  • Pointer size: 132 Bytes
  • Size of remote file: 1.9 MB
Sample2.png ADDED

Git LFS Details

  • SHA256: 2951597f93bda6ee8094303e36b1c6477f665dd79aec8ebce90c611bc52f0024
  • Pointer size: 132 Bytes
  • Size of remote file: 1.75 MB
Sample3.png ADDED

Git LFS Details

  • SHA256: caac10f93727ca8ab5e5bc76c74a088d955bf223749881648cc79adc7c221265
  • Pointer size: 132 Bytes
  • Size of remote file: 1.76 MB
Sample4.png ADDED

Git LFS Details

  • SHA256: 7463ec51eae01e74d2c1f3855a7a725489fe242269aa9a6dfd18a7e6d3526f33
  • Pointer size: 132 Bytes
  • Size of remote file: 1.56 MB
controlnet/config.json ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "UNet2DConditionModel",
3
+ "_diffusers_version": "0.29.0.dev0",
4
+ "act_fn": "silu",
5
+ "addition_embed_type": "text_time",
6
+ "addition_embed_type_num_heads": 64,
7
+ "addition_time_embed_dim": 256,
8
+ "attention_head_dim": [
9
+ 5,
10
+ 10,
11
+ 20
12
+ ],
13
+ "attention_type": "default",
14
+ "block_out_channels": [
15
+ 320,
16
+ 640,
17
+ 1280
18
+ ],
19
+ "center_input_sample": false,
20
+ "class_embed_type": null,
21
+ "class_embeddings_concat": false,
22
+ "conv_in_kernel": 3,
23
+ "conv_out_kernel": 3,
24
+ "cross_attention_dim": 2048,
25
+ "cross_attention_norm": null,
26
+ "down_block_types": [
27
+ "DownBlock2D",
28
+ "CrossAttnDownBlock2D",
29
+ "CrossAttnDownBlock2D"
30
+ ],
31
+ "downsample_padding": 1,
32
+ "dropout": 0.0,
33
+ "dual_cross_attention": false,
34
+ "encoder_hid_dim": null,
35
+ "encoder_hid_dim_type": null,
36
+ "flip_sin_to_cos": true,
37
+ "freq_shift": 0,
38
+ "in_channels": 4,
39
+ "layers_per_block": 2,
40
+ "mid_block_only_cross_attention": null,
41
+ "mid_block_scale_factor": 1,
42
+ "mid_block_type": "UNetMidBlock2DCrossAttn",
43
+ "norm_eps": 1e-05,
44
+ "norm_num_groups": 32,
45
+ "num_attention_heads": null,
46
+ "num_class_embeds": null,
47
+ "only_cross_attention": false,
48
+ "out_channels": 4,
49
+ "projection_class_embeddings_input_dim": 2816,
50
+ "resnet_out_scale_factor": 1.0,
51
+ "resnet_skip_time_act": false,
52
+ "resnet_time_scale_shift": "default",
53
+ "reverse_transformer_layers_per_block": null,
54
+ "sample_size": 128,
55
+ "time_cond_proj_dim": null,
56
+ "time_embedding_act_fn": null,
57
+ "time_embedding_dim": null,
58
+ "time_embedding_type": "positional",
59
+ "timestep_post_act": null,
60
+ "transformer_layers_per_block": [
61
+ 1,
62
+ 2,
63
+ 10
64
+ ],
65
+ "up_block_types": [
66
+ "CrossAttnUpBlock2D",
67
+ "CrossAttnUpBlock2D",
68
+ "UpBlock2D"
69
+ ],
70
+ "upcast_attention": null,
71
+ "use_linear_projection": true
72
+ }
controlnet/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c5ca20f6a883406ea4d3a54175d3084d1ea4a2fc8fa0c8deeab658c9f59a07c
3
+ size 1244445
controlnet/model.onnx.data ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1ce7d1b7e4322e9a303a9e596285ec714daa985cb2e3f7d111e7756afb2401a
3
+ size 5134903040
model_index.json ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "StableDiffusionXLPipeline",
3
+ "_diffusers_version": "0.29.0.dev0",
4
+ "feature_extractor": [
5
+ null,
6
+ null
7
+ ],
8
+ "force_zeros_for_empty_prompt": true,
9
+ "image_encoder": [
10
+ null,
11
+ null
12
+ ],
13
+ "scheduler": [
14
+ "diffusers",
15
+ "EulerDiscreteScheduler"
16
+ ],
17
+ "text_encoder": [
18
+ "transformers",
19
+ "CLIPTextModel"
20
+ ],
21
+ "text_encoder_2": [
22
+ "transformers",
23
+ "CLIPTextModelWithProjection"
24
+ ],
25
+ "tokenizer": [
26
+ "transformers",
27
+ "CLIPTokenizer"
28
+ ],
29
+ "tokenizer_2": [
30
+ "transformers",
31
+ "CLIPTokenizer"
32
+ ],
33
+ "unet": [
34
+ "diffusers",
35
+ "UNet2DConditionModel"
36
+ ],
37
+ "vae": [
38
+ "diffusers",
39
+ "AutoencoderKL"
40
+ ]
41
+ }
model_template.json ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "Id": "FEF994D2-7156-4BB7-9CC6-C8DFD449D26A",
3
+ "FileVersion": "1",
4
+ "Created": "2024-06-16T00:00:00",
5
+ "Name": "Midgard Pony XL",
6
+ "ImageIcon": "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/Icon.png",
7
+ "Author": "freek22",
8
+ "Description": "This is a subvariant based on my General Use model Midgard, made for Animals/Creatures and Furry Art, but it can do much more.\nMidgard, in Norse mythology, is the Middle Earth, the abode of mankind, made from the body of the first created being, the giant Aurgelmir (Ymir). According to legend, the gods killed Aurgelmir, rolled his body into the central void of the universe, and began fashioning Midgard, our earth....",
9
+ "Rank": 311,
10
+ "Group": "Online",
11
+ "Template": "SDXL",
12
+ "Category": "StableDiffusion",
13
+ "StableDiffusionTemplate": {
14
+ "PipelineType": "StableDiffusionXL",
15
+ "ModelType": "Base",
16
+ "SampleSize": 1024,
17
+ "TokenizerLength": 768,
18
+ "DiffuserTypes": [
19
+ "TextToImage",
20
+ "ImageToImage",
21
+ "ImageInpaintLegacy"
22
+ ],
23
+ "SchedulerDefaults": {
24
+ "SchedulerType": "EulerAncestral",
25
+ "Steps": 30,
26
+ "StepsMin": 4,
27
+ "StepsMax": 100,
28
+ "Guidance": 5,
29
+ "GuidanceMin": 0,
30
+ "GuidanceMax": 30,
31
+ "TimestepSpacing": "Linspace",
32
+ "BetaSchedule": "ScaledLinear",
33
+ "BetaStart": 0.00085,
34
+ "BetaEnd": 0.011
35
+ }
36
+ },
37
+ "Precision": "F16",
38
+ "MemoryMin": 6,
39
+ "MemoryMax": 16.4,
40
+ "DownloadSize": 11.2,
41
+ "Website": "https://civitai.com/models/470287",
42
+ "Repository": "https://huggingface.co/TensorStack/MidgardPony-XL-onnx",
43
+ "RepositoryFiles": [
44
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/controlnet/model.onnx",
45
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/controlnet/model.onnx.data",
46
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/controlnet/config.json",
47
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/scheduler/scheduler_config.json",
48
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/text_encoder/model.onnx",
49
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/text_encoder/config.json",
50
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/text_encoder_2/model.onnx",
51
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/text_encoder_2/config.json",
52
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/tokenizer/merges.txt",
53
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/tokenizer/model.onnx",
54
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/tokenizer/special_tokens_map.json",
55
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/tokenizer/tokenizer_config.json",
56
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/tokenizer/vocab.json",
57
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/tokenizer_2/merges.txt",
58
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/tokenizer_2/model.onnx",
59
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/tokenizer_2/special_tokens_map.json",
60
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/tokenizer_2/tokenizer_config.json",
61
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/tokenizer_2/vocab.json",
62
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/unet/model.onnx",
63
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/unet/model.onnx.data",
64
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/unet/config.json",
65
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/vae_decoder/model.onnx",
66
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/vae_decoder/config.json",
67
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/vae_encoder/model.onnx",
68
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/vae_encoder/config.json",
69
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/model_index.json",
70
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/model_template.json",
71
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/README.md"
72
+ ],
73
+ "PreviewImages": [
74
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/Sample.png",
75
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/Sample2.png",
76
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/Sample3.png",
77
+ "https://huggingface.co/TensorStack/MidgardPony-XL-onnx/resolve/main/Sample4.png"
78
+ ],
79
+ "Tags": [
80
+ "GPU",
81
+ "F16"
82
+ ]
83
+ }
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "EulerDiscreteScheduler",
3
+ "_diffusers_version": "0.29.0.dev0",
4
+ "beta_end": 0.012,
5
+ "beta_schedule": "scaled_linear",
6
+ "beta_start": 0.00085,
7
+ "clip_sample": false,
8
+ "final_sigmas_type": "zero",
9
+ "interpolation_type": "linear",
10
+ "num_train_timesteps": 1000,
11
+ "prediction_type": "epsilon",
12
+ "rescale_betas_zero_snr": false,
13
+ "sample_max_value": 1.0,
14
+ "set_alpha_to_one": false,
15
+ "sigma_max": null,
16
+ "sigma_min": null,
17
+ "skip_prk_steps": true,
18
+ "steps_offset": 1,
19
+ "timestep_spacing": "leading",
20
+ "timestep_type": "discrete",
21
+ "trained_betas": null,
22
+ "use_karras_sigmas": false
23
+ }
text_encoder/config.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "CLIPTextModel"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "bos_token_id": 0,
7
+ "dropout": 0.0,
8
+ "eos_token_id": 2,
9
+ "hidden_act": "quick_gelu",
10
+ "hidden_size": 768,
11
+ "initializer_factor": 1.0,
12
+ "initializer_range": 0.02,
13
+ "intermediate_size": 3072,
14
+ "layer_norm_eps": 1e-05,
15
+ "max_position_embeddings": 77,
16
+ "model_type": "clip_text_model",
17
+ "num_attention_heads": 12,
18
+ "num_hidden_layers": 12,
19
+ "pad_token_id": 1,
20
+ "projection_dim": 768,
21
+ "torch_dtype": "float16",
22
+ "transformers_version": "4.41.2",
23
+ "vocab_size": 49408
24
+ }
text_encoder/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7480e0f7040a404c1d5b978a14fd7d79916c6467e337a366ca3f3f909344a50b
3
+ size 246481009
text_encoder_2/config.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "CLIPTextModelWithProjection"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "bos_token_id": 0,
7
+ "dropout": 0.0,
8
+ "eos_token_id": 2,
9
+ "hidden_act": "gelu",
10
+ "hidden_size": 1280,
11
+ "initializer_factor": 1.0,
12
+ "initializer_range": 0.02,
13
+ "intermediate_size": 5120,
14
+ "layer_norm_eps": 1e-05,
15
+ "max_position_embeddings": 77,
16
+ "model_type": "clip_text_model",
17
+ "num_attention_heads": 20,
18
+ "num_hidden_layers": 32,
19
+ "pad_token_id": 1,
20
+ "projection_dim": 1280,
21
+ "torch_dtype": "float16",
22
+ "transformers_version": "4.41.2",
23
+ "vocab_size": 49408
24
+ }
text_encoder_2/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee9bc5012970ff2320c1908ecf739aa36d5ab404082a7dd3c73abfc10672ca9f
3
+ size 1390289338
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:63b7618c80a5bd8d1ee6fe92b28e7b72dde7aaa522963ff083f284501a9ec7df
3
+ size 1683233
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<|startoftext|>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "<|endoftext|>",
17
+ "unk_token": {
18
+ "content": "<|endoftext|>",
19
+ "lstrip": false,
20
+ "normalized": true,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "49406": {
5
+ "content": "<|startoftext|>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "49407": {
13
+ "content": "<|endoftext|>",
14
+ "lstrip": false,
15
+ "normalized": true,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ }
20
+ },
21
+ "bos_token": "<|startoftext|>",
22
+ "clean_up_tokenization_spaces": true,
23
+ "do_lower_case": true,
24
+ "eos_token": "<|endoftext|>",
25
+ "errors": "replace",
26
+ "model_max_length": 77,
27
+ "pad_token": "<|endoftext|>",
28
+ "tokenizer_class": "CLIPTokenizer",
29
+ "unk_token": "<|endoftext|>"
30
+ }
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_2/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_2/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:63b7618c80a5bd8d1ee6fe92b28e7b72dde7aaa522963ff083f284501a9ec7df
3
+ size 1683233
tokenizer_2/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<|startoftext|>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "!",
17
+ "unk_token": {
18
+ "content": "<|endoftext|>",
19
+ "lstrip": false,
20
+ "normalized": true,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
tokenizer_2/tokenizer_config.json ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "0": {
5
+ "content": "!",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "49406": {
13
+ "content": "<|startoftext|>",
14
+ "lstrip": false,
15
+ "normalized": true,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "49407": {
21
+ "content": "<|endoftext|>",
22
+ "lstrip": false,
23
+ "normalized": true,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ }
28
+ },
29
+ "bos_token": "<|startoftext|>",
30
+ "clean_up_tokenization_spaces": true,
31
+ "do_lower_case": true,
32
+ "eos_token": "<|endoftext|>",
33
+ "errors": "replace",
34
+ "model_max_length": 77,
35
+ "pad_token": "!",
36
+ "tokenizer_class": "CLIPTokenizer",
37
+ "unk_token": "<|endoftext|>"
38
+ }
tokenizer_2/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
unet/config.json ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "UNet2DConditionModel",
3
+ "_diffusers_version": "0.29.0.dev0",
4
+ "act_fn": "silu",
5
+ "addition_embed_type": "text_time",
6
+ "addition_embed_type_num_heads": 64,
7
+ "addition_time_embed_dim": 256,
8
+ "attention_head_dim": [
9
+ 5,
10
+ 10,
11
+ 20
12
+ ],
13
+ "attention_type": "default",
14
+ "block_out_channels": [
15
+ 320,
16
+ 640,
17
+ 1280
18
+ ],
19
+ "center_input_sample": false,
20
+ "class_embed_type": null,
21
+ "class_embeddings_concat": false,
22
+ "conv_in_kernel": 3,
23
+ "conv_out_kernel": 3,
24
+ "cross_attention_dim": 2048,
25
+ "cross_attention_norm": null,
26
+ "down_block_types": [
27
+ "DownBlock2D",
28
+ "CrossAttnDownBlock2D",
29
+ "CrossAttnDownBlock2D"
30
+ ],
31
+ "downsample_padding": 1,
32
+ "dropout": 0.0,
33
+ "dual_cross_attention": false,
34
+ "encoder_hid_dim": null,
35
+ "encoder_hid_dim_type": null,
36
+ "flip_sin_to_cos": true,
37
+ "freq_shift": 0,
38
+ "in_channels": 4,
39
+ "layers_per_block": 2,
40
+ "mid_block_only_cross_attention": null,
41
+ "mid_block_scale_factor": 1,
42
+ "mid_block_type": "UNetMidBlock2DCrossAttn",
43
+ "norm_eps": 1e-05,
44
+ "norm_num_groups": 32,
45
+ "num_attention_heads": null,
46
+ "num_class_embeds": null,
47
+ "only_cross_attention": false,
48
+ "out_channels": 4,
49
+ "projection_class_embeddings_input_dim": 2816,
50
+ "resnet_out_scale_factor": 1.0,
51
+ "resnet_skip_time_act": false,
52
+ "resnet_time_scale_shift": "default",
53
+ "reverse_transformer_layers_per_block": null,
54
+ "sample_size": 128,
55
+ "time_cond_proj_dim": null,
56
+ "time_embedding_act_fn": null,
57
+ "time_embedding_dim": null,
58
+ "time_embedding_type": "positional",
59
+ "timestep_post_act": null,
60
+ "transformer_layers_per_block": [
61
+ 1,
62
+ 2,
63
+ 10
64
+ ],
65
+ "up_block_types": [
66
+ "CrossAttnUpBlock2D",
67
+ "CrossAttnUpBlock2D",
68
+ "UpBlock2D"
69
+ ],
70
+ "upcast_attention": null,
71
+ "use_linear_projection": true
72
+ }
unet/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f68ab74bb6f7b0f3f2f179c92fef7b18e54c0e64fa7a92bb46315bf66551f20d
3
+ size 1242150
unet/model.onnx.data ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1ce7d1b7e4322e9a303a9e596285ec714daa985cb2e3f7d111e7756afb2401a
3
+ size 5134903040
vae_decoder/config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "AutoencoderKL",
3
+ "_diffusers_version": "0.20.0.dev0",
4
+ "_name_or_path": "../sdxl-vae/",
5
+ "act_fn": "silu",
6
+ "block_out_channels": [
7
+ 128,
8
+ 256,
9
+ 512,
10
+ 512
11
+ ],
12
+ "down_block_types": [
13
+ "DownEncoderBlock2D",
14
+ "DownEncoderBlock2D",
15
+ "DownEncoderBlock2D",
16
+ "DownEncoderBlock2D"
17
+ ],
18
+ "force_upcast": true,
19
+ "in_channels": 3,
20
+ "latent_channels": 4,
21
+ "layers_per_block": 2,
22
+ "norm_num_groups": 32,
23
+ "out_channels": 3,
24
+ "sample_size": 1024,
25
+ "scaling_factor": 0.13025,
26
+ "up_block_types": [
27
+ "UpDecoderBlock2D",
28
+ "UpDecoderBlock2D",
29
+ "UpDecoderBlock2D",
30
+ "UpDecoderBlock2D"
31
+ ]
32
+ }
vae_decoder/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:20aae7848dcd708f4dfa562eda63a5df6a4434859f74b7e11dfd9553cf0fceb1
3
+ size 99039207
vae_encoder/config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "AutoencoderKL",
3
+ "_diffusers_version": "0.20.0.dev0",
4
+ "_name_or_path": "../sdxl-vae/",
5
+ "act_fn": "silu",
6
+ "block_out_channels": [
7
+ 128,
8
+ 256,
9
+ 512,
10
+ 512
11
+ ],
12
+ "down_block_types": [
13
+ "DownEncoderBlock2D",
14
+ "DownEncoderBlock2D",
15
+ "DownEncoderBlock2D",
16
+ "DownEncoderBlock2D"
17
+ ],
18
+ "force_upcast": true,
19
+ "in_channels": 3,
20
+ "latent_channels": 4,
21
+ "layers_per_block": 2,
22
+ "norm_num_groups": 32,
23
+ "out_channels": 3,
24
+ "sample_size": 1024,
25
+ "scaling_factor": 0.13025,
26
+ "up_block_types": [
27
+ "UpDecoderBlock2D",
28
+ "UpDecoderBlock2D",
29
+ "UpDecoderBlock2D",
30
+ "UpDecoderBlock2D"
31
+ ]
32
+ }
vae_encoder/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f3cc0532c41f52760353218f5c20c9c19b3d056c038d51c7b71c053ce57e599
3
+ size 68391734