camenduru commited on
Commit
24211e0
1 Parent(s): a3b9e76

thanks to yodayo-ai ❤

Browse files
.gitattributes CHANGED
@@ -33,3 +33,13 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ samples/sample-001.png filter=lfs diff=lfs merge=lfs -text
37
+ samples/sample-002.png filter=lfs diff=lfs merge=lfs -text
38
+ samples/sample-003.png filter=lfs diff=lfs merge=lfs -text
39
+ samples/sample-004.png filter=lfs diff=lfs merge=lfs -text
40
+ samples/sample-005.png filter=lfs diff=lfs merge=lfs -text
41
+ samples/sample-006.png filter=lfs diff=lfs merge=lfs -text
42
+ samples/sample-007.png filter=lfs diff=lfs merge=lfs -text
43
+ samples/sample-008.png filter=lfs diff=lfs merge=lfs -text
44
+ samples/sample-009.png filter=lfs diff=lfs merge=lfs -text
45
+ samples/sample-010.png filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: other
3
+ license_name: faipl-1.0-sd
4
+ license_link: https://freedevproject.org/faipl-1.0-sd/
5
+ language:
6
+ - en
7
+ tags:
8
+ - text-to-image
9
+ - stable-diffusion
10
+ - safetensors
11
+ - stable-diffusion-xl
12
+ base_model: cagliostrolab/animagine-xl-3.1
13
+ widget:
14
+ - text: 1girl, green hair, sweater, looking at viewer, upper body, beanie, outdoors, night, turtleneck, masterpiece, best quality, very aesthetic, absurdres
15
+ parameters:
16
+ negative_prompt: nsfw, low quality, worst quality, very displeasing, 3d, watermark, signature, ugly, poorly drawn
17
+ example_title: 1girl
18
+ - text: 1boy, male focus, green hair, sweater, looking at viewer, upper body, beanie, outdoors, night, turtleneck, masterpiece, best quality, very aesthetic, absurdres
19
+ parameters:
20
+ negative_prompt: nsfw, low quality, worst quality, very displeasing, 3d, watermark, signature, ugly, poorly drawn
21
+ example_title: 1boy
22
+ ---
23
+
24
+ <style>
25
+
26
+ body {
27
+ display: flex;
28
+ align-items: center;
29
+ justify-content: center;
30
+ height: 100vh;
31
+ margin: 0;
32
+ font-family: Arial, sans-serif;
33
+ background-color: #f4f4f9;
34
+ overflow: auto;
35
+ }
36
+
37
+ .container {
38
+ display: flex;
39
+ flex-direction: column;
40
+ align-items: center;
41
+ justify-content: center;
42
+ width: 100%;
43
+ padding: 20px;
44
+ }
45
+
46
+ .title-container {
47
+ display: flex;
48
+ flex-direction: column;
49
+ justify-content: center;
50
+ align-items: center;
51
+ padding: 1em;
52
+ border-radius: 10px;
53
+ }
54
+
55
+ .title {
56
+ font-size: 3em;
57
+ font-family: 'Montserrat', sans-serif;
58
+ text-align: center;
59
+ font-weight: bold;
60
+ }
61
+
62
+ .title span {
63
+ background: -webkit-linear-gradient(45deg, #ff8e8e, #ffb6c1, #ff69b4);
64
+ -webkit-background-clip: text;
65
+ -webkit-text-fill-color: transparent;
66
+ }
67
+
68
+ .gallery {
69
+ display: grid;
70
+ grid-template-columns: repeat(5, 1fr);
71
+ gap: 10px;
72
+ }
73
+
74
+ .gallery img {
75
+ width: 100%;
76
+ height: auto;
77
+ margin-top: 0px;
78
+ margin-bottom: 0px;
79
+ border-radius: 10px;
80
+ box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
81
+ transition: transform 0.3s;
82
+ }
83
+
84
+ .gallery img:hover {
85
+ transform: scale(1.05);
86
+ }
87
+
88
+ .note {
89
+ font-size: 1em;
90
+ opacity: 50%;
91
+ text-align: center;
92
+ margin-top: 20px;
93
+ color: #555;
94
+ }
95
+
96
+ </style>
97
+
98
+ <div class="container">
99
+ <div class="title-container">
100
+ <div class="title"><span>Holodayo XL 2.1</span></div>
101
+ </div>
102
+ <div class="gallery">
103
+ <img src="https://huggingface.co/yodayo-ai/holodayo-xl-2.1/resolve/main/samples/sample-001.png" alt="Image 1">
104
+ <img src="https://huggingface.co/yodayo-ai/holodayo-xl-2.1/resolve/main/samples/sample-002.png" alt="Image 2">
105
+ <img src="https://huggingface.co/yodayo-ai/holodayo-xl-2.1/resolve/main/samples/sample-003.png" alt="Image 3">
106
+ <img src="https://huggingface.co/yodayo-ai/holodayo-xl-2.1/resolve/main/samples/sample-004.png" alt="Image 4">
107
+ <img src="https://huggingface.co/yodayo-ai/holodayo-xl-2.1/resolve/main/samples/sample-005.png" alt="Image 5">
108
+ <img src="https://huggingface.co/yodayo-ai/holodayo-xl-2.1/resolve/main/samples/sample-006.png" alt="Image 6">
109
+ <img src="https://huggingface.co/yodayo-ai/holodayo-xl-2.1/resolve/main/samples/sample-007.png" alt="Image 7">
110
+ <img src="https://huggingface.co/yodayo-ai/holodayo-xl-2.1/resolve/main/samples/sample-008.png" alt="Image 8">
111
+ <img src="https://huggingface.co/yodayo-ai/holodayo-xl-2.1/resolve/main/samples/sample-009.png" alt="Image 9">
112
+ <img src="https://huggingface.co/yodayo-ai/holodayo-xl-2.1/resolve/main/samples/sample-010.png" alt="Image 10">
113
+ </div>
114
+ <div class="note">
115
+ Drag and drop each image to <a href="https://huggingface.co/spaces/Linaqruf/pnginfo" target="_blank">this link</a> or use ComfyUI to get the metadata.
116
+ </div>
117
+ </div>
118
+
119
+ ## Overview
120
+
121
+ **Holodayo XL 2.1** is the latest version of the [Yodayo Holodayo XL](https://yodayo.com/models/1cafd6f8-8fc6-4282-b8f8-843935acbfe8) series, following the previous iteration, [Holodayo XL 1.0](https://yodayo.com/models/1cafd6f8-8fc6-4282-b8f8-843935acbfe8/?modelversion=2349b302-a726-44ba-933b-e3dc4631a95b). This open-source model is built upon Animagine XL 3.1, a specialized SDXL model designed for generating high-quality anime-style artwork. Holodayo XL 2.1 has undergone additional fine-tuning and optimization to focus specifically on generating images that accurately represent the visual style and aesthetics of the Virtual Youtuber franchise.
122
+
123
+ Holodayo XL 2.1 was trained to fix everything wrong in [Holodayo XL 2.0](https://yodayo.com/models/1cafd6f8-8fc6-4282-b8f8-843935acbfe8/?modelversion=ca4bf1aa-0baf-44cd-8ee9-8f4c6bba89c8), such as bad hands, bad anatomy, catastrophic forgetting due to the text encoder being trained during the fine-tuning phase, and an overexposed art style by decreasing the aesthetic datasets.
124
+
125
+ ## Model Details
126
+ - **Developed by**: [Linaqruf](https://github.com/Linaqruf)
127
+ - **Model type**: Diffusion-based text-to-image generative model
128
+ - **Model Description**: Holodayo XL 2.1, the latest in the Yodayo Holodayo XL series, is an open-source model built on Animagine XL 3.1. Fine-tuned for high-quality Virtual Youtuber anime-style art generation.
129
+ - **License**: [Fair AI Public License 1.0-SD](https://freedevproject.org/faipl-1.0-sd/)
130
+ - **Finetuned from model**: [Animagine XL 3.1](https://huggingface.co/cagliostrolab/animagine-xl-3.1)
131
+
132
+ ## Supported Platform
133
+ 1. Use this model in our platform: [![Open In Spaces](https://img.shields.io/badge/Generate%20in%20Yodayo-141414?style=for-the-badge&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAMAAABEpIrGAAAAIGNIUk0AAHomAACAhAAA+gAAAIDoAAB1MAAA6mAAADqYAAAXcJy6UTwAAAGtUExURf/JZf/KZf/LZf64aftuevx+dv7DZv/HZvyKc/toe/2wa//KZP/MZPt4d/oIjvQUj7uVmPXKa/6va/ohifsFjcpfmtvGe//JZPtme/QOkGOEz87Hg//JY/2mbfoYi/4Hi5lNuoq/rfUOkF2E08THifoZiplOun6/tF6E0sXHiPUOj16F0sXHif6mbfoYivoIjVyG08TJiP/MYv/NZPYNj4Bpw9Cdiv+fbf2eb/2fb/60av2mbPoLjfIRkfcUjfoUi/oUjPkuh+mBgfgai/sJjf4Ii/8Ii/8Hi+8RkoJpw+galf+5aN5pjJ9Ot5lPuplRupxQuYtawIddwvERke/Ib6XAnY+/qpDAqpDCqo+8q42Zs5lcuNInoPcNjvsKjP8GioxXwHzAtf/KY/++Zv+OcP5Lfv4aiP4Ji+4TkrA+rzKZ6JPBp/61avpEgvoQjP0IjN8empdQu0iL3jaz4X2/tevHcvyYcPoOjP4HjPYOj8kto3hmyTid5EW615TCpt/Gef3JZf+8aO5fhKlGslt71jOq5V2+yLPElPDHb/PHbZW9p4TBsM7FhPrIaP///xdsY3gAAAABYktHRI6CBbNvAAAAB3RJTUUH6AIMCis5IjcvIAAAAE96VFh0UmF3IHByb2ZpbGUgdHlwZSBpcHRjAAB4nOPKLChJ5lIAAyMLLmMLEyMTS5MUAxMgRIA0w2QDI7NUIMvY1MjEzMQcxAfLgEigSi4AKJUO4yoibR8AAAEJSURBVDjLY2AYSoCRiQnOZmJixJRnZmFlg7LZOTi5uNEV8PDy8QsIQvQLCYuIiomjKWCS4JOUkpYBM2Xl5BUUZTAVKCmrQBWoyqupY1EgqaGJX4GWtg5EgS5OE3Twm6BESAHCCj2sCvQlDQyNeIDAGJcJJqZm5hYWFpZW1jgU2Nja2QOBg6OTMxYFPLwurm7yIODu4enljqmA0dvH1w8E/AMCg4LdMBUwcIeEhoWFR0RGRcfExsUnJGIoYBCXkUlKTklNS3d1zcjMysZUALQmJzdPPz+uoLCouKRUHIsCnrLyisqq6prauvoGbPIMjI1NzS2tbe0dMlilQQ7t7Oru6cUpDXUpwxAEACsWOLO6J6SrAAAAJXRFWHRkYXRlOmNyZWF0ZQAyMDI0LTAyLTEyVDEwOjQzOjU3KzAwOjAwbykEPgAAACV0RVh0ZGF0ZTptb2RpZnkAMjAyNC0wMi0xMlQxMDo0Mzo1NyswMDowMB50vIIAAAAASUVORK5CYII=)](https://yodayo.com/models/1cafd6f8-8fc6-4282-b8f8-843935acbfe8/?modelversion=6862d809-0cbc-4fe0-83dc-9206d60b0698)
134
+ 2. Use it in [`ComfyUI`](https://github.com/comfyanonymous/ComfyUI) or [`Stable Diffusion Webui`](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
135
+ 3. Use it with 🧨 `diffusers`
136
+
137
+ ## 🧨 Diffusers Installation
138
+
139
+ First install the required libraries:
140
+
141
+ ```bash
142
+ pip install diffusers transformers accelerate safetensors --upgrade
143
+ ```
144
+
145
+ Then run image generation with the following example code:
146
+
147
+ ```python
148
+ import torch
149
+ from diffusers import StableDiffusionXLPipeline
150
+
151
+ pipe = StableDiffusionXLPipeline.from_pretrained(
152
+ "yodayo-ai/holodayo-xl-2.1",
153
+ torch_dtype=torch.float16,
154
+ use_safetensors=True,
155
+ custom_pipeline="lpw_stable_diffusion_xl",
156
+ add_watermarker=False,
157
+ variant="fp16"
158
+ )
159
+ pipe.to('cuda')
160
+
161
+ prompt = "1girl, nakiri ayame, nakiri ayame \(1st costume\), hololive, solo, upper body, v, smile, looking at viewer, outdoors, night, masterpiece, best quality, very aesthetic, absurdres"
162
+ negative_prompt = "nsfw, (low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn"
163
+
164
+ image = pipe(
165
+ prompt,
166
+ negative_prompt=negative_prompt,
167
+ width=832,
168
+ height=1216,
169
+ guidance_scale=7,
170
+ num_inference_steps=28
171
+ ).images[0]
172
+
173
+ image.save("./waifu.png")
174
+ ```
175
+
176
+ ## Usage Guidelines
177
+
178
+ ### Tag Ordering
179
+
180
+ For optimal results, it's recommended to follow the structured prompt template because we train the model like this:
181
+
182
+ ```
183
+ 1girl/1boy, character name, from which series, by which artists, everything else in any order.
184
+ ```
185
+
186
+ ### Special Tags
187
+
188
+ Holodayo XL 2.1 inherits special tags from Animagine XL 3.1 to enhance image generation by steering results toward quality, rating, creation date, and aesthetic. This inheritance ensures that Holodayo XL 2.1 can produce high-quality, relevant, and aesthetically pleasing images. While the model can generate images without these tags, using them helps achieve better results.
189
+
190
+ - **Quality tags**: masterpiece, best quality, great quality, good quality, normal quality, low quality, worst quality
191
+ - **Rating tags**: safe, sensitive, nsfw, explicit
192
+ - **Year tags**: newest, recent, mid, early, oldest
193
+ - **Aesthetic tags**: very aesthetic, aesthetic, displeasing, very displeasing
194
+
195
+ ### Recommended Settings
196
+
197
+ To guide the model towards generating high-aesthetic images, use the following recommended settings:
198
+
199
+ - **Negative prompts**:
200
+ ```
201
+ nsfw, (low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn
202
+ ```
203
+ - **Positive prompts**:
204
+ ```
205
+ masterpiece, best quality, very aesthetic, absurdres
206
+ ```
207
+ - **Classifier-Free Guidance (CFG) Scale**: should be around 5 to 7; 10 is fried, >12 is deep-fried.
208
+ - **Sampling steps**: should be around 25 to 30; 28 is the sweet spot.
209
+ - **Sampler**: Euler Ancestral (Euler a) is highly recommended.
210
+ - **Supported resolutions**:
211
+ ```
212
+ 1024 x 1024, 1152 x 896, 896 x 1152, 1216 x 832, 832 x 1216, 1344 x 768, 768 x 1344, 1536 x 640, 640 x 1536
213
+ ```
214
+
215
+ ## Training
216
+
217
+ These are the key hyperparameters used during training:
218
+
219
+ | Feature | Pretraining | Finetuning |
220
+ |-------------------------------|----------------------------|---------------------------------|
221
+ | **Hardware** | 2x H100 80GB PCIe | 2x A100 80GB PCIe |
222
+ | **Batch Size** | 64 | 48 |
223
+ | **Gradient Accumulation Steps** | 2 | 1 |
224
+ | **Noise Offset** | None | 0.0357 |
225
+ | **Epochs** | 10 | 10 |
226
+ | **UNet Learning Rate** | 7.5e-6 | 7.5e-6 |
227
+ | **Text Encoder Learning Rate** | 3.75e-6 | None |
228
+ | **Optimizer** | AdamW8bit | Adafactor |
229
+ | **Optimizer Args** | Weight Decay: 0.1, Betas: (0.9, 0.99) | Scale Parameter: False, Relative Step: False, Warmup Init: False |
230
+ | **Scheduler** | Constant with Warmups | Constant with Warmups |
231
+ | **Warmup Steps** | 0.5% | 0.5% |
232
+
233
+ ## License
234
+
235
+ Holodayo XL 2.1 falls under [Fair AI Public License 1.0-SD](https://freedevproject.org/faipl-1.0-sd/) license, which is compatible with Stable Diffusion models’ license. Key points:
236
+
237
+ 1. **Modification Sharing:** If you modify Holodayo XL 2.1, you must share both your changes and the original license.
238
+ 2. **Source Code Accessibility:** If your modified version is network-accessible, provide a way (like a download link) for others to get the source code. This applies to derived models too.
239
+ 3. **Distribution Terms:** Any distribution must be under this license or another with similar rules.
holodayo-xl-2.1.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e51f7a220bb18fc5a2365b6f8871a34163cb8549529cc1f2cdc9fd5816c9fd88
3
+ size 6938225848
model_index.json ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "StableDiffusionXLPipeline",
3
+ "_diffusers_version": "0.28.0",
4
+ "feature_extractor": [
5
+ null,
6
+ null
7
+ ],
8
+ "force_zeros_for_empty_prompt": true,
9
+ "image_encoder": [
10
+ null,
11
+ null
12
+ ],
13
+ "scheduler": [
14
+ "diffusers",
15
+ "EulerAncestralDiscreteScheduler"
16
+ ],
17
+ "text_encoder": [
18
+ "transformers",
19
+ "CLIPTextModel"
20
+ ],
21
+ "text_encoder_2": [
22
+ "transformers",
23
+ "CLIPTextModelWithProjection"
24
+ ],
25
+ "tokenizer": [
26
+ "transformers",
27
+ "CLIPTokenizer"
28
+ ],
29
+ "tokenizer_2": [
30
+ "transformers",
31
+ "CLIPTokenizer"
32
+ ],
33
+ "unet": [
34
+ "diffusers",
35
+ "UNet2DConditionModel"
36
+ ],
37
+ "vae": [
38
+ "diffusers",
39
+ "AutoencoderKL"
40
+ ]
41
+ }
samples/sample-001.png ADDED

Git LFS Details

  • SHA256: ef07d5ab2188d110698deed13ceead823e69ce564c7dacb2d37f47d0f151decd
  • Pointer size: 132 Bytes
  • Size of remote file: 1.4 MB
samples/sample-002.png ADDED

Git LFS Details

  • SHA256: 9cebd915083e9e50c73da0d4516632737235ef5838fcb71b0d5c55d25ee15e8f
  • Pointer size: 132 Bytes
  • Size of remote file: 1.47 MB
samples/sample-003.png ADDED

Git LFS Details

  • SHA256: 859d786be3a09b951440ea45b68e1559629bba62b617a3d126f4db661d63fa4e
  • Pointer size: 132 Bytes
  • Size of remote file: 1.53 MB
samples/sample-004.png ADDED

Git LFS Details

  • SHA256: 98172b1abe4b00d1dca0743fafaad9d15d16879fd6fe3d5dd30916f3d0c44ce5
  • Pointer size: 132 Bytes
  • Size of remote file: 1.54 MB
samples/sample-005.png ADDED

Git LFS Details

  • SHA256: 25f63159aaafcad9106589b1a346631dfe4413af9bba82dea711ccd77fc90f8b
  • Pointer size: 132 Bytes
  • Size of remote file: 1.3 MB
samples/sample-006.png ADDED

Git LFS Details

  • SHA256: 88469547f2b86842399f264cac61af171e59eb3e0c03c13eb8ed38e9d7df90fe
  • Pointer size: 132 Bytes
  • Size of remote file: 1.38 MB
samples/sample-007.png ADDED

Git LFS Details

  • SHA256: 72677d3f02e35fdda0504309acd8efe61901d5311ada1ae8eb6cb765dbfa8358
  • Pointer size: 132 Bytes
  • Size of remote file: 1.33 MB
samples/sample-008.png ADDED

Git LFS Details

  • SHA256: a4f3d4fd2ca60529be74997bcbc2fb9bfb32f982d5f2af24722befd2fa6cf0e4
  • Pointer size: 132 Bytes
  • Size of remote file: 1.33 MB
samples/sample-009.png ADDED

Git LFS Details

  • SHA256: 01649f7134c651193bb39ad6b77b01671689c1abe87e8b6931a352bae93b15e9
  • Pointer size: 132 Bytes
  • Size of remote file: 1.55 MB
samples/sample-010.png ADDED

Git LFS Details

  • SHA256: 044f7bf145d1f8b744295ea23f51a025570adf9312e88eccfd1545cdb68d1913
  • Pointer size: 132 Bytes
  • Size of remote file: 1.45 MB
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "EulerAncestralDiscreteScheduler",
3
+ "_diffusers_version": "0.28.0",
4
+ "beta_end": 0.012,
5
+ "beta_schedule": "scaled_linear",
6
+ "beta_start": 0.00085,
7
+ "clip_sample": false,
8
+ "interpolation_type": "linear",
9
+ "num_train_timesteps": 1000,
10
+ "prediction_type": "epsilon",
11
+ "rescale_betas_zero_snr": false,
12
+ "sample_max_value": 1.0,
13
+ "set_alpha_to_one": false,
14
+ "skip_prk_steps": true,
15
+ "steps_offset": 1,
16
+ "timestep_spacing": "leading",
17
+ "trained_betas": null,
18
+ "use_karras_sigmas": false
19
+ }
text_encoder/config.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "CLIPTextModel"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "bos_token_id": 0,
7
+ "dropout": 0.0,
8
+ "eos_token_id": 2,
9
+ "hidden_act": "quick_gelu",
10
+ "hidden_size": 768,
11
+ "initializer_factor": 1.0,
12
+ "initializer_range": 0.02,
13
+ "intermediate_size": 3072,
14
+ "layer_norm_eps": 1e-05,
15
+ "max_position_embeddings": 77,
16
+ "model_type": "clip_text_model",
17
+ "num_attention_heads": 12,
18
+ "num_hidden_layers": 12,
19
+ "pad_token_id": 1,
20
+ "projection_dim": 768,
21
+ "torch_dtype": "float32",
22
+ "transformers_version": "4.41.1",
23
+ "vocab_size": 49408
24
+ }
text_encoder/model.fp16.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c596edaa5341f944b404de48ba8a6aa2882de4a2a4a158796a662c28e3f47395
3
+ size 246144152
text_encoder/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a5f47400d43eee1d1716bf53f652ec67025cc26a401860d4db4fa4e22eb8a62
3
+ size 492265168
text_encoder_2/config.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "CLIPTextModelWithProjection"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "bos_token_id": 0,
7
+ "dropout": 0.0,
8
+ "eos_token_id": 2,
9
+ "hidden_act": "gelu",
10
+ "hidden_size": 1280,
11
+ "initializer_factor": 1.0,
12
+ "initializer_range": 0.02,
13
+ "intermediate_size": 5120,
14
+ "layer_norm_eps": 1e-05,
15
+ "max_position_embeddings": 77,
16
+ "model_type": "clip_text_model",
17
+ "num_attention_heads": 20,
18
+ "num_hidden_layers": 32,
19
+ "pad_token_id": 1,
20
+ "projection_dim": 1280,
21
+ "torch_dtype": "float32",
22
+ "transformers_version": "4.41.1",
23
+ "vocab_size": 49408
24
+ }
text_encoder_2/model.fp16.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a25bd599acd8be7f2e61f071e052b7fe3d111274a019884020391b45e34a464
3
+ size 1389382176
text_encoder_2/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1b168fb9761bffe7bcd816c82904e92138775777a161447e5c4c84ed0733ad57
3
+ size 2778702264
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<|startoftext|>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "<|endoftext|>",
17
+ "unk_token": {
18
+ "content": "<|endoftext|>",
19
+ "lstrip": false,
20
+ "normalized": true,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "49406": {
5
+ "content": "<|startoftext|>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "49407": {
13
+ "content": "<|endoftext|>",
14
+ "lstrip": false,
15
+ "normalized": true,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ }
20
+ },
21
+ "bos_token": "<|startoftext|>",
22
+ "clean_up_tokenization_spaces": true,
23
+ "do_lower_case": true,
24
+ "eos_token": "<|endoftext|>",
25
+ "errors": "replace",
26
+ "model_max_length": 77,
27
+ "pad_token": "<|endoftext|>",
28
+ "tokenizer_class": "CLIPTokenizer",
29
+ "unk_token": "<|endoftext|>"
30
+ }
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_2/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_2/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<|startoftext|>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "!",
17
+ "unk_token": {
18
+ "content": "<|endoftext|>",
19
+ "lstrip": false,
20
+ "normalized": true,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
tokenizer_2/tokenizer_config.json ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "0": {
5
+ "content": "!",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "49406": {
13
+ "content": "<|startoftext|>",
14
+ "lstrip": false,
15
+ "normalized": true,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "49407": {
21
+ "content": "<|endoftext|>",
22
+ "lstrip": false,
23
+ "normalized": true,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ }
28
+ },
29
+ "bos_token": "<|startoftext|>",
30
+ "clean_up_tokenization_spaces": true,
31
+ "do_lower_case": true,
32
+ "eos_token": "<|endoftext|>",
33
+ "errors": "replace",
34
+ "model_max_length": 77,
35
+ "pad_token": "!",
36
+ "tokenizer_class": "CLIPTokenizer",
37
+ "unk_token": "<|endoftext|>"
38
+ }
tokenizer_2/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
unet/config.json ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "UNet2DConditionModel",
3
+ "_diffusers_version": "0.28.0",
4
+ "act_fn": "silu",
5
+ "addition_embed_type": "text_time",
6
+ "addition_embed_type_num_heads": 64,
7
+ "addition_time_embed_dim": 256,
8
+ "attention_head_dim": [
9
+ 5,
10
+ 10,
11
+ 20
12
+ ],
13
+ "attention_type": "default",
14
+ "block_out_channels": [
15
+ 320,
16
+ 640,
17
+ 1280
18
+ ],
19
+ "center_input_sample": false,
20
+ "class_embed_type": null,
21
+ "class_embeddings_concat": false,
22
+ "conv_in_kernel": 3,
23
+ "conv_out_kernel": 3,
24
+ "cross_attention_dim": 2048,
25
+ "cross_attention_norm": null,
26
+ "down_block_types": [
27
+ "DownBlock2D",
28
+ "CrossAttnDownBlock2D",
29
+ "CrossAttnDownBlock2D"
30
+ ],
31
+ "downsample_padding": 1,
32
+ "dropout": 0.0,
33
+ "dual_cross_attention": false,
34
+ "encoder_hid_dim": null,
35
+ "encoder_hid_dim_type": null,
36
+ "flip_sin_to_cos": true,
37
+ "freq_shift": 0,
38
+ "in_channels": 4,
39
+ "layers_per_block": 2,
40
+ "mid_block_only_cross_attention": null,
41
+ "mid_block_scale_factor": 1,
42
+ "mid_block_type": "UNetMidBlock2DCrossAttn",
43
+ "norm_eps": 1e-05,
44
+ "norm_num_groups": 32,
45
+ "num_attention_heads": null,
46
+ "num_class_embeds": null,
47
+ "only_cross_attention": false,
48
+ "out_channels": 4,
49
+ "projection_class_embeddings_input_dim": 2816,
50
+ "resnet_out_scale_factor": 1.0,
51
+ "resnet_skip_time_act": false,
52
+ "resnet_time_scale_shift": "default",
53
+ "reverse_transformer_layers_per_block": null,
54
+ "sample_size": 128,
55
+ "time_cond_proj_dim": null,
56
+ "time_embedding_act_fn": null,
57
+ "time_embedding_dim": null,
58
+ "time_embedding_type": "positional",
59
+ "timestep_post_act": null,
60
+ "transformer_layers_per_block": [
61
+ 1,
62
+ 2,
63
+ 10
64
+ ],
65
+ "up_block_types": [
66
+ "CrossAttnUpBlock2D",
67
+ "CrossAttnUpBlock2D",
68
+ "UpBlock2D"
69
+ ],
70
+ "upcast_attention": null,
71
+ "use_linear_projection": true
72
+ }
unet/diffusion_pytorch_model.fp16.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13fa6a504fdaaa4b94bf94ae072d0f36af47b3eed7cc40b364aa527235993a86
3
+ size 5135149760
unet/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7545fcb7019b081abd6381b9fb00c6fbf7565adde582ec3b04ae2966623cbf60
3
+ size 10270077736
vae/config.json ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "AutoencoderKL",
3
+ "_diffusers_version": "0.28.0",
4
+ "_name_or_path": "madebyollin/sdxl-vae-fp16-fix",
5
+ "act_fn": "silu",
6
+ "block_out_channels": [
7
+ 128,
8
+ 256,
9
+ 512,
10
+ 512
11
+ ],
12
+ "down_block_types": [
13
+ "DownEncoderBlock2D",
14
+ "DownEncoderBlock2D",
15
+ "DownEncoderBlock2D",
16
+ "DownEncoderBlock2D"
17
+ ],
18
+ "force_upcast": false,
19
+ "in_channels": 3,
20
+ "latent_channels": 4,
21
+ "latents_mean": null,
22
+ "latents_std": null,
23
+ "layers_per_block": 2,
24
+ "norm_num_groups": 32,
25
+ "out_channels": 3,
26
+ "sample_size": 512,
27
+ "scaling_factor": 0.13025,
28
+ "up_block_types": [
29
+ "UpDecoderBlock2D",
30
+ "UpDecoderBlock2D",
31
+ "UpDecoderBlock2D",
32
+ "UpDecoderBlock2D"
33
+ ]
34
+ }
vae/diffusion_pytorch_model.fp16.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:78f6189c8492013e3cac81637a1f657f790a237387f8a9dfd6bfa5fee28eb646
3
+ size 334643268
vae/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:78f6189c8492013e3cac81637a1f657f790a237387f8a9dfd6bfa5fee28eb646
3
+ size 334643268