tlwu committed on
Commit 353f31c
1 Parent(s): c7014a1

Olive models

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.data filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,35 @@
  ---
- license: creativeml-openrail-m
+ license: openrail++
+ base_model: stabilityai/stable-diffusion-xl-refiner-1.0
+ language:
+ - en
+ tags:
+ - stable-diffusion
+ - stable-diffusion-xl
+ - onnxruntime
+ - onnx
+ - text-to-image
  ---
+
+
+ # Stable Diffusion XL Refiner 1.0 for ONNX Runtime CUDA provider
+
+ ## Introduction
+
+ This repository hosts the optimized versions of **Stable Diffusion XL Refiner 1.0** to accelerate inference with the ONNX Runtime CUDA execution provider.
+
+ The models are generated by [Olive](https://github.com/microsoft/Olive/tree/main/examples/stable_diffusion) with a command like the following:
+ ```
+ python stable_diffusion_xl.py --provider cuda --optimize --use_fp16_fixed_vae --model_id stabilityai/stable-diffusion-xl-refiner-1.0
+ ```
+
+ See the [usage instructions](#usage-example) for how to run the SDXL pipeline with the ONNX files hosted in this repository.
+
+ ## Model Description
+
+ - **Developed by:** Stability AI
+ - **Model type:** Diffusion-based text-to-image generative model
+ - **License:** [CreativeML Open RAIL++-M License](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/blob/main/LICENSE.md)
+ - **Model Description:** This is a conversion of the [SDXL refiner 1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0) model for [ONNX Runtime](https://github.com/microsoft/onnxruntime) inference with the CUDA execution provider; it is meant to be used as the refiner stage after the [SDXL base 1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) model.
+
+ The VAE decoder is converted from [sdxl-vae-fp16-fix](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix). There are slight discrepancies between its output and that of the original VAE, but the decoded images should be [close enough for most purposes](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/discussions/7#64c5c0f8e2e5c94bd04eaa80).
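The `model_index.json` added below names `ORTStableDiffusionXLImg2ImgPipeline`, the SDXL refiner pipeline class from Optimum's ONNX Runtime integration, so the files can presumably be loaded through that class. A minimal sketch, not the repository's official usage example; the local directory path and the base-model output image are placeholders:

```
# Hedged sketch: load this repository's ONNX files with Optimum's ONNX Runtime
# SDXL refiner pipeline on the CUDA execution provider.
from optimum.onnxruntime import ORTStableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

# Placeholder path to a local clone/download of this repository.
pipeline = ORTStableDiffusionXLImg2ImgPipeline.from_pretrained(
    "./stable-diffusion-xl-refiner-1.0-onnx",
    provider="CUDAExecutionProvider",
)

# The refiner is an image-to-image model: it polishes an image produced by the SDXL base model.
init_image = load_image("sdxl_base_output.png")  # placeholder: output of the SDXL base pipeline
refined = pipeline(
    prompt="a photo of an astronaut riding a horse on mars",
    image=init_image,
).images[0]
refined.save("refined.png")
```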
model_index.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "_class_name": "ORTStableDiffusionXLImg2ImgPipeline",
+   "_diffusers_version": "0.24.0",
+   "_name_or_path": "stabilityai/stable-diffusion-xl-refiner-1.0",
+   "feature_extractor": [
+     null,
+     null
+   ],
+   "force_zeros_for_empty_prompt": false,
+   "image_encoder": [
+     null,
+     null
+   ],
+   "requires_aesthetics_score": true,
+   "scheduler": [
+     "diffusers",
+     "EulerDiscreteScheduler"
+   ],
+   "text_encoder": [
+     "diffusers",
+     "OnnxRuntimeModel"
+   ],
+   "text_encoder_2": [
+     null,
+     null
+   ],
+   "tokenizer": [
+     null,
+     null
+   ],
+   "tokenizer_2": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "unet": [
+     "diffusers",
+     "OnnxRuntimeModel"
+   ],
+   "vae_decoder": [
+     "diffusers",
+     "OnnxRuntimeModel"
+   ],
+   "vae_encoder": [
+     "diffusers",
+     "OnnxRuntimeModel"
+   ]
+ }
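Each non-underscore entry in `model_index.json` maps a pipeline component to a `[library, class]` pair; components exported to ONNX are tagged `OnnxRuntimeModel`, while `null` entries are not shipped as separate models here. A small, hypothetical inspection snippet (the local path is a placeholder):

```
import json
from pathlib import Path

# Placeholder path to a local copy of this repository.
index = json.loads(Path("./stable-diffusion-xl-refiner-1.0-onnx/model_index.json").read_text())

for name, value in index.items():
    if name.startswith("_") or not isinstance(value, list):
        continue  # skip metadata fields and boolean flags
    library, cls = value
    status = f"{library}.{cls}" if cls else "not included"
    print(f"{name:16s} -> {status}")
```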
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "_class_name": "EulerDiscreteScheduler",
+   "_diffusers_version": "0.24.0",
+   "beta_end": 0.012,
+   "beta_schedule": "scaled_linear",
+   "beta_start": 0.00085,
+   "clip_sample": false,
+   "interpolation_type": "linear",
+   "num_train_timesteps": 1000,
+   "prediction_type": "epsilon",
+   "sample_max_value": 1.0,
+   "set_alpha_to_one": false,
+   "sigma_max": null,
+   "sigma_min": null,
+   "skip_prk_steps": true,
+   "steps_offset": 1,
+   "timestep_spacing": "leading",
+   "timestep_type": "discrete",
+   "trained_betas": null,
+   "use_karras_sigmas": false
+ }
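The scheduler is stored as a plain diffusers config, so it can be instantiated (or swapped) independently of the ONNX models. A hedged sketch, again using a placeholder local path:

```
from diffusers import EulerDiscreteScheduler

# Placeholder path to a local copy of this repository.
scheduler = EulerDiscreteScheduler.from_pretrained(
    "./stable-diffusion-xl-refiner-1.0-onnx", subfolder="scheduler"
)

# Inspect the discretization implied by the config above.
scheduler.set_timesteps(num_inference_steps=30)
print(scheduler.timesteps[:5])  # first few of the 30 denoising timesteps

# The same config can seed another scheduler if preferred, e.g.:
# from diffusers import DPMSolverMultistepScheduler
# alt = DPMSolverMultistepScheduler.from_config(scheduler.config)
```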
text_encoder_2/model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b9f04f2d71f0ae8cbb085a78e1e65e2509607d694a84c15dde6de1ce2db58e0
+ size 1389427378
text_encoder_2/model.onnx.data ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3da7ac65349fbd092e836e3eeca2c22811317bc804fd70af157b4550f2d4bcb5
+ size 2778639360
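The ONNX weights are stored through Git LFS; a pointer file like the one above records only the payload's SHA-256 (`oid`) and its size in bytes. A hypothetical integrity check of a downloaded payload against its pointer text (both paths are placeholders; in a normal LFS checkout the pointer is already replaced by the payload):

```
import hashlib
from pathlib import Path

# Placeholder paths: the LFS pointer text and the downloaded payload.
pointer_text = Path("model.onnx.data.pointer").read_text()
payload = Path("text_encoder_2/model.onnx.data")

fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
expected_oid = fields["oid"].removeprefix("sha256:")
expected_size = int(fields["size"])

digest = hashlib.sha256()
with payload.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

assert payload.stat().st_size == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("payload matches its LFS pointer")
```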
tokenizer_2/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_2/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "!",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer_2/tokenizer_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "!",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49406": {
+       "content": "<|startoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49407": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|startoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": true,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 77,
+   "pad_token": "!",
+   "tokenizer_class": "CLIPTokenizer",
+   "unk_token": "<|endoftext|>"
+ }
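`tokenizer_2` is a standard CLIP tokenizer capped at 77 tokens with `!` as the padding token. A hedged sketch of loading it from this subfolder and producing the padded token ids the text encoder expects (the local path is a placeholder):

```
from transformers import CLIPTokenizer

# Placeholder path to a local copy of this repository.
tokenizer = CLIPTokenizer.from_pretrained(
    "./stable-diffusion-xl-refiner-1.0-onnx", subfolder="tokenizer_2"
)

encoded = tokenizer(
    "a photo of an astronaut riding a horse on mars",
    padding="max_length",
    max_length=tokenizer.model_max_length,  # 77, from tokenizer_config.json
    truncation=True,
    return_tensors="np",
)
print(encoded.input_ids.shape)  # (1, 77): <|startoftext|>, prompt tokens, <|endoftext|>, then "!" padding
```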
tokenizer_2/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
unet/model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d60e07a44541db5c405e1d6210a04e6b1fa1b19a79758e0837cedaa3b00f06a1
+ size 568878
unet/model.onnx.data ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f196e86f27210deadd211e86c079f57eb3468e6ac0d1d1b111a75facad9efd97
+ size 4519331328
vae_decoder/model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7987d20deef6934d7d30bd7486da698940765d5383a5ca009f0aad74c737ec70
+ size 99072671
vae_encoder/model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a56f9f96a763bc9995d032d6e03159cf433569047488e7594f0b15066cbed44f
+ size 68412330