Kano001 committed on
Commit
3de498f
1 Parent(s): ca2fff5

Upload 24 files

.gitattributes ADDED
@@ -0,0 +1,35 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,84 @@
1
+ ---
2
+ license: mit
3
+ language:
4
+ - en
5
+ pipeline_tag: text-to-image
6
+ tags:
7
+ - openvino
8
+ - text-to-image
9
+ ---
10
+
11
+ Model Description:
12
+
13
+ This repo contains OpenVINO model files for [SimianLuo's LCM_Dreamshaper_v7](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7).
14
+
15
+ Hugging Face Demo on CPU:
16
+
17
+ [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/deinferno/Latent_Consistency_Model_OpenVino_CPU)
18
+
19
+ Generation Results:
20
+
21
+ By converting the model to OpenVINO format and running it on an Intel(R) Xeon(R) Gold 5220R CPU @ 2.20GHz (24C/48T x 2), we achieve the following results compared to the original PyTorch LCM.
22
+
23
+ Reported times include the first compile and reshape phases and should be taken with a grain of salt, because the benchmark was run on a dual-socket server, which can underperform in this type of workload.
24
+
25
+ Number of images per batch is set to 1
26
+
27
+ |Run No.|PyTorch|OpenVINO|OpenVINO w/ reshape|
28
+ |-------|-------|--------|------------------|
29
+ |1 |15.5841|18.0010 |13.4928 |
30
+ |2 |12.4634|5.0208 |3.6855 |
31
+ |3 |12.1551|4.9462 |3.7228 |
32
+
33
+ Number of images per batch is set to 4
34
+
35
+ |Run No.|PyTorch|OpenVINO|OpenVINO w/ reshape|
36
+ |-------|-------|--------|------------------|
37
+ |1 |31.3666|33.1488 |25.7044 |
38
+ |2 |33.4797|17.7456 |12.8295 |
39
+ |3 |28.6561|17.9216 |12.7198 |
40
+
41
+
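+ For reference, here is a minimal sketch of how such timings could be collected. This is not the exact benchmark script: the numbers above include the initial compile and reshape phases, while this sketch compiles before timing, and the model id, prompt and settings simply reuse the usage example below.
+ 
+ ```py
+ import time
+ 
+ from lcm_ov_pipeline import OVLatentConsistencyModelPipeline
+ from lcm_scheduler import LCMScheduler
+ 
+ model_id = "deinferno/LCM_Dreamshaper_v7-openvino"
+ scheduler = LCMScheduler.from_pretrained(model_id, subfolder="scheduler")
+ pipe = OVLatentConsistencyModelPipeline.from_pretrained(model_id, scheduler=scheduler, compile=False, ov_config={"CACHE_DIR": ""})
+ 
+ # Static shapes, as in the "w/ reshape" column
+ pipe.reshape(batch_size=1, height=512, width=512, num_images_per_prompt=1)
+ pipe.compile()
+ 
+ prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k"
+ 
+ for run in range(3):
+     start = time.perf_counter()
+     pipe(prompt=prompt, width=512, height=512, num_inference_steps=4, guidance_scale=8.0, output_type="pil")
+     print(f"Run {run + 1}: {time.perf_counter() - start:.4f} s")
+ ```
+ 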
42
+ To run the model yourself, you can leverage the 🧨 Diffusers / 🤗 Optimum libraries:
43
+ 1. Install the libraries:
44
+ ```
45
+ pip install diffusers transformers accelerate optimum
46
+ pip install --upgrade-strategy eager optimum[openvino]
47
+ ```
48
+
49
+ 2. Clone the inference code:
50
+ ```
51
+ git clone https://huggingface.co/deinferno/LCM_Dreamshaper_v7-openvino
52
+ cd LCM_Dreamshaper_v7-openvino
53
+ ```
54
+
55
+ 3. Run the model:
56
+ ```py
57
+ from lcm_ov_pipeline import OVLatentConsistencyModelPipeline
58
+ from lcm_scheduler import LCMScheduler
59
+
60
+ model_id = "deinferno/LCM_Dreamshaper_v7-openvino"
61
+
62
+ scheduler = LCMScheduler.from_pretrained(model_id, subfolder = "scheduler")
63
+
64
+ # Use "compile = True" if you don't plan to reshape and recompile the model after loading
65
+ # Don't forget to disable the OpenVINO cache via "ov_config = {"CACHE_DIR": ""}", because Optimum won't use it anyway and it would stay as dead weight in RAM when loading the pipeline again
66
+ pipe = OVLatentConsistencyModelPipeline.from_pretrained(model_id, scheduler = scheduler, compile = False, ov_config = {"CACHE_DIR":""})
67
+
68
+ prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k"
69
+
70
+ # num_inference_steps can be set to 1~50. LCM supports fast inference even with <= 4 steps. Recommended: 1~8 steps.
71
+
72
+ width = 512
73
+ height = 512
74
+ num_images = 1
75
+ batch_size = 1
76
+ num_inference_steps = 4
77
+
78
+ # Reshape and recompile for inference speed
79
+
80
+ pipe.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images)
81
+ pipe.compile()
82
+
83
+ images = pipe(prompt=prompt, width=width, height=height, num_inference_steps=num_inference_steps, guidance_scale=8.0, output_type="pil").images
84
+ ```
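+ 
+ With `output_type="pil"`, `images` is a list of `PIL.Image.Image` objects, so saving the result is straightforward (the file name here is only an example):
+ 
+ ```py
+ for i, image in enumerate(images):
+     image.save(f"lcm_result_{i}.png")
+ ```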
convert_to_openvino.py ADDED
@@ -0,0 +1,135 @@
1
+ from typing import Dict, Optional, Tuple, OrderedDict
2
+ from transformers import CLIPTextConfig
3
+ from diffusers import UNet2DConditionModel
4
+
5
+ import random  # used by CustomDummyTimestepInputGenerator when random_batch_size_range is set
+ import torch
6
+
7
+ from optimum.exporters.onnx.model_configs import VisionOnnxConfig, NormalizedConfig, DummyVisionInputGenerator, DummyTimestepInputGenerator, DummySeq2SeqDecoderTextInputGenerator
8
+ from optimum.exporters.openvino import main_export
9
+ from optimum.utils.input_generators import DummyInputGenerator, DEFAULT_DUMMY_SHAPES
10
+ from optimum.utils.normalized_config import NormalizedTextConfig
11
+
12
+ # IMPORTANT: You need to specify a scheduler in the downloaded model cache folder to avoid errors
13
+
14
+ class CustomDummyTimestepInputGenerator(DummyInputGenerator):
15
+ """
16
+ Generates dummy time step inputs.
17
+ """
18
+
19
+ SUPPORTED_INPUT_NAMES = (
20
+ "timestep",
21
+ "timestep_cond",
22
+ "text_embeds",
23
+ "time_ids",
24
+ )
25
+
26
+ def __init__(
27
+ self,
28
+ task: str,
29
+ normalized_config: NormalizedConfig,
30
+ batch_size: int = DEFAULT_DUMMY_SHAPES["batch_size"],
31
+ time_cond_proj_dim: int = 256,
32
+ random_batch_size_range: Optional[Tuple[int, int]] = None,
33
+ **kwargs,
34
+ ):
35
+ self.task = task
36
+ self.vocab_size = normalized_config.vocab_size
37
+ self.text_encoder_projection_dim = normalized_config.text_encoder_projection_dim
38
+ self.time_ids = 5 if normalized_config.requires_aesthetics_score else 6
39
+ if random_batch_size_range:
40
+ low, high = random_batch_size_range
41
+ self.batch_size = random.randint(low, high)
42
+ else:
43
+ self.batch_size = batch_size
44
+ self.time_cond_proj_dim = normalized_config.get("time_cond_proj_dim", time_cond_proj_dim)
45
+
46
+ def generate(self, input_name: str, framework: str = "pt", int_dtype: str = "int64", float_dtype: str = "fp32"):
47
+ shape = [self.batch_size]
48
+
49
+ if input_name == "timestep":
50
+ return self.random_int_tensor(shape, max_value=self.vocab_size, framework=framework, dtype=int_dtype)
51
+
52
+ if input_name == "timestep_cond":
53
+ shape.append(self.time_cond_proj_dim)
54
+ return self.random_float_tensor(shape, min_value=-1.0, max_value=1.0, framework=framework, dtype=float_dtype)
55
+
56
+
57
+ shape.append(self.text_encoder_projection_dim if input_name == "text_embeds" else self.time_ids)
58
+ return self.random_float_tensor(shape, max_value=self.vocab_size, framework=framework, dtype=float_dtype)
59
+
60
+ class LCMUNetOnnxConfig(VisionOnnxConfig):
61
+ ATOL_FOR_VALIDATION = 1e-3
62
+ # The ONNX export of a CLIPText architecture, another Stable Diffusion component, needs the Trilu
63
+ # operator support, available since opset 14
64
+ DEFAULT_ONNX_OPSET = 14
65
+
66
+ NORMALIZED_CONFIG_CLASS = NormalizedConfig.with_args(
67
+ image_size="sample_size",
68
+ num_channels="in_channels",
69
+ hidden_size="cross_attention_dim",
70
+ vocab_size="norm_num_groups",
71
+ allow_new=True,
72
+ )
73
+
74
+ DUMMY_INPUT_GENERATOR_CLASSES = (
75
+ DummyVisionInputGenerator,
76
+ CustomDummyTimestepInputGenerator,
77
+ DummySeq2SeqDecoderTextInputGenerator,
78
+ )
79
+
80
+ @property
81
+ def inputs(self) -> Dict[str, Dict[int, str]]:
82
+ common_inputs = OrderedDict({
83
+ "sample": {0: "batch_size", 1: "num_channels", 2: "height", 3: "width"},
84
+ "timestep": {0: "steps"},
85
+ "encoder_hidden_states": {0: "batch_size", 1: "sequence_length"},
86
+ "timestep_cond": {0: "batch_size"},
87
+ })
88
+
89
+ # TODO : add text_image, image and image_embeds
90
+ if getattr(self._normalized_config, "addition_embed_type", None) == "text_time":
91
+ common_inputs["text_embeds"] = {0: "batch_size"}
92
+ common_inputs["time_ids"] = {0: "batch_size"}
93
+
94
+ return common_inputs
95
+
96
+ @property
97
+ def outputs(self) -> Dict[str, Dict[int, str]]:
98
+ return {
99
+ "out_sample": {0: "batch_size", 1: "num_channels", 2: "height", 3: "width"},
100
+ }
101
+
102
+ @property
103
+ def torch_to_onnx_output_map(self) -> Dict[str, str]:
104
+ return {
105
+ "sample": "out_sample",
106
+ }
107
+
108
+ def generate_dummy_inputs(self, framework: str = "pt", **kwargs):
109
+ dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs)
110
+ dummy_inputs["encoder_hidden_states"] = dummy_inputs["encoder_hidden_states"][0]
111
+
112
+ if getattr(self._normalized_config, "addition_embed_type", None) == "text_time":
113
+ dummy_inputs["added_cond_kwargs"] = {
114
+ "text_embeds": dummy_inputs.pop("text_embeds"),
115
+ "time_ids": dummy_inputs.pop("time_ids"),
116
+ }
117
+
118
+ return dummy_inputs
119
+
120
+ def ordered_inputs(self, model) -> Dict[str, Dict[int, str]]:
121
+ return self.inputs  # The default implementation breaks input order when timestep_cond is involved, so just return the original inputs
122
+
123
+ model_id = "SimianLuo/LCM_Dreamshaper_v7"
124
+
125
+ text_encoder_config = CLIPTextConfig.from_pretrained(model_id, subfolder = "text_encoder")
126
+ unet_config = UNet2DConditionModel.from_pretrained(model_id, subfolder = "unet").config
127
+
128
+ unet_config.text_encoder_projection_dim = text_encoder_config.projection_dim
129
+ unet_config.requires_aesthetics_score = False
130
+
131
+ custom_onnx_configs = {
132
+ "unet": LCMUNetOnnxConfig(config = unet_config, task = "semantic-segmentation")
133
+ }
134
+
135
+ main_export(model_name_or_path = model_id, output = "./", task = "stable-diffusion", fp16 = False, int8 = False, custom_onnx_configs = custom_onnx_configs)
feature_extractor/preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "crop_size": {
3
+ "height": 224,
4
+ "width": 224
5
+ },
6
+ "do_center_crop": true,
7
+ "do_convert_rgb": true,
8
+ "do_normalize": true,
9
+ "do_rescale": true,
10
+ "do_resize": true,
11
+ "feature_extractor_type": "CLIPFeatureExtractor",
12
+ "image_mean": [
13
+ 0.48145466,
14
+ 0.4578275,
15
+ 0.40821073
16
+ ],
17
+ "image_processor_type": "CLIPImageProcessor",
18
+ "image_std": [
19
+ 0.26862954,
20
+ 0.26130258,
21
+ 0.27577711
22
+ ],
23
+ "resample": 3,
24
+ "rescale_factor": 0.00392156862745098,
25
+ "size": {
26
+ "shortest_edge": 224
27
+ }
28
+ }
lcm_ov_pipeline.py ADDED
@@ -0,0 +1,388 @@
1
+ import inspect
2
+
3
+ from pathlib import Path
4
+ from tempfile import TemporaryDirectory
5
+ from typing import List, Optional, Tuple, Union, Dict, Any, Callable, OrderedDict
6
+
7
+ import numpy as np
8
+ import openvino
9
+ import torch
10
+
11
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
12
+ from optimum.intel.openvino.modeling_diffusion import OVStableDiffusionPipeline, OVModelUnet, OVModelVaeDecoder, OVModelTextEncoder, OVModelVaeEncoder, VaeImageProcessor
13
+ from optimum.utils import (
14
+ DIFFUSION_MODEL_TEXT_ENCODER_2_SUBFOLDER,
15
+ DIFFUSION_MODEL_TEXT_ENCODER_SUBFOLDER,
16
+ DIFFUSION_MODEL_UNET_SUBFOLDER,
17
+ DIFFUSION_MODEL_VAE_DECODER_SUBFOLDER,
18
+ DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER,
19
+ )
20
+
21
+
22
+ from diffusers import logging
23
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
24
+
25
+ class LCMOVModelUnet(OVModelUnet):
26
+ def __call__(
27
+ self,
28
+ sample: np.ndarray,
29
+ timestep: np.ndarray,
30
+ encoder_hidden_states: np.ndarray,
31
+ timestep_cond: Optional[np.ndarray] = None,
32
+ text_embeds: Optional[np.ndarray] = None,
33
+ time_ids: Optional[np.ndarray] = None,
34
+ ):
35
+ self._compile()
36
+
37
+ inputs = {
38
+ "sample": sample,
39
+ "timestep": timestep,
40
+ "encoder_hidden_states": encoder_hidden_states,
41
+ }
42
+
43
+ if timestep_cond is not None:
44
+ inputs["timestep_cond"] = timestep_cond
45
+ if text_embeds is not None:
46
+ inputs["text_embeds"] = text_embeds
47
+ if time_ids is not None:
48
+ inputs["time_ids"] = time_ids
49
+
50
+ outputs = self.request(inputs, shared_memory=True)
51
+ return list(outputs.values())
52
+
53
+ class OVLatentConsistencyModelPipeline(OVStableDiffusionPipeline):
54
+
55
+ def __init__(
56
+ self,
57
+ vae_decoder: openvino.runtime.Model,
58
+ text_encoder: openvino.runtime.Model,
59
+ unet: openvino.runtime.Model,
60
+ config: Dict[str, Any],
61
+ tokenizer: "CLIPTokenizer",
62
+ scheduler: Union["DDIMScheduler", "PNDMScheduler", "LMSDiscreteScheduler"],
63
+ feature_extractor: Optional["CLIPFeatureExtractor"] = None,
64
+ vae_encoder: Optional[openvino.runtime.Model] = None,
65
+ text_encoder_2: Optional[openvino.runtime.Model] = None,
66
+ tokenizer_2: Optional["CLIPTokenizer"] = None,
67
+ device: str = "CPU",
68
+ dynamic_shapes: bool = True,
69
+ compile: bool = True,
70
+ ov_config: Optional[Dict[str, str]] = None,
71
+ model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None,
72
+ **kwargs,
73
+ ):
74
+ self._internal_dict = config
75
+ self._device = device.upper()
76
+ self.is_dynamic = dynamic_shapes
77
+ self.ov_config = ov_config if ov_config is not None else {}
78
+ self._model_save_dir = (
79
+ Path(model_save_dir.name) if isinstance(model_save_dir, TemporaryDirectory) else model_save_dir
80
+ )
81
+ self.vae_decoder = OVModelVaeDecoder(vae_decoder, self)
82
+ self.unet = LCMOVModelUnet(unet, self)
83
+ self.text_encoder = OVModelTextEncoder(text_encoder, self) if text_encoder is not None else None
84
+ self.text_encoder_2 = (
85
+ OVModelTextEncoder(text_encoder_2, self, model_name=DIFFUSION_MODEL_TEXT_ENCODER_2_SUBFOLDER)
86
+ if text_encoder_2 is not None
87
+ else None
88
+ )
89
+ self.vae_encoder = OVModelVaeEncoder(vae_encoder, self) if vae_encoder is not None else None
90
+
91
+ if "block_out_channels" in self.vae_decoder.config:
92
+ self.vae_scale_factor = 2 ** (len(self.vae_decoder.config["block_out_channels"]) - 1)
93
+ else:
94
+ self.vae_scale_factor = 8
95
+
96
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
97
+
98
+ self.tokenizer = tokenizer
99
+ self.tokenizer_2 = tokenizer_2
100
+ self.scheduler = scheduler
101
+ self.feature_extractor = feature_extractor
102
+ self.safety_checker = None
103
+ self.preprocessors = []
104
+
105
+ if self.is_dynamic:
106
+ self.reshape(batch_size=-1, height=-1, width=-1, num_images_per_prompt=-1)
107
+
108
+ if compile:
109
+ self.compile()
110
+
111
+ sub_models = {
112
+ DIFFUSION_MODEL_TEXT_ENCODER_SUBFOLDER: self.text_encoder,
113
+ DIFFUSION_MODEL_UNET_SUBFOLDER: self.unet,
114
+ DIFFUSION_MODEL_VAE_DECODER_SUBFOLDER: self.vae_decoder,
115
+ DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER: self.vae_encoder,
116
+ DIFFUSION_MODEL_TEXT_ENCODER_2_SUBFOLDER: self.text_encoder_2,
117
+ }
118
+ for name in sub_models.keys():
119
+ self._internal_dict[name] = (
120
+ ("optimum", sub_models[name].__class__.__name__) if sub_models[name] is not None else (None, None)
121
+ )
122
+
123
+ self._internal_dict.pop("vae", None)
124
+
125
+ def _reshape_unet(
126
+ self,
127
+ model: openvino.runtime.Model,
128
+ batch_size: int = -1,
129
+ height: int = -1,
130
+ width: int = -1,
131
+ num_images_per_prompt: int = -1,
132
+ tokenizer_max_length: int = -1,
133
+ ):
134
+ if batch_size == -1 or num_images_per_prompt == -1:
135
+ batch_size = -1
136
+ else:
137
+ batch_size = batch_size * num_images_per_prompt
138
+
139
+ height = height // self.vae_scale_factor if height > 0 else height
140
+ width = width // self.vae_scale_factor if width > 0 else width
141
+ shapes = {}
142
+ for inputs in model.inputs:
143
+ shapes[inputs] = inputs.get_partial_shape()
144
+ if inputs.get_any_name() == "timestep":
145
+ shapes[inputs][0] = 1
146
+ elif inputs.get_any_name() == "sample":
147
+ in_channels = self.unet.config.get("in_channels", None)
148
+ if in_channels is None:
149
+ in_channels = shapes[inputs][1]
150
+ if in_channels.is_dynamic:
151
+ logger.warning(
152
+ "Could not identify `in_channels` from the unet configuration, to statically reshape the unet please provide a configuration."
153
+ )
154
+ self.is_dynamic = True
155
+
156
+ shapes[inputs] = [batch_size, in_channels, height, width]
157
+ elif inputs.get_any_name() == "timestep_cond":
158
+ shapes[inputs] = [batch_size, inputs.get_partial_shape()[1]]
159
+ elif inputs.get_any_name() == "text_embeds":
160
+ shapes[inputs] = [batch_size, self.text_encoder_2.config["projection_dim"]]
161
+ elif inputs.get_any_name() == "time_ids":
162
+ shapes[inputs] = [batch_size, inputs.get_partial_shape()[1]]
163
+ else:
164
+ shapes[inputs][0] = batch_size
165
+ shapes[inputs][1] = tokenizer_max_length
166
+ model.reshape(shapes)
167
+ return model
168
+
169
+ def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=np.float32):
170
+ """
171
+ see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
172
+ Args:
173
+ w: np.ndarray: guidance scale values for which to generate embedding vectors
174
+ embedding_dim: int: dimension of the embeddings to generate
175
+ dtype: data type of the generated embeddings
176
+
177
+ Returns:
178
+ embedding vectors with shape `(len(w), embedding_dim)`
179
+ """
180
+ assert len(w.shape) == 1
181
+ w = w * 1000.
182
+
183
+ half_dim = embedding_dim // 2
184
+ emb = np.log(np.array(10000.)) / (half_dim - 1)
185
+ emb = np.exp(np.arange(half_dim, dtype=dtype) * -emb)
186
+ emb = w.astype(dtype)[:, None] * emb[None, :]
187
+ emb = np.concatenate([np.sin(emb), np.cos(emb)], axis=1)
188
+ if embedding_dim % 2 == 1: # zero pad
189
+ emb = np.pad(emb, (0, 1))
190
+ assert emb.shape == (w.shape[0], embedding_dim)
191
+ return emb
192
+
193
+ # Adapted from https://github.com/huggingface/optimum/blob/15b8d1eed4d83c5004d3b60f6b6f13744b358f01/optimum/pipelines/diffusers/pipeline_stable_diffusion.py#L201
194
+ def __call__(
195
+ self,
196
+ prompt: Optional[Union[str, List[str]]] = None,
197
+ height: Optional[int] = None,
198
+ width: Optional[int] = None,
199
+ num_inference_steps: int = 4,
200
+ original_inference_steps: int = None,
201
+ guidance_scale: float = 7.5,
202
+ num_images_per_prompt: int = 1,
203
+ eta: float = 0.0,
204
+ generator: Optional[np.random.RandomState] = None,
205
+ latents: Optional[np.ndarray] = None,
206
+ prompt_embeds: Optional[np.ndarray] = None,
207
+ output_type: str = "pil",
208
+ return_dict: bool = True,
209
+ callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
210
+ callback_steps: int = 1,
211
+ guidance_rescale: float = 0.0,
212
+ ):
213
+ r"""
214
+ Function invoked when calling the pipeline for generation.
215
+
216
+ Args:
217
+ prompt (`Optional[Union[str, List[str]]]`, defaults to None):
218
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead.
220
+ height (`Optional[int]`, defaults to None):
221
+ The height in pixels of the generated image.
222
+ width (`Optional[int]`, defaults to None):
223
+ The width in pixels of the generated image.
224
+ num_inference_steps (`int`, defaults to 4):
225
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
226
+ expense of slower inference.
227
+ original_inference_steps (`int`, *optional*):
228
+ The original number of inference steps use to generate a linearly-spaced timestep schedule, from which
229
+ we will draw `num_inference_steps` evenly spaced timesteps from as our final timestep schedule,
230
+ following the Skipping-Step method in the paper (see Section 4.3). If not set this will default to the
231
+ scheduler's `original_inference_steps` attribute.
232
+ guidance_scale (`float`, defaults to 7.5):
233
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
234
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
235
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
236
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
237
+ usually at the expense of lower image quality.
238
+ num_images_per_prompt (`int`, defaults to 1):
239
+ The number of images to generate per prompt.
240
+ eta (`float`, defaults to 0.0):
241
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
242
+ [`schedulers.DDIMScheduler`], will be ignored for others.
243
+ generator (`Optional[np.random.RandomState]`, defaults to `None`):
244
+ A np.random.RandomState to make generation deterministic.
245
+ latents (`Optional[np.ndarray]`, defaults to `None`):
246
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
247
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
248
+ tensor will be generated by sampling using the supplied random `generator`.
249
+ prompt_embeds (`Optional[np.ndarray]`, defaults to `None`):
250
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
251
+ provided, text embeddings will be generated from `prompt` input argument.
252
+ output_type (`str`, defaults to `"pil"`):
253
+ The output format of the generate image. Choose between
254
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
255
+ return_dict (`bool`, defaults to `True`):
256
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
257
+ plain tuple.
258
+ callback (Optional[Callable], defaults to `None`):
259
+ A function that will be called every `callback_steps` steps during inference. The function will be
260
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
261
+ callback_steps (`int`, defaults to 1):
262
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
263
+ called at every step.
264
+ guidance_rescale (`float`, defaults to 0.0):
265
+ Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
266
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of
267
+ [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
268
+ Guidance rescale factor should fix overexposure when using zero terminal SNR.
269
+
270
+ Returns:
271
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
272
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a plain `tuple`.
273
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
274
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
275
+ (nsfw) content, according to the `safety_checker`.
276
+ """
277
+ height = height or self.unet.config.get("sample_size", 64) * self.vae_scale_factor
278
+ width = width or self.unet.config.get("sample_size", 64) * self.vae_scale_factor
279
+
280
+ # check inputs. Raise error if not correct
281
+ self.check_inputs(
282
+ prompt, height, width, callback_steps, None, prompt_embeds, None
283
+ )
284
+
285
+ # define call parameters
286
+ if isinstance(prompt, str):
287
+ batch_size = 1
288
+ elif isinstance(prompt, list):
289
+ batch_size = len(prompt)
290
+ else:
291
+ batch_size = prompt_embeds.shape[0]
292
+
293
+ if generator is None:
294
+ generator = np.random
295
+
296
+ # Create torch.Generator instance with same state as np.random.RandomState
297
+ torch_generator = torch.Generator().manual_seed(int(generator.get_state()[1][0]))
298
+
299
+ #do_classifier_free_guidance = guidance_scale > 1.0
300
+
301
+ # NOTE: when a LCM is distilled from an LDM via latent consistency distillation (Algorithm 1) with guided
302
+ # distillation, the forward pass of the LCM learns to approximate sampling from the LDM using CFG with the
303
+ # unconditional prompt "" (the empty string). Due to this, LCMs currently do not support negative prompts.
304
+ prompt_embeds = self._encode_prompt(
305
+ prompt,
306
+ num_images_per_prompt,
307
+ False,
308
+ negative_prompt=None,
309
+ prompt_embeds=prompt_embeds,
310
+ negative_prompt_embeds=None,
311
+ )
312
+
313
+ # set timesteps
314
+ self.scheduler.set_timesteps(num_inference_steps, "cpu", original_inference_steps=original_inference_steps)
315
+ timesteps = self.scheduler.timesteps
316
+
317
+ latents = self.prepare_latents(
318
+ batch_size * num_images_per_prompt,
319
+ self.unet.config.get("in_channels", 4),
320
+ height,
321
+ width,
322
+ prompt_embeds.dtype,
323
+ generator,
324
+ latents,
325
+ )
326
+
327
+ # Get Guidance Scale Embedding
328
+ w = np.tile(guidance_scale - 1, batch_size * num_images_per_prompt)
329
+ w_embedding = self.get_guidance_scale_embedding(w, embedding_dim=self.unet.config.get("time_cond_proj_dim", 256))
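+ # The UNet is conditioned on the guidance scale through this embedding (passed below as `timestep_cond`),
+ # which is why the denoising loop does not run a separate unconditional pass for classifier-free guidance.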
330
+
331
+ # Adapted from diffusers to extend it for other runtimes than ORT
332
+ timestep_dtype = self.unet.input_dtype.get("timestep", np.float32)
333
+
334
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
335
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
336
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
337
+ # and should be between [0, 1]
338
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
339
+ extra_step_kwargs = {}
340
+ if accepts_eta:
341
+ extra_step_kwargs["eta"] = eta
342
+
343
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
344
+ if accepts_generator:
345
+ extra_step_kwargs["generator"] = torch_generator
346
+
347
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
348
+ for i, t in enumerate(self.progress_bar(timesteps)):
349
+
350
+ # predict the noise residual
351
+ timestep = np.array([t], dtype=timestep_dtype)
352
+
353
+ noise_pred = self.unet(sample=latents, timestep=timestep, timestep_cond = w_embedding, encoder_hidden_states=prompt_embeds)[0]
354
+
355
+ # compute the previous noisy sample x_t -> x_t-1
356
+ latents, denoised = self.scheduler.step(
357
+ torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs, return_dict = False
358
+ )
359
+
360
+ latents, denoised = latents.numpy(), denoised.numpy()
361
+
362
+ # call the callback, if provided
363
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
364
+ if callback is not None and i % callback_steps == 0:
365
+ callback(i, t, latents)
366
+
367
+ if output_type == "latent":
368
+ image = latents
369
+ has_nsfw_concept = None
370
+ else:
371
+ denoised /= self.vae_decoder.config.get("scaling_factor", 0.18215)
372
+ # There seems to be a strange result when using the half-precision VAE decoder with batch size > 1, so decode latents one at a time
373
+ image = np.concatenate(
374
+ [self.vae_decoder(latent_sample=denoised[i : i + 1])[0] for i in range(latents.shape[0])]
375
+ )
376
+ image, has_nsfw_concept = self.run_safety_checker(image)
377
+
378
+ if has_nsfw_concept is None:
379
+ do_denormalize = [True] * image.shape[0]
380
+ else:
381
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
382
+
383
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
384
+
385
+ if not return_dict:
386
+ return (image, has_nsfw_concept)
387
+
388
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
lcm_scheduler.py ADDED
@@ -0,0 +1,529 @@
1
+ # Copyright 2023 Stanford University Team and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
16
+ # and https://github.com/hojonathanho/diffusion
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import torch
24
+
25
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
26
+ from diffusers.utils import BaseOutput, logging
27
+ from diffusers.utils.torch_utils import randn_tensor
28
+ from diffusers.schedulers.scheduling_utils import SchedulerMixin
29
+
30
+
31
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
32
+
33
+
34
+ @dataclass
35
+ class LCMSchedulerOutput(BaseOutput):
36
+ """
37
+ Output class for the scheduler's `step` function output.
38
+
39
+ Args:
40
+ prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
41
+ Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
42
+ denoising loop.
43
+ denoised (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
44
+ The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
45
+ `denoised` can be used to preview progress or for guidance.
46
+ """
47
+
48
+ prev_sample: torch.FloatTensor
49
+ denoised: Optional[torch.FloatTensor] = None
50
+
51
+
52
+ # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
53
+ def betas_for_alpha_bar(
54
+ num_diffusion_timesteps,
55
+ max_beta=0.999,
56
+ alpha_transform_type="cosine",
57
+ ):
58
+ """
59
+ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
60
+ (1-beta) over time from t = [0,1].
61
+
62
+ Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
63
+ to that part of the diffusion process.
64
+
65
+
66
+ Args:
67
+ num_diffusion_timesteps (`int`): the number of betas to produce.
68
+ max_beta (`float`): the maximum beta to use; use values lower than 1 to
69
+ prevent singularities.
70
+ alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
71
+ Choose from `cosine` or `exp`
72
+
73
+ Returns:
74
+ betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
75
+ """
76
+ if alpha_transform_type == "cosine":
77
+
78
+ def alpha_bar_fn(t):
79
+ return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
80
+
81
+ elif alpha_transform_type == "exp":
82
+
83
+ def alpha_bar_fn(t):
84
+ return math.exp(t * -12.0)
85
+
86
+ else:
87
+ raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
88
+
89
+ betas = []
90
+ for i in range(num_diffusion_timesteps):
91
+ t1 = i / num_diffusion_timesteps
92
+ t2 = (i + 1) / num_diffusion_timesteps
93
+ betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
94
+ return torch.tensor(betas, dtype=torch.float32)
95
+
96
+
97
+ # Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr
98
+ def rescale_zero_terminal_snr(betas: torch.FloatTensor) -> torch.FloatTensor:
99
+ """
100
+ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
101
+
102
+
103
+ Args:
104
+ betas (`torch.FloatTensor`):
105
+ the betas that the scheduler is being initialized with.
106
+
107
+ Returns:
108
+ `torch.FloatTensor`: rescaled betas with zero terminal SNR
109
+ """
110
+ # Convert betas to alphas_bar_sqrt
111
+ alphas = 1.0 - betas
112
+ alphas_cumprod = torch.cumprod(alphas, dim=0)
113
+ alphas_bar_sqrt = alphas_cumprod.sqrt()
114
+
115
+ # Store old values.
116
+ alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
117
+ alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
118
+
119
+ # Shift so the last timestep is zero.
120
+ alphas_bar_sqrt -= alphas_bar_sqrt_T
121
+
122
+ # Scale so the first timestep is back to the old value.
123
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
124
+
125
+ # Convert alphas_bar_sqrt to betas
126
+ alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
127
+ alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
128
+ alphas = torch.cat([alphas_bar[0:1], alphas])
129
+ betas = 1 - alphas
130
+
131
+ return betas
132
+
133
+
134
+ class LCMScheduler(SchedulerMixin, ConfigMixin):
135
+ """
136
+ `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
137
+ non-Markovian guidance.
138
+
139
+ This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. [`~ConfigMixin`] takes care of storing all config
140
+ attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be
141
+ accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving
142
+ functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions.
143
+
144
+ Args:
145
+ num_train_timesteps (`int`, defaults to 1000):
146
+ The number of diffusion steps to train the model.
147
+ beta_start (`float`, defaults to 0.0001):
148
+ The starting `beta` value of inference.
149
+ beta_end (`float`, defaults to 0.02):
150
+ The final `beta` value.
151
+ beta_schedule (`str`, defaults to `"linear"`):
152
+ The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
153
+ `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
154
+ trained_betas (`np.ndarray`, *optional*):
155
+ Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
156
+ original_inference_steps (`int`, *optional*, defaults to 50):
157
+ The default number of inference steps used to generate a linearly-spaced timestep schedule, from which we
158
+ will ultimately take `num_inference_steps` evenly spaced timesteps to form the final timestep schedule.
159
+ clip_sample (`bool`, defaults to `True`):
160
+ Clip the predicted sample for numerical stability.
161
+ clip_sample_range (`float`, defaults to 1.0):
162
+ The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
163
+ set_alpha_to_one (`bool`, defaults to `True`):
164
+ Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
165
+ there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
166
+ otherwise it uses the alpha value at step 0.
167
+ steps_offset (`int`, defaults to 0):
168
+ An offset added to the inference steps. You can use a combination of `offset=1` and
169
+ `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable
170
+ Diffusion.
171
+ prediction_type (`str`, defaults to `epsilon`, *optional*):
172
+ Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
173
+ `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
174
+ Video](https://imagen.research.google/video/paper.pdf) paper).
175
+ thresholding (`bool`, defaults to `False`):
176
+ Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
177
+ as Stable Diffusion.
178
+ dynamic_thresholding_ratio (`float`, defaults to 0.995):
179
+ The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
180
+ sample_max_value (`float`, defaults to 1.0):
181
+ The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
182
+ timestep_spacing (`str`, defaults to `"leading"`):
183
+ The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
184
+ Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
185
+ rescale_betas_zero_snr (`bool`, defaults to `False`):
186
+ Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
187
+ dark samples instead of limiting it to samples with medium brightness. Loosely related to
188
+ [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
189
+ """
190
+
191
+ order = 1
192
+
193
+ @register_to_config
194
+ def __init__(
195
+ self,
196
+ num_train_timesteps: int = 1000,
197
+ beta_start: float = 0.00085,
198
+ beta_end: float = 0.012,
199
+ beta_schedule: str = "scaled_linear",
200
+ trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
201
+ original_inference_steps: int = 50,
202
+ clip_sample: bool = False,
203
+ clip_sample_range: float = 1.0,
204
+ set_alpha_to_one: bool = True,
205
+ steps_offset: int = 0,
206
+ prediction_type: str = "epsilon",
207
+ thresholding: bool = False,
208
+ dynamic_thresholding_ratio: float = 0.995,
209
+ sample_max_value: float = 1.0,
210
+ timestep_spacing: str = "leading",
211
+ rescale_betas_zero_snr: bool = False,
212
+ ):
213
+ if trained_betas is not None:
214
+ self.betas = torch.tensor(trained_betas, dtype=torch.float32)
215
+ elif beta_schedule == "linear":
216
+ self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
217
+ elif beta_schedule == "scaled_linear":
218
+ # this schedule is very specific to the latent diffusion model.
219
+ self.betas = (
220
+ torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
221
+ )
222
+ elif beta_schedule == "squaredcos_cap_v2":
223
+ # Glide cosine schedule
224
+ self.betas = betas_for_alpha_bar(num_train_timesteps)
225
+ else:
226
+ raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
227
+
228
+ # Rescale for zero SNR
229
+ if rescale_betas_zero_snr:
230
+ self.betas = rescale_zero_terminal_snr(self.betas)
231
+
232
+ self.alphas = 1.0 - self.betas
233
+ self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
234
+
235
+ # At every step in ddim, we are looking into the previous alphas_cumprod
236
+ # For the final step, there is no previous alphas_cumprod because we are already at 0
237
+ # `set_alpha_to_one` decides whether we set this parameter simply to one or
238
+ # whether we use the final alpha of the "non-previous" one.
239
+ self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
240
+
241
+ # standard deviation of the initial noise distribution
242
+ self.init_noise_sigma = 1.0
243
+
244
+ # setable values
245
+ self.num_inference_steps = None
246
+ self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
247
+
248
+ self._step_index = None
249
+
250
+ # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index
251
+ def _init_step_index(self, timestep):
252
+ if isinstance(timestep, torch.Tensor):
253
+ timestep = timestep.to(self.timesteps.device)
254
+
255
+ index_candidates = (self.timesteps == timestep).nonzero()
256
+
257
+ # The sigma index that is taken for the **very** first `step`
258
+ # is always the second index (or the last index if there is only 1)
259
+ # This way we can ensure we don't accidentally skip a sigma in
260
+ # case we start in the middle of the denoising schedule (e.g. for image-to-image)
261
+ if len(index_candidates) > 1:
262
+ step_index = index_candidates[1]
263
+ else:
264
+ step_index = index_candidates[0]
265
+
266
+ self._step_index = step_index.item()
267
+
268
+ @property
269
+ def step_index(self):
270
+ return self._step_index
271
+
272
+ def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
273
+ """
274
+ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
275
+ current timestep.
276
+
277
+ Args:
278
+ sample (`torch.FloatTensor`):
279
+ The input sample.
280
+ timestep (`int`, *optional*):
281
+ The current timestep in the diffusion chain.
282
+ Returns:
283
+ `torch.FloatTensor`:
284
+ A scaled input sample.
285
+ """
286
+ return sample
287
+
288
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
289
+ def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
290
+ """
291
+ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
292
+ prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
293
+ s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
294
+ pixels from saturation at each step. We find that dynamic thresholding results in significantly better
295
+ photorealism as well as better image-text alignment, especially when using very large guidance weights."
296
+
297
+ https://arxiv.org/abs/2205.11487
298
+ """
299
+ dtype = sample.dtype
300
+ batch_size, channels, *remaining_dims = sample.shape
301
+
302
+ if dtype not in (torch.float32, torch.float64):
303
+ sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
304
+
305
+ # Flatten sample for doing quantile calculation along each image
306
+ sample = sample.reshape(batch_size, channels * np.prod(remaining_dims))
307
+
308
+ abs_sample = sample.abs() # "a certain percentile absolute pixel value"
309
+
310
+ s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
311
+ s = torch.clamp(
312
+ s, min=1, max=self.config.sample_max_value
313
+ ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
314
+ s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
315
+ sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
316
+
317
+ sample = sample.reshape(batch_size, channels, *remaining_dims)
318
+ sample = sample.to(dtype)
319
+
320
+ return sample
321
+
322
+ def set_timesteps(
323
+ self,
324
+ num_inference_steps: int,
325
+ device: Union[str, torch.device] = None,
326
+ original_inference_steps: Optional[int] = None,
327
+ ):
328
+ """
329
+ Sets the discrete timesteps used for the diffusion chain (to be run before inference).
330
+
331
+ Args:
332
+ num_inference_steps (`int`):
333
+ The number of diffusion steps used when generating samples with a pre-trained model.
334
+ device (`str` or `torch.device`, *optional*):
335
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
336
+ original_inference_steps (`int`, *optional*):
337
+ The original number of inference steps, which will be used to generate a linearly-spaced timestep
338
+ schedule (which is different from the standard `diffusers` implementation). We will then take
339
+ `num_inference_steps` timesteps from this schedule, evenly spaced in terms of indices, and use that as
340
+ our final timestep schedule. If not set, this will default to the `original_inference_steps` attribute.
341
+ """
342
+
343
+ if num_inference_steps > self.config.num_train_timesteps:
344
+ raise ValueError(
345
+ f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.num_train_timesteps`:"
346
+ f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
347
+ f" maximal {self.config.num_train_timesteps} timesteps."
348
+ )
349
+
350
+ self.num_inference_steps = num_inference_steps
351
+ original_steps = (
352
+ original_inference_steps if original_inference_steps is not None else self.config.original_inference_steps
353
+ )
354
+
355
+ if original_steps > self.config.num_train_timesteps:
356
+ raise ValueError(
357
+ f"`original_steps`: {original_steps} cannot be larger than `self.config.num_train_timesteps`:"
358
+ f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
359
+ f" maximal {self.config.num_train_timesteps} timesteps."
360
+ )
361
+
362
+ if num_inference_steps > original_steps:
363
+ raise ValueError(
364
+ f"`num_inference_steps`: {num_inference_steps} cannot be larger than `original_inference_steps`:"
365
+ f" {original_steps} because the final timestep schedule will be a subset of the"
366
+ f" `original_inference_steps`-sized initial timestep schedule."
367
+ )
368
+
369
+ # LCM Timesteps Setting
370
+ # Currently, only linear spacing is supported.
371
+ c = self.config.num_train_timesteps // original_steps
372
+ # LCM Training Steps Schedule
373
+ lcm_origin_timesteps = np.asarray(list(range(1, original_steps + 1))) * c - 1
374
+ skipping_step = len(lcm_origin_timesteps) // num_inference_steps
375
+ # LCM Inference Steps Schedule
376
+ timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps]
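+ # For example, with the defaults num_train_timesteps=1000 and original_steps=50: c=20 and
+ # lcm_origin_timesteps=[19, 39, ..., 999]; with num_inference_steps=4, skipping_step=12 and timesteps=[999, 759, 519, 279].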
377
+
378
+ self.timesteps = torch.from_numpy(timesteps.copy()).to(device=device, dtype=torch.long)
379
+
380
+ self._step_index = None
381
+
382
+ def get_scalings_for_boundary_condition_discrete(self, t):
383
+ self.sigma_data = 0.5 # Default: 0.5
384
+
385
+ # By dividing t by 0.1, c_skip becomes almost a delta function at t = 0.
386
+ c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2)
387
+ c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5
388
+ return c_skip, c_out
389
+
390
+ def step(
391
+ self,
392
+ model_output: torch.FloatTensor,
393
+ timestep: int,
394
+ sample: torch.FloatTensor,
395
+ generator: Optional[torch.Generator] = None,
396
+ return_dict: bool = True,
397
+ ) -> Union[LCMSchedulerOutput, Tuple]:
398
+ """
399
+ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
400
+ process from the learned model outputs (most often the predicted noise).
401
+
402
+ Args:
403
+ model_output (`torch.FloatTensor`):
404
+ The direct output from learned diffusion model.
405
+ timestep (`float`):
406
+ The current discrete timestep in the diffusion chain.
407
+ sample (`torch.FloatTensor`):
408
+ A current instance of a sample created by the diffusion process.
409
+ generator (`torch.Generator`, *optional*):
410
+ A random number generator.
411
+ return_dict (`bool`, *optional*, defaults to `True`):
412
+ Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.
413
+ Returns:
414
+ [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:
415
+ If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a
416
+ tuple is returned where the first element is the sample tensor.
417
+ """
418
+ if self.num_inference_steps is None:
419
+ raise ValueError(
420
+ "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
421
+ )
422
+
423
+ if self.step_index is None:
424
+ self._init_step_index(timestep)
425
+
426
+ # 1. get previous step value
427
+ prev_step_index = self.step_index + 1
428
+ if prev_step_index < len(self.timesteps):
429
+ prev_timestep = self.timesteps[prev_step_index]
430
+ else:
431
+ prev_timestep = timestep
432
+
433
+ # 2. compute alphas, betas
434
+ alpha_prod_t = self.alphas_cumprod[timestep]
435
+ alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
436
+
437
+ beta_prod_t = 1 - alpha_prod_t
438
+ beta_prod_t_prev = 1 - alpha_prod_t_prev
439
+
440
+ # 3. Get scalings for boundary conditions
441
+ c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)
442
+
443
+ # 4. Compute the predicted original sample x_0 based on the model parameterization
444
+ if self.config.prediction_type == "epsilon": # noise-prediction
445
+ predicted_original_sample = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()
446
+ elif self.config.prediction_type == "sample": # x-prediction
447
+ predicted_original_sample = model_output
448
+ elif self.config.prediction_type == "v_prediction": # v-prediction
449
+ predicted_original_sample = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output
450
+ else:
451
+ raise ValueError(
452
+ f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"
453
+ " `v_prediction` for `LCMScheduler`."
454
+ )
455
+
456
+ # 5. Clip or threshold "predicted x_0"
457
+ if self.config.thresholding:
458
+ predicted_original_sample = self._threshold_sample(predicted_original_sample)
459
+ elif self.config.clip_sample:
460
+ predicted_original_sample = predicted_original_sample.clamp(
461
+ -self.config.clip_sample_range, self.config.clip_sample_range
462
+ )
463
+
464
+ # 6. Denoise model output using boundary conditions
465
+ denoised = c_out * predicted_original_sample + c_skip * sample
466
+
467
+ # 7. Sample and inject noise z ~ N(0, I) for MultiStep Inference
468
+ # Noise is not used for one-step sampling.
469
+ if len(self.timesteps) > 1:
470
+ noise = randn_tensor(model_output.shape, generator=generator, device=model_output.device)
471
+ prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise
472
+ else:
473
+ prev_sample = denoised
474
+
475
+ # upon completion increase step index by one
476
+ self._step_index += 1
477
+
478
+ if not return_dict:
479
+ return (prev_sample, denoised)
480
+
481
+ return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)
482
+
483
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
484
+ def add_noise(
485
+ self,
486
+ original_samples: torch.FloatTensor,
487
+ noise: torch.FloatTensor,
488
+ timesteps: torch.IntTensor,
489
+ ) -> torch.FloatTensor:
490
+ # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
491
+ alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
492
+ timesteps = timesteps.to(original_samples.device)
493
+
494
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
495
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
496
+ while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
497
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
498
+
499
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
500
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
501
+ while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
502
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
503
+
504
+ noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
505
+ return noisy_samples
506
+
507
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
508
+ def get_velocity(
509
+ self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor
510
+ ) -> torch.FloatTensor:
511
+ # Make sure alphas_cumprod and timestep have same device and dtype as sample
512
+ alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
513
+ timesteps = timesteps.to(sample.device)
514
+
515
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
516
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
517
+ while len(sqrt_alpha_prod.shape) < len(sample.shape):
518
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
519
+
520
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
521
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
522
+ while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
523
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
524
+
525
+ velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
526
+ return velocity
527
+
528
+ def __len__(self):
529
+ return self.config.num_train_timesteps
model_index.json ADDED
@@ -0,0 +1,34 @@
1
+ {
2
+ "_class_name": "StableDiffusionPipeline",
3
+ "_diffusers_version": "0.20.2",
4
+ "_name_or_path": "SimianLuo/LCM_Dreamshaper_v7",
5
+ "feature_extractor": [
6
+ "transformers",
7
+ "CLIPImageProcessor"
8
+ ],
9
+ "requires_safety_checker": true,
10
+ "safety_checker": [
11
+ "stable_diffusion",
12
+ "StableDiffusionSafetyChecker"
13
+ ],
14
+ "scheduler": [
15
+ null,
16
+ null
17
+ ],
18
+ "text_encoder": [
19
+ "transformers",
20
+ "CLIPTextModel"
21
+ ],
22
+ "tokenizer": [
23
+ "transformers",
24
+ "CLIPTokenizer"
25
+ ],
26
+ "unet": [
27
+ "diffusers",
28
+ "UNet2DConditionModel"
29
+ ],
30
+ "vae": [
31
+ "diffusers",
32
+ "AutoencoderKL"
33
+ ]
34
+ }
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "_class_name": "LCMScheduler",
+   "_diffusers_version": "0.22.0.dev0",
+   "beta_end": 0.012,
+   "beta_schedule": "scaled_linear",
+   "beta_start": 0.00085,
+   "clip_sample": false,
+   "clip_sample_range": 1.0,
+   "dynamic_thresholding_ratio": 0.995,
+   "num_train_timesteps": 1000,
+   "original_inference_steps": 50,
+   "prediction_type": "epsilon",
+   "rescale_betas_zero_snr": false,
+   "sample_max_value": 1.0,
+   "set_alpha_to_one": true,
+   "steps_offset": 0,
+   "thresholding": false,
+   "timestep_spacing": "leading",
+   "trained_betas": null
+ }
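
For reference, the "scaled_linear" schedule above means the betas are a linear ramp in sqrt-space between `beta_start` and `beta_end`, and `alphas_cumprod` (the ᾱ_t used by `add_noise` earlier in this commit) is the running product of 1 − β. A small sketch reproducing it from just these config values:

```python
import torch

num_train_timesteps = 1000
beta_start, beta_end = 0.00085, 0.012

# "scaled_linear": linspace between sqrt(beta_start) and sqrt(beta_end), then squared
betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

print(float(alphas_cumprod[0]))   # ~0.99915: almost no noise at t=0
print(float(alphas_cumprod[-1]))  # close to 0: nearly pure noise at t=999
```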
text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "/home/user/.cache/huggingface/hub/models--SimianLuo--LCM_Dreamshaper_v7/snapshots/c7f9b672c65a664af57d1de926819fd79cb26eb8/text_encoder",
+   "architectures": [
+     "CLIPTextModel"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "quick_gelu",
+   "hidden_size": 768,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip_text_model",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "projection_dim": 768,
+   "torch_dtype": "float32",
+   "transformers_version": "4.31.0",
+   "vocab_size": 49408
+ }
text_encoder/openvino_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9e2783c544f9432ab465bc1222eae65b5fa51ebbbb5fb981633c30892dec3a4
+ size 492242764
text_encoder/openvino_model.xml ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<|startoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "49406": {
+       "content": "<|startoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49407": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": true,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "model_max_length": 77,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "CLIPTokenizer",
+   "tokenizer_file": null,
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
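
Per this tokenizer config, prompts are lower-cased, BPE-tokenized with the CLIP vocab, and padded/truncated to `model_max_length` 77; the text encoder (hidden_size 768 in `text_encoder/config.json` above) then turns those ids into the (1, 77, 768) hidden states the UNet cross-attends to. A hedged sketch, assuming the repo has been downloaded locally and the converted encoder takes a single input_ids tensor (the integer dtype it expects depends on how the IR was exported):

```python
import numpy as np
import openvino as ov
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("tokenizer")  # the tokenizer/ subfolder of this repo
core = ov.Core()
text_encoder = core.compile_model("text_encoder/openvino_model.xml", "CPU")

ids = tokenizer(
    "a cat wearing a spacesuit",
    padding="max_length",
    max_length=tokenizer.model_max_length,  # 77
    truncation=True,
    return_tensors="np",
).input_ids

hidden_states = text_encoder(ids.astype(np.int32))[0]  # cast may need to match the IR's declared input type
print(hidden_states.shape)  # expected (1, 77, 768)
```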
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
unet/config.json ADDED
@@ -0,0 +1,67 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_diffusers_version": "0.20.2",
+   "_name_or_path": "/home/user/.cache/huggingface/hub/models--SimianLuo--LCM_Dreamshaper_v7/snapshots/c7f9b672c65a664af57d1de926819fd79cb26eb8/unet",
+   "act_fn": "silu",
+   "addition_embed_type": null,
+   "addition_embed_type_num_heads": 64,
+   "addition_time_embed_dim": null,
+   "attention_head_dim": 8,
+   "attention_type": "default",
+   "block_out_channels": [
+     320,
+     640,
+     1280,
+     1280
+   ],
+   "center_input_sample": false,
+   "class_embed_type": null,
+   "class_embeddings_concat": false,
+   "conv_in_kernel": 3,
+   "conv_out_kernel": 3,
+   "cross_attention_dim": 768,
+   "cross_attention_norm": null,
+   "down_block_types": [
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "DownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dropout": 0.0,
+   "dual_cross_attention": false,
+   "encoder_hid_dim": null,
+   "encoder_hid_dim_type": null,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 4,
+   "layers_per_block": 2,
+   "mid_block_only_cross_attention": null,
+   "mid_block_scale_factor": 1,
+   "mid_block_type": "UNetMidBlock2DCrossAttn",
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "num_attention_heads": null,
+   "num_class_embeds": null,
+   "only_cross_attention": false,
+   "out_channels": 4,
+   "projection_class_embeddings_input_dim": null,
+   "resnet_out_scale_factor": 1.0,
+   "resnet_skip_time_act": false,
+   "resnet_time_scale_shift": "default",
+   "sample_size": 96,
+   "time_cond_proj_dim": 256,
+   "time_embedding_act_fn": null,
+   "time_embedding_dim": null,
+   "time_embedding_type": "positional",
+   "timestep_post_act": null,
+   "transformer_layers_per_block": 1,
+   "up_block_types": [
+     "UpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D"
+   ],
+   "upcast_attention": null,
+   "use_linear_projection": false
+ }
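
Two entries in this UNet config matter most for wiring up inference: `sample_size` 96 with `in_channels` 4 fixes the latent resolution (96×96, i.e. 768×768 pixels after the 8× VAE), and `time_cond_proj_dim` 256 means this LCM UNet takes an extra guidance-scale embedding alongside the usual sample/timestep/text inputs. A hedged sketch of a single denoising call; the input names are assumptions about how the model was exported and may differ in the actual IR:

```python
import numpy as np
import openvino as ov

core = ov.Core()
unet = core.compile_model("unet/openvino_model.xml", "CPU")

inputs = {
    "sample": np.random.randn(1, 4, 96, 96).astype(np.float32),               # noisy latents
    "timestep": np.array([999], dtype=np.int64),
    "encoder_hidden_states": np.random.randn(1, 77, 768).astype(np.float32),  # CLIP hidden states
    "timestep_cond": np.random.randn(1, 256).astype(np.float32),              # LCM guidance embedding (time_cond_proj_dim)
}
noise_pred = unet(inputs)[0]
print(noise_pred.shape)  # expected (1, 4, 96, 96): out_channels = 4
```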
unet/openvino_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc40724121308b865c3ffbae8c39991a2f3510e9251f3b0c59db37833628bc38
+ size 3438412388
unet/openvino_model.xml ADDED
The diff for this file is too large to render. See raw diff
 
vae_decoder/config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_class_name": "AutoencoderKL",
+   "_diffusers_version": "0.20.2",
+   "_name_or_path": "/home/user/.cache/huggingface/hub/models--SimianLuo--LCM_Dreamshaper_v7/snapshots/c7f9b672c65a664af57d1de926819fd79cb26eb8/vae",
+   "act_fn": "silu",
+   "block_out_channels": [
+     128,
+     256,
+     512,
+     512
+   ],
+   "down_block_types": [
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D"
+   ],
+   "force_upcast": true,
+   "in_channels": 3,
+   "latent_channels": 4,
+   "layers_per_block": 2,
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "sample_size": 768,
+   "scaling_factor": 0.18215,
+   "up_block_types": [
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D"
+   ]
+ }
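
The decoder's `scaling_factor` 0.18215 is the usual Stable Diffusion latent scaling: latents coming out of the denoising loop are divided by it before decoding (and, symmetrically, encoder outputs are multiplied by it). A hedged decode sketch, again assuming a single-input IR and local paths:

```python
import numpy as np
import openvino as ov

core = ov.Core()
vae_decoder = core.compile_model("vae_decoder/openvino_model.xml", "CPU")

latents = np.random.randn(1, 4, 96, 96).astype(np.float32)  # latent_channels = 4
image = vae_decoder(latents / 0.18215)[0]                    # undo scaling_factor before decoding
print(image.shape)  # expected (1, 3, 768, 768): 8x upsampling of the 96x96 latents
```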
vae_decoder/openvino_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81a8010713a2554061d107967f6a6b301386f661684da510f6035eb379782679
+ size 197960932
vae_decoder/openvino_model.xml ADDED
The diff for this file is too large to render. See raw diff
 
vae_encoder/config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_class_name": "AutoencoderKL",
+   "_diffusers_version": "0.20.2",
+   "_name_or_path": "/home/user/.cache/huggingface/hub/models--SimianLuo--LCM_Dreamshaper_v7/snapshots/c7f9b672c65a664af57d1de926819fd79cb26eb8/vae",
+   "act_fn": "silu",
+   "block_out_channels": [
+     128,
+     256,
+     512,
+     512
+   ],
+   "down_block_types": [
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D"
+   ],
+   "force_upcast": true,
+   "in_channels": 3,
+   "latent_channels": 4,
+   "layers_per_block": 2,
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "sample_size": 768,
+   "scaling_factor": 0.18215,
+   "up_block_types": [
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D"
+   ]
+ }
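
The encoder shares this config with the decoder and is only needed for image-to-image style workflows, where a source image is mapped to latents and scaled by the same 0.18215 factor in the opposite direction. A short hedged sketch, assuming the IR takes a single NCHW pixel tensor in [-1, 1] and returns latents as its first output (how the sampling of the latent distribution was baked in depends on the export):

```python
import numpy as np
import openvino as ov

core = ov.Core()
vae_encoder = core.compile_model("vae_encoder/openvino_model.xml", "CPU")

pixels = np.random.rand(1, 3, 768, 768).astype(np.float32) * 2.0 - 1.0  # image scaled to [-1, 1]
latents = vae_encoder(pixels)[0] * 0.18215                              # apply scaling_factor after encoding
print(latents.shape)  # expected (1, 4, 96, 96)
```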
vae_encoder/openvino_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0198e0db626c241c992904f9a34be172e895028c7dcbb63c66ced4e8e496a8a
+ size 136654796
vae_encoder/openvino_model.xml ADDED
The diff for this file is too large to render. See raw diff