rupeshs committed
Commit 6bcacf9
1 Parent(s): 767ebde

added files

Files changed (36)
  1. __init__.py +0 -0
  2. app.py +123 -5
  3. app_settings.py +46 -0
  4. backend/__init__.py +0 -0
  5. backend/__pycache__/__init__.cpython-311.pyc +0 -0
  6. backend/__pycache__/image_saver.cpython-311.pyc +0 -0
  7. backend/__pycache__/lcm_text_to_image.cpython-311.pyc +0 -0
  8. backend/image_saver.py +39 -0
  9. backend/lcm_text_to_image.py +115 -0
  10. backend/lcmdiffusion/pipelines/latent_consistency_txt2img.py +730 -0
  11. backend/lcmdiffusion/pipelines/openvino/__pycache__/lcm_ov_pipeline.cpython-311.pyc +0 -0
  12. backend/lcmdiffusion/pipelines/openvino/__pycache__/lcm_scheduler.cpython-311.pyc +0 -0
  13. backend/lcmdiffusion/pipelines/openvino/lcm_ov_pipeline.py +390 -0
  14. backend/lcmdiffusion/pipelines/openvino/lcm_scheduler.py +529 -0
  15. backend/models/__pycache__/lcmdiffusion_setting.cpython-311.pyc +0 -0
  16. backend/models/lcmdiffusion_setting.py +19 -0
  17. constants.py +10 -0
  18. context.py +44 -0
  19. frontend/__pycache__/utils.cpython-311.pyc +0 -0
  20. frontend/gui/__pycache__/app_window.cpython-311.pyc +0 -0
  21. frontend/gui/__pycache__/image_generator_worker.cpython-311.pyc +0 -0
  22. frontend/gui/__pycache__/ui.cpython-311.pyc +0 -0
  23. frontend/gui/app_window.py +435 -0
  24. frontend/gui/image_generator_worker.py +37 -0
  25. frontend/gui/ui.py +15 -0
  26. frontend/utils.py +32 -0
  27. frontend/webui/css/style.css +24 -0
  28. frontend/webui/text_to_image_ui.py +179 -0
  29. frontend/webui/ui.py +36 -0
  30. models/__pycache__/interface_types.cpython-311.pyc +0 -0
  31. models/__pycache__/settings.cpython-311.pyc +0 -0
  32. models/interface_types.py +7 -0
  33. models/settings.py +8 -0
  34. paths.py +48 -0
  35. requirements.txt +13 -0
  36. utils.py +10 -0
__init__.py ADDED
File without changes
app.py CHANGED
@@ -1,7 +1,125 @@
- import gradio as gr

- def greet(name):
-     return "Hello " + name + "!!"

- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
- iface.launch()
+ from app_settings import AppSettings
+ from utils import show_system_info
+ import constants
+ from argparse import ArgumentParser
+ from context import Context
+ from constants import APP_VERSION, LCM_DEFAULT_MODEL_OPENVINO
+ from models.interface_types import InterfaceType
+ from constants import DEVICE
+
+ parser = ArgumentParser(description=f"FAST SD CPU {constants.APP_VERSION}")
+ parser.add_argument(
+     "-s",
+     "--share",
+     action="store_true",
+     help="Create sharable link(Web UI)",
+     required=False,
+ )
+ group = parser.add_mutually_exclusive_group(required=False)
+ group.add_argument(
+     "-g",
+     "--gui",
+     action="store_true",
+     help="Start desktop GUI",
+ )
+ group.add_argument(
+     "-w",
+     "--webui",
+     action="store_true",
+     help="Start Web UI",
+ )
+ group.add_argument(
+     "-v",
+     "--version",
+     action="store_true",
+     help="Version",
+ )
+ parser.add_argument(
+     "--lcm_model_id",
+     type=str,
+     help="Model ID or path,Default SimianLuo/LCM_Dreamshaper_v7",
+     default="SimianLuo/LCM_Dreamshaper_v7",
+ )
+ parser.add_argument(
+     "--prompt",
+     type=str,
+     help="Describe the image you want to generate",
+ )
+ parser.add_argument(
+     "--image_height",
+     type=int,
+     help="Height of the image",
+     default=512,
+ )
+ parser.add_argument(
+     "--image_width",
+     type=int,
+     help="Width of the image",
+     default=512,
+ )
+ parser.add_argument(
+     "--inference_steps",
+     type=int,
+     help="Number of steps,default : 4",
+     default=4,
+ )
+ parser.add_argument(
+     "--guidance_scale",
+     type=int,
+     help="Guidance scale,default : 8.0",
+     default=8.0,
+ )
+
+ parser.add_argument(
+     "--number_of_images",
+     type=int,
+     help="Number of images to generate ,default : 1",
+     default=1,
+ )
+ parser.add_argument(
+     "--seed",
+     type=int,
+     help="Seed,default : -1 (disabled) ",
+     default=-1,
+ )
+ parser.add_argument(
+     "--use_openvino",
+     action="store_true",
+     help="Use OpenVINO model",
+ )
+
+ parser.add_argument(
+     "--use_offline_model",
+     action="store_true",
+     help="Use offline model",
+ )
+ parser.add_argument(
+     "--use_safety_checker",
+     action="store_false",
+     help="Use safety checker",
+ )
+
+ parser.add_argument(
+     "-i",
+     "--interactive",
+     action="store_true",
+     help="Interactive CLI mode",
+ )
+
+ args = parser.parse_args()
+
+ if args.version:
+     print(APP_VERSION)
+     exit()
+
+ parser.print_help()
+ show_system_info()
+ print(f"Using device : {constants.DEVICE}")
+
+ app_settings = AppSettings()
+ app_settings.load()
+
+
+ start_webui(
+     app_settings,
+     args.share,)
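
Note: the hunk above ends at the start_webui() call, so the wiring from the parsed CLI arguments into the new backend is not visible in this diff. Below is a rough, non-authoritative sketch of how the flags could map onto the classes added later in this commit; the LCMDiffusionSetting field names are assumptions inferred from how LCMTextToImage.generate() reads them further down.

from backend.lcm_text_to_image import LCMTextToImage
from backend.models.lcmdiffusion_setting import LCMDiffusionSetting


def generate_from_args(args) -> list:
    # Assumed field names; only the ones read by LCMTextToImage.generate() are used.
    setting = LCMDiffusionSetting(
        prompt=args.prompt,
        image_height=args.image_height,
        image_width=args.image_width,
        inference_steps=args.inference_steps,
        guidance_scale=args.guidance_scale,
        number_of_images=args.number_of_images,
        seed=args.seed,
        use_seed=args.seed != -1,  # --seed defaults to -1, documented as "disabled"
        use_safety_checker=args.use_safety_checker,
    )
    lcm = LCMTextToImage()
    lcm.init(
        model_id=args.lcm_model_id,
        use_openvino=args.use_openvino,
        use_local_model=args.use_offline_model,
    )
    return lcm.generate(setting)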
app_settings.py ADDED
@@ -0,0 +1,46 @@
+ import yaml
+ from os import path, makedirs
+ from models.settings import Settings
+ from paths import FastStableDiffusionPaths
+
+
+ class AppSettings:
+     def __init__(self):
+         self.config_path = FastStableDiffusionPaths().get_app_settings_path()
+
+     @property
+     def settings(self):
+         return self._config
+
+     def load(self):
+         if not path.exists(self.config_path):
+             base_dir = path.dirname(self.config_path)
+             if not path.exists(base_dir):
+                 makedirs(base_dir)
+             try:
+                 print("Settings not found, creating default settings")
+                 with open(self.config_path, "w") as file:
+                     yaml.dump(
+                         self._load_default(),
+                         file,
+                     )
+             except Exception as ex:
+                 print(f"Error in creating settings : {ex}")
+                 exit()
+         try:
+             with open(self.config_path) as file:
+                 settings_dict = yaml.safe_load(file)
+                 self._config = Settings.parse_obj(settings_dict)
+         except Exception as ex:
+             print(f"Error in loading settings : {ex}")
+
+     def save(self):
+         try:
+             with open(self.config_path, "w") as file:
+                 yaml.dump(self._config.dict(), file)
+         except Exception as ex:
+             print(f"Error in saving settings : {ex}")
+
+     def _load_default(self) -> dict:
+         default_config = Settings()
+         return default_config.dict()
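
A minimal usage sketch for the AppSettings wrapper above, assuming it is run from the repository root as app.py does; the contents of models.settings.Settings are not shown in this diff, so only load and save are exercised.

from app_settings import AppSettings

app_settings = AppSettings()
app_settings.load()           # writes a default YAML config on first run, then parses it
print(app_settings.settings)  # the pydantic Settings object loaded from YAML
app_settings.save()           # dumps the current settings back to the same file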
backend/__init__.py ADDED
File without changes
backend/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (178 Bytes).
 
backend/__pycache__/image_saver.cpython-311.pyc ADDED
Binary file (2.26 kB).
 
backend/__pycache__/lcm_text_to_image.cpython-311.pyc ADDED
Binary file (4.72 kB).
 
backend/image_saver.py ADDED
@@ -0,0 +1,39 @@
+ from os import path, mkdir
+ from typing import Any
+ from uuid import uuid4
+ from backend.models.lcmdiffusion_setting import LCMDiffusionSetting
+ import json
+
+
+ class ImageSaver:
+     @staticmethod
+     def save_images(
+         output_path: str,
+         images: Any,
+         folder_name: str = "",
+         format: str = ".png",
+         lcm_diffusion_setting: LCMDiffusionSetting = None,
+     ) -> None:
+         gen_id = uuid4()
+         for index, image in enumerate(images):
+             if not path.exists(output_path):
+                 mkdir(output_path)
+
+             if folder_name:
+                 out_path = path.join(
+                     output_path,
+                     folder_name,
+                 )
+             else:
+                 out_path = output_path
+
+             if not path.exists(out_path):
+                 mkdir(out_path)
+             image.save(path.join(out_path, f"{gen_id}-{index+1}{format}"))
+         if lcm_diffusion_setting:
+             with open(path.join(out_path, f"{gen_id}.json"), "w") as json_file:
+                 json.dump(
+                     lcm_diffusion_setting.model_dump(),
+                     json_file,
+                     indent=4,
+                 )
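
A hypothetical example of calling the helper above with placeholder images; the output directory and folder name are illustrative, not values from this commit. Each image is written as <uuid>-<n>.png, and when an LCMDiffusionSetting is passed its fields are also dumped to <uuid>.json.

from PIL import Image
from backend.image_saver import ImageSaver

# Two blank placeholder images stand in for real pipeline output.
images = [Image.new("RGB", (512, 512)) for _ in range(2)]
ImageSaver.save_images(
    "results",              # created on demand by save_images
    images,
    folder_name="samples",  # hypothetical sub-folder
)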
backend/lcm_text_to_image.py ADDED
@@ -0,0 +1,115 @@
+ from typing import Any
+ from diffusers import DiffusionPipeline
+ from os import path
+ import torch
+ from backend.models.lcmdiffusion_setting import LCMDiffusionSetting
+ import numpy as np
+ from constants import DEVICE
+
+
+ if DEVICE == "cpu":
+     from backend.lcmdiffusion.pipelines.openvino.lcm_ov_pipeline import (
+         OVLatentConsistencyModelPipeline,
+     )
+     from backend.lcmdiffusion.pipelines.openvino.lcm_scheduler import (
+         LCMScheduler,
+     )
+
+
+ class LCMTextToImage:
+     def __init__(
+         self,
+         device: str = "cpu",
+     ) -> None:
+         self.pipeline = None
+         self.use_openvino = False
+         self.device = None
+         self.previous_model_id = None
+
+     def _get_lcm_diffusion_pipeline_path(self) -> str:
+         script_path = path.dirname(path.abspath(__file__))
+         file_path = path.join(
+             script_path,
+             "lcmdiffusion",
+             "pipelines",
+             "latent_consistency_txt2img.py",
+         )
+         return file_path
+
+     def init(
+         self,
+         model_id: str,
+         use_openvino: bool = False,
+         device: str = "cpu",
+         use_local_model: bool = False,
+     ) -> None:
+         self.device = device
+         self.use_openvino = use_openvino
+         if self.pipeline is None or self.previous_model_id != model_id:
+             if self.use_openvino and DEVICE == "cpu":
+                 if self.pipeline:
+                     del self.pipeline
+                 scheduler = LCMScheduler.from_pretrained(
+                     model_id,
+                     subfolder="scheduler",
+                 )
+                 self.pipeline = OVLatentConsistencyModelPipeline.from_pretrained(
+                     model_id,
+                     scheduler=scheduler,
+                     compile=False,
+                     local_files_only=use_local_model,
+                 )
+             else:
+                 if self.pipeline:
+                     del self.pipeline
+
+                 self.pipeline = DiffusionPipeline.from_pretrained(
+                     model_id,
+                     custom_pipeline=self._get_lcm_diffusion_pipeline_path(),
+                     custom_revision="main",
+                     local_files_only=use_local_model,
+                 )
+                 self.pipeline.to(
+                     torch_device=self.device,
+                     torch_dtype=torch.float32,
+                 )
+             self.previous_model_id = model_id
+
+     def generate(
+         self,
+         lcm_diffusion_setting: LCMDiffusionSetting,
+         reshape: bool = False,
+     ) -> Any:
+         if lcm_diffusion_setting.use_seed:
+             cur_seed = lcm_diffusion_setting.seed
+             if self.use_openvino:
+                 np.random.seed(cur_seed)
+             else:
+                 torch.manual_seed(cur_seed)
+
+         if self.use_openvino and DEVICE == "cpu":
+             print("Using OpenVINO")
+             if reshape:
+                 print("Reshape and compile")
+                 self.pipeline.reshape(
+                     batch_size=1,
+                     height=lcm_diffusion_setting.image_height,
+                     width=lcm_diffusion_setting.image_width,
+                     num_images_per_prompt=lcm_diffusion_setting.number_of_images,
+                 )
+                 self.pipeline.compile()
+
+         if not lcm_diffusion_setting.use_safety_checker:
+             self.pipeline.safety_checker = None
+
+         result_images = self.pipeline(
+             prompt=lcm_diffusion_setting.prompt,
+             num_inference_steps=lcm_diffusion_setting.inference_steps,
+             guidance_scale=lcm_diffusion_setting.guidance_scale,
+             width=lcm_diffusion_setting.image_width,
+             height=lcm_diffusion_setting.image_height,
+             output_type="pil",
+             num_images_per_prompt=lcm_diffusion_setting.number_of_images,
+         ).images
+
+         return result_images
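
A rough usage sketch for LCMTextToImage, assuming it is driven from the repository root so the backend imports resolve; the prompt and output path are illustrative, and the remaining LCMDiffusionSetting fields are assumed to have defaults.

from backend.lcm_text_to_image import LCMTextToImage
from backend.models.lcmdiffusion_setting import LCMDiffusionSetting

lcm = LCMTextToImage()
# Torch code path; pass use_openvino=True to use the OpenVINO pipeline on CPU instead.
lcm.init(model_id="SimianLuo/LCM_Dreamshaper_v7")
setting = LCMDiffusionSetting(prompt="a cup of coffee on a wooden table, 8k")
images = lcm.generate(setting)
images[0].save("out.png")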
backend/lcmdiffusion/pipelines/latent_consistency_txt2img.py ADDED
@@ -0,0 +1,730 @@
1
+ # Copyright 2023 Stanford University Team and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
16
+ # and https://github.com/hojonathanho/diffusion
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Any, Dict, List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import torch
24
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
25
+
26
+ from diffusers import AutoencoderKL, ConfigMixin, DiffusionPipeline, SchedulerMixin, UNet2DConditionModel, logging
27
+ from diffusers.configuration_utils import register_to_config
28
+ from diffusers.image_processor import VaeImageProcessor
29
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
30
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
31
+ from diffusers.utils import BaseOutput
32
+
33
+
34
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
35
+
36
+
37
+ class LatentConsistencyModelPipeline(DiffusionPipeline):
38
+ _optional_components = ["scheduler"]
39
+
40
+ def __init__(
41
+ self,
42
+ vae: AutoencoderKL,
43
+ text_encoder: CLIPTextModel,
44
+ tokenizer: CLIPTokenizer,
45
+ unet: UNet2DConditionModel,
46
+ scheduler: "LCMScheduler",
47
+ safety_checker: StableDiffusionSafetyChecker,
48
+ feature_extractor: CLIPImageProcessor,
49
+ requires_safety_checker: bool = True,
50
+ ):
51
+ super().__init__()
52
+
53
+ scheduler = (
54
+ scheduler
55
+ if scheduler is not None
56
+ else LCMScheduler(
57
+ beta_start=0.00085, beta_end=0.0120, beta_schedule="scaled_linear", prediction_type="epsilon"
58
+ )
59
+ )
60
+
61
+ self.register_modules(
62
+ vae=vae,
63
+ text_encoder=text_encoder,
64
+ tokenizer=tokenizer,
65
+ unet=unet,
66
+ scheduler=scheduler,
67
+ safety_checker=safety_checker,
68
+ feature_extractor=feature_extractor,
69
+ )
70
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
71
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
72
+
73
+ def _encode_prompt(
74
+ self,
75
+ prompt,
76
+ device,
77
+ num_images_per_prompt,
78
+ prompt_embeds: None,
79
+ ):
80
+ r"""
81
+ Encodes the prompt into text encoder hidden states.
82
+ Args:
83
+ prompt (`str` or `List[str]`, *optional*):
84
+ prompt to be encoded
85
+ device: (`torch.device`):
86
+ torch device
87
+ num_images_per_prompt (`int`):
88
+ number of images that should be generated per prompt
89
+ prompt_embeds (`torch.FloatTensor`, *optional*):
90
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
91
+ provided, text embeddings will be generated from `prompt` input argument.
92
+ """
93
+
94
+ if prompt is not None and isinstance(prompt, str):
95
+ pass
96
+ elif prompt is not None and isinstance(prompt, list):
97
+ len(prompt)
98
+ else:
99
+ prompt_embeds.shape[0]
100
+
101
+ if prompt_embeds is None:
102
+ text_inputs = self.tokenizer(
103
+ prompt,
104
+ padding="max_length",
105
+ max_length=self.tokenizer.model_max_length,
106
+ truncation=True,
107
+ return_tensors="pt",
108
+ )
109
+ text_input_ids = text_inputs.input_ids
110
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
111
+
112
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
113
+ text_input_ids, untruncated_ids
114
+ ):
115
+ removed_text = self.tokenizer.batch_decode(
116
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
117
+ )
118
+ logger.warning(
119
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
120
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
121
+ )
122
+
123
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
124
+ attention_mask = text_inputs.attention_mask.to(device)
125
+ else:
126
+ attention_mask = None
127
+
128
+ prompt_embeds = self.text_encoder(
129
+ text_input_ids.to(device),
130
+ attention_mask=attention_mask,
131
+ )
132
+ prompt_embeds = prompt_embeds[0]
133
+
134
+ if self.text_encoder is not None:
135
+ prompt_embeds_dtype = self.text_encoder.dtype
136
+ elif self.unet is not None:
137
+ prompt_embeds_dtype = self.unet.dtype
138
+ else:
139
+ prompt_embeds_dtype = prompt_embeds.dtype
140
+
141
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
142
+
143
+ bs_embed, seq_len, _ = prompt_embeds.shape
144
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
145
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
146
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
147
+
148
+ # Don't need to get uncond prompt embedding because of LCM Guided Distillation
149
+ return prompt_embeds
150
+
151
+ def run_safety_checker(self, image, device, dtype):
152
+ if self.safety_checker is None:
153
+ has_nsfw_concept = None
154
+ else:
155
+ if torch.is_tensor(image):
156
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
157
+ else:
158
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
159
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
160
+ image, has_nsfw_concept = self.safety_checker(
161
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
162
+ )
163
+ return image, has_nsfw_concept
164
+
165
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, latents=None):
166
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
167
+ if latents is None:
168
+ latents = torch.randn(shape, dtype=dtype).to(device)
169
+ else:
170
+ latents = latents.to(device)
171
+ # scale the initial noise by the standard deviation required by the scheduler
172
+ latents = latents * self.scheduler.init_noise_sigma
173
+ return latents
174
+
175
+ def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):
176
+ """
177
+ see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
178
+ Args:
179
+ timesteps: torch.Tensor: generate embedding vectors at these timesteps
180
+ embedding_dim: int: dimension of the embeddings to generate
181
+ dtype: data type of the generated embeddings
182
+ Returns:
183
+ embedding vectors with shape `(len(timesteps), embedding_dim)`
184
+ """
185
+ assert len(w.shape) == 1
186
+ w = w * 1000.0
187
+
188
+ half_dim = embedding_dim // 2
189
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
190
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
191
+ emb = w.to(dtype)[:, None] * emb[None, :]
192
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
193
+ if embedding_dim % 2 == 1: # zero pad
194
+ emb = torch.nn.functional.pad(emb, (0, 1))
195
+ assert emb.shape == (w.shape[0], embedding_dim)
196
+ return emb
197
+
198
+ @torch.no_grad()
199
+ def __call__(
200
+ self,
201
+ prompt: Union[str, List[str]] = None,
202
+ height: Optional[int] = 768,
203
+ width: Optional[int] = 768,
204
+ guidance_scale: float = 7.5,
205
+ num_images_per_prompt: Optional[int] = 1,
206
+ latents: Optional[torch.FloatTensor] = None,
207
+ num_inference_steps: int = 4,
208
+ lcm_origin_steps: int = 50,
209
+ prompt_embeds: Optional[torch.FloatTensor] = None,
210
+ output_type: Optional[str] = "pil",
211
+ return_dict: bool = True,
212
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
213
+ ):
214
+ # 0. Default height and width to unet
215
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
216
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
217
+
218
+ # 2. Define call parameters
219
+ if prompt is not None and isinstance(prompt, str):
220
+ batch_size = 1
221
+ elif prompt is not None and isinstance(prompt, list):
222
+ batch_size = len(prompt)
223
+ else:
224
+ batch_size = prompt_embeds.shape[0]
225
+
226
+ device = self._execution_device
227
+ # do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)
228
+
229
+ # 3. Encode input prompt
230
+ prompt_embeds = self._encode_prompt(
231
+ prompt,
232
+ device,
233
+ num_images_per_prompt,
234
+ prompt_embeds=prompt_embeds,
235
+ )
236
+
237
+ # 4. Prepare timesteps
238
+ self.scheduler.set_timesteps(num_inference_steps, lcm_origin_steps)
239
+ timesteps = self.scheduler.timesteps
240
+
241
+ # 5. Prepare latent variable
242
+ num_channels_latents = self.unet.config.in_channels
243
+ latents = self.prepare_latents(
244
+ batch_size * num_images_per_prompt,
245
+ num_channels_latents,
246
+ height,
247
+ width,
248
+ prompt_embeds.dtype,
249
+ device,
250
+ latents,
251
+ )
252
+ bs = batch_size * num_images_per_prompt
253
+
254
+ # 6. Get Guidance Scale Embedding
255
+ w = torch.tensor(guidance_scale).repeat(bs)
256
+ w_embedding = self.get_w_embedding(w, embedding_dim=256).to(device=device, dtype=latents.dtype)
257
+
258
+ # 7. LCM MultiStep Sampling Loop:
259
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
260
+ for i, t in enumerate(timesteps):
261
+ ts = torch.full((bs,), t, device=device, dtype=torch.long)
262
+ latents = latents.to(prompt_embeds.dtype)
263
+
264
+ # model prediction (v-prediction, eps, x)
265
+ model_pred = self.unet(
266
+ latents,
267
+ ts,
268
+ timestep_cond=w_embedding,
269
+ encoder_hidden_states=prompt_embeds,
270
+ cross_attention_kwargs=cross_attention_kwargs,
271
+ return_dict=False,
272
+ )[0]
273
+
274
+ # compute the previous noisy sample x_t -> x_t-1
275
+ latents, denoised = self.scheduler.step(model_pred, i, t, latents, return_dict=False)
276
+
277
+ # # call the callback, if provided
278
+ # if i == len(timesteps) - 1:
279
+ progress_bar.update()
280
+
281
+ denoised = denoised.to(prompt_embeds.dtype)
282
+ if not output_type == "latent":
283
+ image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
284
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
285
+ else:
286
+ image = denoised
287
+ has_nsfw_concept = None
288
+
289
+ if has_nsfw_concept is None:
290
+ do_denormalize = [True] * image.shape[0]
291
+ else:
292
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
293
+
294
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
295
+
296
+ if not return_dict:
297
+ return (image, has_nsfw_concept)
298
+
299
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
300
+
301
+
302
+ @dataclass
303
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
304
+ class LCMSchedulerOutput(BaseOutput):
305
+ """
306
+ Output class for the scheduler's `step` function output.
307
+ Args:
308
+ prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
309
+ Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
310
+ denoising loop.
311
+ pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
312
+ The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
313
+ `pred_original_sample` can be used to preview progress or for guidance.
314
+ """
315
+
316
+ prev_sample: torch.FloatTensor
317
+ denoised: Optional[torch.FloatTensor] = None
318
+
319
+
320
+ # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
321
+ def betas_for_alpha_bar(
322
+ num_diffusion_timesteps,
323
+ max_beta=0.999,
324
+ alpha_transform_type="cosine",
325
+ ):
326
+ """
327
+ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
328
+ (1-beta) over time from t = [0,1].
329
+ Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
330
+ to that part of the diffusion process.
331
+ Args:
332
+ num_diffusion_timesteps (`int`): the number of betas to produce.
333
+ max_beta (`float`): the maximum beta to use; use values lower than 1 to
334
+ prevent singularities.
335
+ alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
336
+ Choose from `cosine` or `exp`
337
+ Returns:
338
+ betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
339
+ """
340
+ if alpha_transform_type == "cosine":
341
+
342
+ def alpha_bar_fn(t):
343
+ return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
344
+
345
+ elif alpha_transform_type == "exp":
346
+
347
+ def alpha_bar_fn(t):
348
+ return math.exp(t * -12.0)
349
+
350
+ else:
351
+ raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
352
+
353
+ betas = []
354
+ for i in range(num_diffusion_timesteps):
355
+ t1 = i / num_diffusion_timesteps
356
+ t2 = (i + 1) / num_diffusion_timesteps
357
+ betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
358
+ return torch.tensor(betas, dtype=torch.float32)
359
+
360
+
361
+ def rescale_zero_terminal_snr(betas):
362
+ """
363
+ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
364
+ Args:
365
+ betas (`torch.FloatTensor`):
366
+ the betas that the scheduler is being initialized with.
367
+ Returns:
368
+ `torch.FloatTensor`: rescaled betas with zero terminal SNR
369
+ """
370
+ # Convert betas to alphas_bar_sqrt
371
+ alphas = 1.0 - betas
372
+ alphas_cumprod = torch.cumprod(alphas, dim=0)
373
+ alphas_bar_sqrt = alphas_cumprod.sqrt()
374
+
375
+ # Store old values.
376
+ alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
377
+ alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
378
+
379
+ # Shift so the last timestep is zero.
380
+ alphas_bar_sqrt -= alphas_bar_sqrt_T
381
+
382
+ # Scale so the first timestep is back to the old value.
383
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
384
+
385
+ # Convert alphas_bar_sqrt to betas
386
+ alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
387
+ alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
388
+ alphas = torch.cat([alphas_bar[0:1], alphas])
389
+ betas = 1 - alphas
390
+
391
+ return betas
392
+
393
+
394
+ class LCMScheduler(SchedulerMixin, ConfigMixin):
395
+ """
396
+ `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
397
+ non-Markovian guidance.
398
+ This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
399
+ methods the library implements for all schedulers such as loading and saving.
400
+ Args:
401
+ num_train_timesteps (`int`, defaults to 1000):
402
+ The number of diffusion steps to train the model.
403
+ beta_start (`float`, defaults to 0.0001):
404
+ The starting `beta` value of inference.
405
+ beta_end (`float`, defaults to 0.02):
406
+ The final `beta` value.
407
+ beta_schedule (`str`, defaults to `"linear"`):
408
+ The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
409
+ `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
410
+ trained_betas (`np.ndarray`, *optional*):
411
+ Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
412
+ clip_sample (`bool`, defaults to `True`):
413
+ Clip the predicted sample for numerical stability.
414
+ clip_sample_range (`float`, defaults to 1.0):
415
+ The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
416
+ set_alpha_to_one (`bool`, defaults to `True`):
417
+ Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
418
+ there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
419
+ otherwise it uses the alpha value at step 0.
420
+ steps_offset (`int`, defaults to 0):
421
+ An offset added to the inference steps. You can use a combination of `offset=1` and
422
+ `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable
423
+ Diffusion.
424
+ prediction_type (`str`, defaults to `epsilon`, *optional*):
425
+ Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
426
+ `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
427
+ Video](https://imagen.research.google/video/paper.pdf) paper).
428
+ thresholding (`bool`, defaults to `False`):
429
+ Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
430
+ as Stable Diffusion.
431
+ dynamic_thresholding_ratio (`float`, defaults to 0.995):
432
+ The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
433
+ sample_max_value (`float`, defaults to 1.0):
434
+ The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
435
+ timestep_spacing (`str`, defaults to `"leading"`):
436
+ The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
437
+ Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
438
+ rescale_betas_zero_snr (`bool`, defaults to `False`):
439
+ Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
440
+ dark samples instead of limiting it to samples with medium brightness. Loosely related to
441
+ [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
442
+ """
443
+
444
+ # _compatibles = [e.name for e in KarrasDiffusionSchedulers]
445
+ order = 1
446
+
447
+ @register_to_config
448
+ def __init__(
449
+ self,
450
+ num_train_timesteps: int = 1000,
451
+ beta_start: float = 0.0001,
452
+ beta_end: float = 0.02,
453
+ beta_schedule: str = "linear",
454
+ trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
455
+ clip_sample: bool = True,
456
+ set_alpha_to_one: bool = True,
457
+ steps_offset: int = 0,
458
+ prediction_type: str = "epsilon",
459
+ thresholding: bool = False,
460
+ dynamic_thresholding_ratio: float = 0.995,
461
+ clip_sample_range: float = 1.0,
462
+ sample_max_value: float = 1.0,
463
+ timestep_spacing: str = "leading",
464
+ rescale_betas_zero_snr: bool = False,
465
+ ):
466
+ if trained_betas is not None:
467
+ self.betas = torch.tensor(trained_betas, dtype=torch.float32)
468
+ elif beta_schedule == "linear":
469
+ self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
470
+ elif beta_schedule == "scaled_linear":
471
+ # this schedule is very specific to the latent diffusion model.
472
+ self.betas = (
473
+ torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
474
+ )
475
+ elif beta_schedule == "squaredcos_cap_v2":
476
+ # Glide cosine schedule
477
+ self.betas = betas_for_alpha_bar(num_train_timesteps)
478
+ else:
479
+ raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
480
+
481
+ # Rescale for zero SNR
482
+ if rescale_betas_zero_snr:
483
+ self.betas = rescale_zero_terminal_snr(self.betas)
484
+
485
+ self.alphas = 1.0 - self.betas
486
+ self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
487
+
488
+ # At every step in ddim, we are looking into the previous alphas_cumprod
489
+ # For the final step, there is no previous alphas_cumprod because we are already at 0
490
+ # `set_alpha_to_one` decides whether we set this parameter simply to one or
491
+ # whether we use the final alpha of the "non-previous" one.
492
+ self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
493
+
494
+ # standard deviation of the initial noise distribution
495
+ self.init_noise_sigma = 1.0
496
+
497
+ # setable values
498
+ self.num_inference_steps = None
499
+ self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
500
+
501
+ def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
502
+ """
503
+ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
504
+ current timestep.
505
+ Args:
506
+ sample (`torch.FloatTensor`):
507
+ The input sample.
508
+ timestep (`int`, *optional*):
509
+ The current timestep in the diffusion chain.
510
+ Returns:
511
+ `torch.FloatTensor`:
512
+ A scaled input sample.
513
+ """
514
+ return sample
515
+
516
+ def _get_variance(self, timestep, prev_timestep):
517
+ alpha_prod_t = self.alphas_cumprod[timestep]
518
+ alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
519
+ beta_prod_t = 1 - alpha_prod_t
520
+ beta_prod_t_prev = 1 - alpha_prod_t_prev
521
+
522
+ variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
523
+
524
+ return variance
525
+
526
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
527
+ def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
528
+ """
529
+ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
530
+ prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
531
+ s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
532
+ pixels from saturation at each step. We find that dynamic thresholding results in significantly better
533
+ photorealism as well as better image-text alignment, especially when using very large guidance weights."
534
+ https://arxiv.org/abs/2205.11487
535
+ """
536
+ dtype = sample.dtype
537
+ batch_size, channels, height, width = sample.shape
538
+
539
+ if dtype not in (torch.float32, torch.float64):
540
+ sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
541
+
542
+ # Flatten sample for doing quantile calculation along each image
543
+ sample = sample.reshape(batch_size, channels * height * width)
544
+
545
+ abs_sample = sample.abs() # "a certain percentile absolute pixel value"
546
+
547
+ s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
548
+ s = torch.clamp(
549
+ s, min=1, max=self.config.sample_max_value
550
+ ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
551
+
552
+ s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
553
+ sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
554
+
555
+ sample = sample.reshape(batch_size, channels, height, width)
556
+ sample = sample.to(dtype)
557
+
558
+ return sample
559
+
560
+ def set_timesteps(self, num_inference_steps: int, lcm_origin_steps: int, device: Union[str, torch.device] = None):
561
+ """
562
+ Sets the discrete timesteps used for the diffusion chain (to be run before inference).
563
+ Args:
564
+ num_inference_steps (`int`):
565
+ The number of diffusion steps used when generating samples with a pre-trained model.
566
+ """
567
+
568
+ if num_inference_steps > self.config.num_train_timesteps:
569
+ raise ValueError(
570
+ f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
571
+ f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
572
+ f" maximal {self.config.num_train_timesteps} timesteps."
573
+ )
574
+
575
+ self.num_inference_steps = num_inference_steps
576
+
577
+ # LCM Timesteps Setting: # Linear Spacing
578
+ c = self.config.num_train_timesteps // lcm_origin_steps
579
+ lcm_origin_timesteps = np.asarray(list(range(1, lcm_origin_steps + 1))) * c - 1 # LCM Training Steps Schedule
580
+ skipping_step = len(lcm_origin_timesteps) // num_inference_steps
581
+ timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps] # LCM Inference Steps Schedule
582
+
583
+ self.timesteps = torch.from_numpy(timesteps.copy()).to(device)
584
+
585
+ def get_scalings_for_boundary_condition_discrete(self, t):
586
+ self.sigma_data = 0.5 # Default: 0.5
587
+
588
+ # By dividing 0.1: This is almost a delta function at t=0.
589
+ c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2)
590
+ c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5
591
+ return c_skip, c_out
592
+
593
+ def step(
594
+ self,
595
+ model_output: torch.FloatTensor,
596
+ timeindex: int,
597
+ timestep: int,
598
+ sample: torch.FloatTensor,
599
+ eta: float = 0.0,
600
+ use_clipped_model_output: bool = False,
601
+ generator=None,
602
+ variance_noise: Optional[torch.FloatTensor] = None,
603
+ return_dict: bool = True,
604
+ ) -> Union[LCMSchedulerOutput, Tuple]:
605
+ """
606
+ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
607
+ process from the learned model outputs (most often the predicted noise).
608
+ Args:
609
+ model_output (`torch.FloatTensor`):
610
+ The direct output from learned diffusion model.
611
+ timestep (`float`):
612
+ The current discrete timestep in the diffusion chain.
613
+ sample (`torch.FloatTensor`):
614
+ A current instance of a sample created by the diffusion process.
615
+ eta (`float`):
616
+ The weight of noise for added noise in diffusion step.
617
+ use_clipped_model_output (`bool`, defaults to `False`):
618
+ If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary
619
+ because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no
620
+ clipping has happened, "corrected" `model_output` would coincide with the one provided as input and
621
+ `use_clipped_model_output` has no effect.
622
+ generator (`torch.Generator`, *optional*):
623
+ A random number generator.
624
+ variance_noise (`torch.FloatTensor`):
625
+ Alternative to generating noise with `generator` by directly providing the noise for the variance
626
+ itself. Useful for methods such as [`CycleDiffusion`].
627
+ return_dict (`bool`, *optional*, defaults to `True`):
628
+ Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.
629
+ Returns:
630
+ [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:
631
+ If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a
632
+ tuple is returned where the first element is the sample tensor.
633
+ """
634
+ if self.num_inference_steps is None:
635
+ raise ValueError(
636
+ "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
637
+ )
638
+
639
+ # 1. get previous step value
640
+ prev_timeindex = timeindex + 1
641
+ if prev_timeindex < len(self.timesteps):
642
+ prev_timestep = self.timesteps[prev_timeindex]
643
+ else:
644
+ prev_timestep = timestep
645
+
646
+ # 2. compute alphas, betas
647
+ alpha_prod_t = self.alphas_cumprod[timestep]
648
+ alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
649
+
650
+ beta_prod_t = 1 - alpha_prod_t
651
+ beta_prod_t_prev = 1 - alpha_prod_t_prev
652
+
653
+ # 3. Get scalings for boundary conditions
654
+ c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)
655
+
656
+ # 4. Different Parameterization:
657
+ parameterization = self.config.prediction_type
658
+
659
+ if parameterization == "epsilon": # noise-prediction
660
+ pred_x0 = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()
661
+
662
+ elif parameterization == "sample": # x-prediction
663
+ pred_x0 = model_output
664
+
665
+ elif parameterization == "v_prediction": # v-prediction
666
+ pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output
667
+
668
+ # 4. Denoise model output using boundary conditions
669
+ denoised = c_out * pred_x0 + c_skip * sample
670
+
671
+ # 5. Sample z ~ N(0, I), For MultiStep Inference
672
+ # Noise is not used for one-step sampling.
673
+ if len(self.timesteps) > 1:
674
+ noise = torch.randn(model_output.shape).to(model_output.device)
675
+ prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise
676
+ else:
677
+ prev_sample = denoised
678
+
679
+ if not return_dict:
680
+ return (prev_sample, denoised)
681
+
682
+ return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)
683
+
684
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
685
+ def add_noise(
686
+ self,
687
+ original_samples: torch.FloatTensor,
688
+ noise: torch.FloatTensor,
689
+ timesteps: torch.IntTensor,
690
+ ) -> torch.FloatTensor:
691
+ # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
692
+ alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
693
+ timesteps = timesteps.to(original_samples.device)
694
+
695
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
696
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
697
+ while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
698
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
699
+
700
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
701
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
702
+ while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
703
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
704
+
705
+ noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
706
+ return noisy_samples
707
+
708
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
709
+ def get_velocity(
710
+ self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor
711
+ ) -> torch.FloatTensor:
712
+ # Make sure alphas_cumprod and timestep have same device and dtype as sample
713
+ alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
714
+ timesteps = timesteps.to(sample.device)
715
+
716
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
717
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
718
+ while len(sqrt_alpha_prod.shape) < len(sample.shape):
719
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
720
+
721
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
722
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
723
+ while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
724
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
725
+
726
+ velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
727
+ return velocity
728
+
729
+ def __len__(self):
730
+ return self.config.num_train_timesteps
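
For reference, a standalone illustration of the skipping-step timestep schedule that LCMScheduler.set_timesteps() builds above, using the defaults seen elsewhere in this commit (num_train_timesteps=1000, lcm_origin_steps=50, num_inference_steps=4):

import numpy as np

num_train_timesteps = 1000
lcm_origin_steps = 50
num_inference_steps = 4

c = num_train_timesteps // lcm_origin_steps  # 20
lcm_origin_timesteps = np.asarray(list(range(1, lcm_origin_steps + 1))) * c - 1  # 19, 39, ..., 999
skipping_step = len(lcm_origin_timesteps) // num_inference_steps  # 12
timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps]
print(timesteps)  # [999 759 519 279]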
backend/lcmdiffusion/pipelines/openvino/__pycache__/lcm_ov_pipeline.cpython-311.pyc ADDED
Binary file (21.5 kB).
 
backend/lcmdiffusion/pipelines/openvino/__pycache__/lcm_scheduler.cpython-311.pyc ADDED
Binary file (26.5 kB).
 
backend/lcmdiffusion/pipelines/openvino/lcm_ov_pipeline.py ADDED
@@ -0,0 +1,390 @@
1
+ # https://huggingface.co/deinferno/LCM_Dreamshaper_v7-openvino
2
+
3
+ import inspect
4
+
5
+ from pathlib import Path
6
+ from tempfile import TemporaryDirectory
7
+ from typing import List, Optional, Tuple, Union, Dict, Any, Callable, OrderedDict
8
+
9
+ import numpy as np
10
+ import openvino
11
+ import torch
12
+
13
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
14
+ from optimum.intel.openvino.modeling_diffusion import OVStableDiffusionPipeline, OVModelUnet, OVModelVaeDecoder, OVModelTextEncoder, OVModelVaeEncoder, VaeImageProcessor
15
+ from optimum.utils import (
16
+ DIFFUSION_MODEL_TEXT_ENCODER_2_SUBFOLDER,
17
+ DIFFUSION_MODEL_TEXT_ENCODER_SUBFOLDER,
18
+ DIFFUSION_MODEL_UNET_SUBFOLDER,
19
+ DIFFUSION_MODEL_VAE_DECODER_SUBFOLDER,
20
+ DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER,
21
+ )
22
+
23
+
24
+ from diffusers import logging
25
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
26
+
27
+ class LCMOVModelUnet(OVModelUnet):
28
+ def __call__(
29
+ self,
30
+ sample: np.ndarray,
31
+ timestep: np.ndarray,
32
+ encoder_hidden_states: np.ndarray,
33
+ timestep_cond: Optional[np.ndarray] = None,
34
+ text_embeds: Optional[np.ndarray] = None,
35
+ time_ids: Optional[np.ndarray] = None,
36
+ ):
37
+ self._compile()
38
+
39
+ inputs = {
40
+ "sample": sample,
41
+ "timestep": timestep,
42
+ "encoder_hidden_states": encoder_hidden_states,
43
+ }
44
+
45
+ if timestep_cond is not None:
46
+ inputs["timestep_cond"] = timestep_cond
47
+ if text_embeds is not None:
48
+ inputs["text_embeds"] = text_embeds
49
+ if time_ids is not None:
50
+ inputs["time_ids"] = time_ids
51
+
52
+ outputs = self.request(inputs, shared_memory=True)
53
+ return list(outputs.values())
54
+
55
+ class OVLatentConsistencyModelPipeline(OVStableDiffusionPipeline):
56
+
57
+ def __init__(
58
+ self,
59
+ vae_decoder: openvino.runtime.Model,
60
+ text_encoder: openvino.runtime.Model,
61
+ unet: openvino.runtime.Model,
62
+ config: Dict[str, Any],
63
+ tokenizer: "CLIPTokenizer",
64
+ scheduler: Union["DDIMScheduler", "PNDMScheduler", "LMSDiscreteScheduler"],
65
+ feature_extractor: Optional["CLIPFeatureExtractor"] = None,
66
+ vae_encoder: Optional[openvino.runtime.Model] = None,
67
+ text_encoder_2: Optional[openvino.runtime.Model] = None,
68
+ tokenizer_2: Optional["CLIPTokenizer"] = None,
69
+ device: str = "CPU",
70
+ dynamic_shapes: bool = True,
71
+ compile: bool = True,
72
+ ov_config: Optional[Dict[str, str]] = None,
73
+ model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None,
74
+ **kwargs,
75
+ ):
76
+ self._internal_dict = config
77
+ self._device = device.upper()
78
+ self.is_dynamic = dynamic_shapes
79
+ self.ov_config = ov_config if ov_config is not None else {}
80
+ self._model_save_dir = (
81
+ Path(model_save_dir.name) if isinstance(model_save_dir, TemporaryDirectory) else model_save_dir
82
+ )
83
+ self.vae_decoder = OVModelVaeDecoder(vae_decoder, self)
84
+ self.unet = LCMOVModelUnet(unet, self)
85
+ self.text_encoder = OVModelTextEncoder(text_encoder, self) if text_encoder is not None else None
86
+ self.text_encoder_2 = (
87
+ OVModelTextEncoder(text_encoder_2, self, model_name=DIFFUSION_MODEL_TEXT_ENCODER_2_SUBFOLDER)
88
+ if text_encoder_2 is not None
89
+ else None
90
+ )
91
+ self.vae_encoder = OVModelVaeEncoder(vae_encoder, self) if vae_encoder is not None else None
92
+
93
+ if "block_out_channels" in self.vae_decoder.config:
94
+ self.vae_scale_factor = 2 ** (len(self.vae_decoder.config["block_out_channels"]) - 1)
95
+ else:
96
+ self.vae_scale_factor = 8
97
+
98
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
99
+
100
+ self.tokenizer = tokenizer
101
+ self.tokenizer_2 = tokenizer_2
102
+ self.scheduler = scheduler
103
+ self.feature_extractor = feature_extractor
104
+ self.safety_checker = None
105
+ self.preprocessors = []
106
+
107
+ if self.is_dynamic:
108
+ self.reshape(batch_size=-1, height=-1, width=-1, num_images_per_prompt=-1)
109
+
110
+ if compile:
111
+ self.compile()
112
+
113
+ sub_models = {
114
+ DIFFUSION_MODEL_TEXT_ENCODER_SUBFOLDER: self.text_encoder,
115
+ DIFFUSION_MODEL_UNET_SUBFOLDER: self.unet,
116
+ DIFFUSION_MODEL_VAE_DECODER_SUBFOLDER: self.vae_decoder,
117
+ DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER: self.vae_encoder,
118
+ DIFFUSION_MODEL_TEXT_ENCODER_2_SUBFOLDER: self.text_encoder_2,
119
+ }
120
+ for name in sub_models.keys():
121
+ self._internal_dict[name] = (
122
+ ("optimum", sub_models[name].__class__.__name__) if sub_models[name] is not None else (None, None)
123
+ )
124
+
125
+ self._internal_dict.pop("vae", None)
126
+
127
+ def _reshape_unet(
128
+ self,
129
+ model: openvino.runtime.Model,
130
+ batch_size: int = -1,
131
+ height: int = -1,
132
+ width: int = -1,
133
+ num_images_per_prompt: int = -1,
134
+ tokenizer_max_length: int = -1,
135
+ ):
136
+ if batch_size == -1 or num_images_per_prompt == -1:
137
+ batch_size = -1
138
+ else:
139
+ batch_size = batch_size * num_images_per_prompt
140
+
141
+ height = height // self.vae_scale_factor if height > 0 else height
142
+ width = width // self.vae_scale_factor if width > 0 else width
143
+ shapes = {}
144
+ for inputs in model.inputs:
145
+ shapes[inputs] = inputs.get_partial_shape()
146
+ if inputs.get_any_name() == "timestep":
147
+ shapes[inputs][0] = 1
148
+ elif inputs.get_any_name() == "sample":
149
+ in_channels = self.unet.config.get("in_channels", None)
150
+ if in_channels is None:
151
+ in_channels = shapes[inputs][1]
152
+ if in_channels.is_dynamic:
153
+ logger.warning(
154
+ "Could not identify `in_channels` from the unet configuration, to statically reshape the unet please provide a configuration."
155
+ )
156
+ self.is_dynamic = True
157
+
158
+ shapes[inputs] = [batch_size, in_channels, height, width]
159
+ elif inputs.get_any_name() == "timestep_cond":
160
+ shapes[inputs] = [batch_size, inputs.get_partial_shape()[1]]
161
+ elif inputs.get_any_name() == "text_embeds":
162
+ shapes[inputs] = [batch_size, self.text_encoder_2.config["projection_dim"]]
163
+ elif inputs.get_any_name() == "time_ids":
164
+ shapes[inputs] = [batch_size, inputs.get_partial_shape()[1]]
165
+ else:
166
+ shapes[inputs][0] = batch_size
167
+ shapes[inputs][1] = tokenizer_max_length
168
+ model.reshape(shapes)
169
+ return model
170
+
171
+ def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=np.float32):
172
+ """
173
+ see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
174
+ Args:
175
+ timesteps: np.array: generate embedding vectors at these timesteps
176
+ embedding_dim: int: dimension of the embeddings to generate
177
+ dtype: data type of the generated embeddings
178
+
179
+ Returns:
180
+ embedding vectors with shape `(len(timesteps), embedding_dim)`
181
+ """
182
+ assert len(w.shape) == 1
183
+ w = w * 1000.
184
+
185
+ half_dim = embedding_dim // 2
186
+ emb = np.log(np.array(10000.)) / (half_dim - 1)
187
+ emb = np.exp(np.arange(half_dim, dtype=dtype) * -emb)
188
+ emb = w.astype(dtype)[:, None] * emb[None, :]
189
+ emb = np.concatenate([np.sin(emb), np.cos(emb)], axis=1)
190
+ if embedding_dim % 2 == 1: # zero pad
191
+ emb = np.pad(emb, (0, 1))
192
+ assert emb.shape == (w.shape[0], embedding_dim)
193
+ return emb
194
+
195
+ # Adapted from https://github.com/huggingface/optimum/blob/15b8d1eed4d83c5004d3b60f6b6f13744b358f01/optimum/pipelines/diffusers/pipeline_stable_diffusion.py#L201
196
+ def __call__(
197
+ self,
198
+ prompt: Optional[Union[str, List[str]]] = None,
199
+ height: Optional[int] = None,
200
+ width: Optional[int] = None,
201
+ num_inference_steps: int = 4,
202
+ original_inference_steps: int = None,
203
+ guidance_scale: float = 7.5,
204
+ num_images_per_prompt: int = 1,
205
+ eta: float = 0.0,
206
+ generator: Optional[np.random.RandomState] = None,
207
+ latents: Optional[np.ndarray] = None,
208
+ prompt_embeds: Optional[np.ndarray] = None,
209
+ output_type: str = "pil",
210
+ return_dict: bool = True,
211
+ callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
212
+ callback_steps: int = 1,
213
+ guidance_rescale: float = 0.0,
214
+ ):
215
+ r"""
216
+ Function invoked when calling the pipeline for generation.
217
+
218
+ Args:
219
+ prompt (`Optional[Union[str, List[str]]]`, defaults to None):
220
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
221
+ instead.
222
+ height (`Optional[int]`, defaults to None):
223
+ The height in pixels of the generated image.
224
+ width (`Optional[int]`, defaults to None):
225
+ The width in pixels of the generated image.
226
+ num_inference_steps (`int`, defaults to 4):
227
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
228
+ expense of slower inference.
229
+ original_inference_steps (`int`, *optional*):
230
+ The original number of inference steps use to generate a linearly-spaced timestep schedule, from which
231
+ we will draw `num_inference_steps` evenly spaced timesteps from as our final timestep schedule,
232
+ following the Skipping-Step method in the paper (see Section 4.3). If not set this will default to the
233
+ scheduler's `original_inference_steps` attribute.
234
+ guidance_scale (`float`, defaults to 7.5):
235
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
236
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
237
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
238
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
239
+ usually at the expense of lower image quality.
240
+ num_images_per_prompt (`int`, defaults to 1):
241
+ The number of images to generate per prompt.
242
+ eta (`float`, defaults to 0.0):
243
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
244
+ [`schedulers.DDIMScheduler`], will be ignored for others.
245
+ generator (`Optional[np.random.RandomState]`, defaults to `None`)::
246
+ A np.random.RandomState to make generation deterministic.
247
+ latents (`Optional[np.ndarray]`, defaults to `None`):
248
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
249
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
250
+ tensor will ge generated by sampling using the supplied random `generator`.
251
+ prompt_embeds (`Optional[np.ndarray]`, defaults to `None`):
252
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
253
+ provided, text embeddings will be generated from `prompt` input argument.
254
+ output_type (`str`, defaults to `"pil"`):
255
+ The output format of the generated image. Choose between
256
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
257
+ return_dict (`bool`, defaults to `True`):
258
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
259
+ plain tuple.
260
+ callback (Optional[Callable], defaults to `None`):
261
+ A function that will be called every `callback_steps` steps during inference. The function will be
262
+ called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
263
+ callback_steps (`int`, defaults to 1):
264
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
265
+ called at every step.
266
+ guidance_rescale (`float`, defaults to 0.0):
267
+ Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
268
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of
269
+ [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
270
+ Guidance rescale factor should fix overexposure when using zero terminal SNR.
271
+
272
+ Returns:
273
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
274
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
275
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
276
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
277
+ (nsfw) content, according to the `safety_checker`.
278
+ """
279
+ height = height or self.unet.config.get("sample_size", 64) * self.vae_scale_factor
280
+ width = width or self.unet.config.get("sample_size", 64) * self.vae_scale_factor
281
+
282
+ # check inputs. Raise error if not correct
283
+ self.check_inputs(
284
+ prompt, height, width, callback_steps, None, prompt_embeds, None
285
+ )
286
+
287
+ # define call parameters
288
+ if isinstance(prompt, str):
289
+ batch_size = 1
290
+ elif isinstance(prompt, list):
291
+ batch_size = len(prompt)
292
+ else:
293
+ batch_size = prompt_embeds.shape[0]
294
+
295
+ if generator is None:
296
+ generator = np.random
297
+
298
+ # Create torch.Generator instance with same state as np.random.RandomState
299
+ torch_generator = torch.Generator().manual_seed(int(generator.get_state()[1][0]))
300
+
301
+ #do_classifier_free_guidance = guidance_scale > 1.0
302
+
303
+ # NOTE: when a LCM is distilled from an LDM via latent consistency distillation (Algorithm 1) with guided
304
+ # distillation, the forward pass of the LCM learns to approximate sampling from the LDM using CFG with the
305
+ # unconditional prompt "" (the empty string). Due to this, LCMs currently do not support negative prompts.
306
+ prompt_embeds = self._encode_prompt(
307
+ prompt,
308
+ num_images_per_prompt,
309
+ False,
310
+ negative_prompt=None,
311
+ prompt_embeds=prompt_embeds,
312
+ negative_prompt_embeds=None,
313
+ )
314
+
315
+ # set timesteps
316
+ self.scheduler.set_timesteps(num_inference_steps, "cpu", original_inference_steps=original_inference_steps)
317
+ timesteps = self.scheduler.timesteps
318
+
319
+ latents = self.prepare_latents(
320
+ batch_size * num_images_per_prompt,
321
+ self.unet.config.get("in_channels", 4),
322
+ height,
323
+ width,
324
+ prompt_embeds.dtype,
325
+ generator,
326
+ latents,
327
+ )
328
+
329
+ # Get Guidance Scale Embedding
330
+ w = np.tile(guidance_scale - 1, batch_size * num_images_per_prompt)
331
+ w_embedding = self.get_guidance_scale_embedding(w, embedding_dim=self.unet.config.get("time_cond_proj_dim", 256))
332
+
333
+ # Adapted from diffusers to extend it for other runtimes than ORT
334
+ timestep_dtype = self.unet.input_dtype.get("timestep", np.float32)
335
+
336
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
337
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
338
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
339
+ # and should be between [0, 1]
340
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
341
+ extra_step_kwargs = {}
342
+ if accepts_eta:
343
+ extra_step_kwargs["eta"] = eta
344
+
345
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
346
+ if accepts_generator:
347
+ extra_step_kwargs["generator"] = torch_generator
348
+
349
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
350
+ for i, t in enumerate(self.progress_bar(timesteps)):
351
+
352
+ # predict the noise residual
353
+ timestep = np.array([t], dtype=timestep_dtype)
354
+
355
+ noise_pred = self.unet(sample=latents, timestep=timestep, timestep_cond=w_embedding, encoder_hidden_states=prompt_embeds)[0]
356
+
357
+ # compute the previous noisy sample x_t -> x_t-1
358
+ latents, denoised = self.scheduler.step(
359
+ torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs, return_dict=False
360
+ )
361
+
362
+ latents, denoised = latents.numpy(), denoised.numpy()
363
+
364
+ # call the callback, if provided
365
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
366
+ if callback is not None and i % callback_steps == 0:
367
+ callback(i, t, latents)
368
+
369
+ if output_type == "latent":
370
+ image = latents
371
+ has_nsfw_concept = None
372
+ else:
373
+ denoised /= self.vae_decoder.config.get("scaling_factor", 0.18215)
374
+ # there seems to be a strange result when using the half-precision vae decoder with batch size > 1
375
+ image = np.concatenate(
376
+ [self.vae_decoder(latent_sample=denoised[i : i + 1])[0] for i in range(latents.shape[0])]
377
+ )
378
+ image, has_nsfw_concept = self.run_safety_checker(image)
379
+
380
+ if has_nsfw_concept is None:
381
+ do_denormalize = [True] * image.shape[0]
382
+ else:
383
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
384
+
385
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
386
+
387
+ if not return_dict:
388
+ return (image, has_nsfw_concept)
389
+
390
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
backend/lcmdiffusion/pipelines/openvino/lcm_scheduler.py ADDED
@@ -0,0 +1,529 @@
1
+ # Copyright 2023 Stanford University Team and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
16
+ # and https://github.com/hojonathanho/diffusion
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import torch
24
+
25
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
26
+ from diffusers.utils import BaseOutput, logging
27
+ from diffusers.utils.torch_utils import randn_tensor
28
+ from diffusers.schedulers.scheduling_utils import SchedulerMixin
29
+
30
+
31
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
32
+
33
+
34
+ @dataclass
35
+ class LCMSchedulerOutput(BaseOutput):
36
+ """
37
+ Output class for the scheduler's `step` function output.
38
+
39
+ Args:
40
+ prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
41
+ Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
42
+ denoising loop.
43
+ denoised (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
44
+ The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
45
+ `denoised` can be used to preview progress or for guidance.
46
+ """
47
+
48
+ prev_sample: torch.FloatTensor
49
+ denoised: Optional[torch.FloatTensor] = None
50
+
51
+
52
+ # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
53
+ def betas_for_alpha_bar(
54
+ num_diffusion_timesteps,
55
+ max_beta=0.999,
56
+ alpha_transform_type="cosine",
57
+ ):
58
+ """
59
+ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
60
+ (1-beta) over time from t = [0,1].
61
+
62
+ Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
63
+ to that part of the diffusion process.
64
+
65
+
66
+ Args:
67
+ num_diffusion_timesteps (`int`): the number of betas to produce.
68
+ max_beta (`float`): the maximum beta to use; use values lower than 1 to
69
+ prevent singularities.
70
+ alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
71
+ Choose from `cosine` or `exp`
72
+
73
+ Returns:
74
+ betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
75
+ """
76
+ if alpha_transform_type == "cosine":
77
+
78
+ def alpha_bar_fn(t):
79
+ return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
80
+
81
+ elif alpha_transform_type == "exp":
82
+
83
+ def alpha_bar_fn(t):
84
+ return math.exp(t * -12.0)
85
+
86
+ else:
87
+ raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
88
+
89
+ betas = []
90
+ for i in range(num_diffusion_timesteps):
91
+ t1 = i / num_diffusion_timesteps
92
+ t2 = (i + 1) / num_diffusion_timesteps
93
+ betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
94
+ return torch.tensor(betas, dtype=torch.float32)
95
+
96
+
97
+ # Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr
98
+ def rescale_zero_terminal_snr(betas: torch.FloatTensor) -> torch.FloatTensor:
99
+ """
100
+ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
101
+
102
+
103
+ Args:
104
+ betas (`torch.FloatTensor`):
105
+ the betas that the scheduler is being initialized with.
106
+
107
+ Returns:
108
+ `torch.FloatTensor`: rescaled betas with zero terminal SNR
109
+ """
110
+ # Convert betas to alphas_bar_sqrt
111
+ alphas = 1.0 - betas
112
+ alphas_cumprod = torch.cumprod(alphas, dim=0)
113
+ alphas_bar_sqrt = alphas_cumprod.sqrt()
114
+
115
+ # Store old values.
116
+ alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
117
+ alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
118
+
119
+ # Shift so the last timestep is zero.
120
+ alphas_bar_sqrt -= alphas_bar_sqrt_T
121
+
122
+ # Scale so the first timestep is back to the old value.
123
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
124
+
125
+ # Convert alphas_bar_sqrt to betas
126
+ alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
127
+ alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
128
+ alphas = torch.cat([alphas_bar[0:1], alphas])
129
+ betas = 1 - alphas
130
+
131
+ return betas
132
+
133
+
134
+ class LCMScheduler(SchedulerMixin, ConfigMixin):
135
+ """
136
+ `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
137
+ non-Markovian guidance.
138
+
139
+ This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. [`~ConfigMixin`] takes care of storing all config
140
+ attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be
141
+ accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving
142
+ functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions.
143
+
144
+ Args:
145
+ num_train_timesteps (`int`, defaults to 1000):
146
+ The number of diffusion steps to train the model.
147
+ beta_start (`float`, defaults to 0.0001):
148
+ The starting `beta` value of inference.
149
+ beta_end (`float`, defaults to 0.02):
150
+ The final `beta` value.
151
+ beta_schedule (`str`, defaults to `"linear"`):
152
+ The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
153
+ `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
154
+ trained_betas (`np.ndarray`, *optional*):
155
+ Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
156
+ original_inference_steps (`int`, *optional*, defaults to 50):
157
+ The default number of inference steps used to generate a linearly-spaced timestep schedule, from which we
158
+ will ultimately take `num_inference_steps` evenly spaced timesteps to form the final timestep schedule.
159
+ clip_sample (`bool`, defaults to `True`):
160
+ Clip the predicted sample for numerical stability.
161
+ clip_sample_range (`float`, defaults to 1.0):
162
+ The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
163
+ set_alpha_to_one (`bool`, defaults to `True`):
164
+ Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
165
+ there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
166
+ otherwise it uses the alpha value at step 0.
167
+ steps_offset (`int`, defaults to 0):
168
+ An offset added to the inference steps. You can use a combination of `offset=1` and
169
+ `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable
170
+ Diffusion.
171
+ prediction_type (`str`, defaults to `epsilon`, *optional*):
172
+ Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
173
+ `sample` (directly predicts the noisy sample) or `v_prediction` (see section 2.4 of [Imagen
174
+ Video](https://imagen.research.google/video/paper.pdf) paper).
175
+ thresholding (`bool`, defaults to `False`):
176
+ Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
177
+ as Stable Diffusion.
178
+ dynamic_thresholding_ratio (`float`, defaults to 0.995):
179
+ The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
180
+ sample_max_value (`float`, defaults to 1.0):
181
+ The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
182
+ timestep_spacing (`str`, defaults to `"leading"`):
183
+ The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
184
+ Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
185
+ rescale_betas_zero_snr (`bool`, defaults to `False`):
186
+ Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
187
+ dark samples instead of limiting it to samples with medium brightness. Loosely related to
188
+ [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
189
+ """
190
+
191
+ order = 1
192
+
193
+ @register_to_config
194
+ def __init__(
195
+ self,
196
+ num_train_timesteps: int = 1000,
197
+ beta_start: float = 0.00085,
198
+ beta_end: float = 0.012,
199
+ beta_schedule: str = "scaled_linear",
200
+ trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
201
+ original_inference_steps: int = 50,
202
+ clip_sample: bool = False,
203
+ clip_sample_range: float = 1.0,
204
+ set_alpha_to_one: bool = True,
205
+ steps_offset: int = 0,
206
+ prediction_type: str = "epsilon",
207
+ thresholding: bool = False,
208
+ dynamic_thresholding_ratio: float = 0.995,
209
+ sample_max_value: float = 1.0,
210
+ timestep_spacing: str = "leading",
211
+ rescale_betas_zero_snr: bool = False,
212
+ ):
213
+ if trained_betas is not None:
214
+ self.betas = torch.tensor(trained_betas, dtype=torch.float32)
215
+ elif beta_schedule == "linear":
216
+ self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
217
+ elif beta_schedule == "scaled_linear":
218
+ # this schedule is very specific to the latent diffusion model.
219
+ self.betas = (
220
+ torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
221
+ )
222
+ elif beta_schedule == "squaredcos_cap_v2":
223
+ # Glide cosine schedule
224
+ self.betas = betas_for_alpha_bar(num_train_timesteps)
225
+ else:
226
+ raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
227
+
228
+ # Rescale for zero SNR
229
+ if rescale_betas_zero_snr:
230
+ self.betas = rescale_zero_terminal_snr(self.betas)
231
+
232
+ self.alphas = 1.0 - self.betas
233
+ self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
234
+
235
+ # At every step in ddim, we are looking into the previous alphas_cumprod
236
+ # For the final step, there is no previous alphas_cumprod because we are already at 0
237
+ # `set_alpha_to_one` decides whether we set this parameter simply to one or
238
+ # whether we use the final alpha of the "non-previous" one.
239
+ self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
240
+
241
+ # standard deviation of the initial noise distribution
242
+ self.init_noise_sigma = 1.0
243
+
244
+ # setable values
245
+ self.num_inference_steps = None
246
+ self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
247
+
248
+ self._step_index = None
249
+
250
+ # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index
251
+ def _init_step_index(self, timestep):
252
+ if isinstance(timestep, torch.Tensor):
253
+ timestep = timestep.to(self.timesteps.device)
254
+
255
+ index_candidates = (self.timesteps == timestep).nonzero()
256
+
257
+ # The sigma index that is taken for the **very** first `step`
258
+ # is always the second index (or the last index if there is only 1)
259
+ # This way we can ensure we don't accidentally skip a sigma in
260
+ # case we start in the middle of the denoising schedule (e.g. for image-to-image)
261
+ if len(index_candidates) > 1:
262
+ step_index = index_candidates[1]
263
+ else:
264
+ step_index = index_candidates[0]
265
+
266
+ self._step_index = step_index.item()
267
+
268
+ @property
269
+ def step_index(self):
270
+ return self._step_index
271
+
272
+ def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
273
+ """
274
+ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
275
+ current timestep.
276
+
277
+ Args:
278
+ sample (`torch.FloatTensor`):
279
+ The input sample.
280
+ timestep (`int`, *optional*):
281
+ The current timestep in the diffusion chain.
282
+ Returns:
283
+ `torch.FloatTensor`:
284
+ A scaled input sample.
285
+ """
286
+ return sample
287
+
288
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
289
+ def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
290
+ """
291
+ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
292
+ prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
293
+ s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
294
+ pixels from saturation at each step. We find that dynamic thresholding results in significantly better
295
+ photorealism as well as better image-text alignment, especially when using very large guidance weights."
296
+
297
+ https://arxiv.org/abs/2205.11487
298
+ """
299
+ dtype = sample.dtype
300
+ batch_size, channels, *remaining_dims = sample.shape
301
+
302
+ if dtype not in (torch.float32, torch.float64):
303
+ sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
304
+
305
+ # Flatten sample for doing quantile calculation along each image
306
+ sample = sample.reshape(batch_size, channels * np.prod(remaining_dims))
307
+
308
+ abs_sample = sample.abs() # "a certain percentile absolute pixel value"
309
+
310
+ s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
311
+ s = torch.clamp(
312
+ s, min=1, max=self.config.sample_max_value
313
+ ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
314
+ s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
315
+ sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
316
+
317
+ sample = sample.reshape(batch_size, channels, *remaining_dims)
318
+ sample = sample.to(dtype)
319
+
320
+ return sample
321
+
322
+ def set_timesteps(
323
+ self,
324
+ num_inference_steps: int,
325
+ device: Union[str, torch.device] = None,
326
+ original_inference_steps: Optional[int] = None,
327
+ ):
328
+ """
329
+ Sets the discrete timesteps used for the diffusion chain (to be run before inference).
330
+
331
+ Args:
332
+ num_inference_steps (`int`):
333
+ The number of diffusion steps used when generating samples with a pre-trained model.
334
+ device (`str` or `torch.device`, *optional*):
335
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
336
+ original_inference_steps (`int`, *optional*):
337
+ The original number of inference steps, which will be used to generate a linearly-spaced timestep
338
+ schedule (which is different from the standard `diffusers` implementation). We will then take
339
+ `num_inference_steps` timesteps from this schedule, evenly spaced in terms of indices, and use that as
340
+ our final timestep schedule. If not set, this will default to the `original_inference_steps` attribute.
341
+ """
342
+
343
+ if num_inference_steps > self.config.num_train_timesteps:
344
+ raise ValueError(
345
+ f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
346
+ f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
347
+ f" maximal {self.config.num_train_timesteps} timesteps."
348
+ )
349
+
350
+ self.num_inference_steps = num_inference_steps
351
+ original_steps = (
352
+ original_inference_steps if original_inference_steps is not None else self.original_inference_steps
353
+ )
354
+
355
+ if original_steps > self.config.num_train_timesteps:
356
+ raise ValueError(
357
+ f"`original_steps`: {original_steps} cannot be larger than `self.config.train_timesteps`:"
358
+ f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
359
+ f" maximal {self.config.num_train_timesteps} timesteps."
360
+ )
361
+
362
+ if num_inference_steps > original_steps:
363
+ raise ValueError(
364
+ f"`num_inference_steps`: {num_inference_steps} cannot be larger than `original_inference_steps`:"
365
+ f" {original_steps} because the final timestep schedule will be a subset of the"
366
+ f" `original_inference_steps`-sized initial timestep schedule."
367
+ )
368
+
369
+ # LCM Timesteps Setting
370
+ # Currently, only linear spacing is supported.
371
+ c = self.config.num_train_timesteps // original_steps
372
+ # LCM Training Steps Schedule
373
+ lcm_origin_timesteps = np.asarray(list(range(1, original_steps + 1))) * c - 1
374
+ skipping_step = len(lcm_origin_timesteps) // num_inference_steps
375
+ # LCM Inference Steps Schedule
376
+ timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps]
377
+
378
+ self.timesteps = torch.from_numpy(timesteps.copy()).to(device=device, dtype=torch.long)
379
+
380
+ self._step_index = None
381
+
382
+ def get_scalings_for_boundary_condition_discrete(self, t):
383
+ self.sigma_data = 0.5 # Default: 0.5
384
+
385
+ # By dividing by 0.1: this is almost a delta function at t=0.
386
+ c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2)
387
+ c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5
388
+ return c_skip, c_out
389
+
390
+ def step(
391
+ self,
392
+ model_output: torch.FloatTensor,
393
+ timestep: int,
394
+ sample: torch.FloatTensor,
395
+ generator: Optional[torch.Generator] = None,
396
+ return_dict: bool = True,
397
+ ) -> Union[LCMSchedulerOutput, Tuple]:
398
+ """
399
+ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
400
+ process from the learned model outputs (most often the predicted noise).
401
+
402
+ Args:
403
+ model_output (`torch.FloatTensor`):
404
+ The direct output from learned diffusion model.
405
+ timestep (`float`):
406
+ The current discrete timestep in the diffusion chain.
407
+ sample (`torch.FloatTensor`):
408
+ A current instance of a sample created by the diffusion process.
409
+ generator (`torch.Generator`, *optional*):
410
+ A random number generator.
411
+ return_dict (`bool`, *optional*, defaults to `True`):
412
+ Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.
413
+ Returns:
414
+ [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:
415
+ If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a
416
+ tuple is returned where the first element is the sample tensor.
417
+ """
418
+ if self.num_inference_steps is None:
419
+ raise ValueError(
420
+ "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
421
+ )
422
+
423
+ if self.step_index is None:
424
+ self._init_step_index(timestep)
425
+
426
+ # 1. get previous step value
427
+ prev_step_index = self.step_index + 1
428
+ if prev_step_index < len(self.timesteps):
429
+ prev_timestep = self.timesteps[prev_step_index]
430
+ else:
431
+ prev_timestep = timestep
432
+
433
+ # 2. compute alphas, betas
434
+ alpha_prod_t = self.alphas_cumprod[timestep]
435
+ alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
436
+
437
+ beta_prod_t = 1 - alpha_prod_t
438
+ beta_prod_t_prev = 1 - alpha_prod_t_prev
439
+
440
+ # 3. Get scalings for boundary conditions
441
+ c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)
442
+
443
+ # 4. Compute the predicted original sample x_0 based on the model parameterization
444
+ if self.config.prediction_type == "epsilon": # noise-prediction
445
+ predicted_original_sample = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()
446
+ elif self.config.prediction_type == "sample": # x-prediction
447
+ predicted_original_sample = model_output
448
+ elif self.config.prediction_type == "v_prediction": # v-prediction
449
+ predicted_original_sample = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output
450
+ else:
451
+ raise ValueError(
452
+ f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"
453
+ " `v_prediction` for `LCMScheduler`."
454
+ )
455
+
456
+ # 5. Clip or threshold "predicted x_0"
457
+ if self.config.thresholding:
458
+ predicted_original_sample = self._threshold_sample(predicted_original_sample)
459
+ elif self.config.clip_sample:
460
+ predicted_original_sample = predicted_original_sample.clamp(
461
+ -self.config.clip_sample_range, self.config.clip_sample_range
462
+ )
463
+
464
+ # 6. Denoise model output using boundary conditions
465
+ denoised = c_out * predicted_original_sample + c_skip * sample
466
+
467
+ # 7. Sample and inject noise z ~ N(0, I) for MultiStep Inference
468
+ # Noise is not used for one-step sampling.
469
+ if len(self.timesteps) > 1:
470
+ noise = randn_tensor(model_output.shape, generator=generator, device=model_output.device)
471
+ prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise
472
+ else:
473
+ prev_sample = denoised
474
+
475
+ # upon completion increase step index by one
476
+ self._step_index += 1
477
+
478
+ if not return_dict:
479
+ return (prev_sample, denoised)
480
+
481
+ return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)
482
+
483
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
484
+ def add_noise(
485
+ self,
486
+ original_samples: torch.FloatTensor,
487
+ noise: torch.FloatTensor,
488
+ timesteps: torch.IntTensor,
489
+ ) -> torch.FloatTensor:
490
+ # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
491
+ alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
492
+ timesteps = timesteps.to(original_samples.device)
493
+
494
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
495
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
496
+ while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
497
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
498
+
499
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
500
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
501
+ while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
502
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
503
+
504
+ noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
505
+ return noisy_samples
506
+
507
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
508
+ def get_velocity(
509
+ self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor
510
+ ) -> torch.FloatTensor:
511
+ # Make sure alphas_cumprod and timestep have same device and dtype as sample
512
+ alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
513
+ timesteps = timesteps.to(sample.device)
514
+
515
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
516
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
517
+ while len(sqrt_alpha_prod.shape) < len(sample.shape):
518
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
519
+
520
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
521
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
522
+ while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
523
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
524
+
525
+ velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
526
+ return velocity
527
+
528
+ def __len__(self):
529
+ return self.config.num_train_timesteps
backend/models/__pycache__/lcmdiffusion_setting.cpython-311.pyc ADDED
Binary file (1.39 kB).
 
backend/models/lcmdiffusion_setting.py ADDED
@@ -0,0 +1,19 @@
1
+ from typing import Optional
2
+
3
+ from pydantic import BaseModel
4
+ from constants import LCM_DEFAULT_MODEL
5
+
6
+
7
+ class LCMDiffusionSetting(BaseModel):
8
+ lcm_model_id: str = LCM_DEFAULT_MODEL
9
+ prompt: str = ""
10
+ image_height: Optional[int] = 512
11
+ image_width: Optional[int] = 512
12
+ inference_steps: Optional[int] = 4
13
+ guidance_scale: Optional[float] = 8
14
+ number_of_images: Optional[int] = 1
15
+ seed: Optional[int] = -1
16
+ use_openvino: bool = False
17
+ use_seed: bool = False
18
+ use_offline_model: bool = False
19
+ use_safety_checker: bool = True
constants.py ADDED
@@ -0,0 +1,10 @@
1
+ from os import environ
2
+
3
+ APP_VERSION = "v1.0.0 beta 7"
4
+ LCM_DEFAULT_MODEL = "SimianLuo/LCM_Dreamshaper_v7"
5
+ LCM_DEFAULT_MODEL_OPENVINO = "deinferno/LCM_Dreamshaper_v7-openvino"
6
+ APP_NAME = "FastSD CPU"
7
+ APP_SETTINGS_FILE = "settings.yaml"
8
+ RESULTS_DIRECTORY = "results"
9
+ CONFIG_DIRECTORY = "configs"
10
+ DEVICE = environ.get("DEVICE", "cpu")
context.py ADDED
@@ -0,0 +1,44 @@
1
+ from typing import Any
2
+ from app_settings import Settings
3
+ from models.interface_types import InterfaceType
4
+ from backend.lcm_text_to_image import LCMTextToImage
5
+ from time import time
6
+ from backend.image_saver import ImageSaver
7
+ from pprint import pprint
8
+
9
+
10
+ class Context:
11
+ def __init__(
12
+ self,
13
+ interface_type: InterfaceType,
14
+ device="cpu",
15
+ ):
16
+ self.interface_type = interface_type
17
+ self.lcm_text_to_image = LCMTextToImage(device)
18
+
19
+ def generate_text_to_image(
20
+ self,
21
+ settings: Settings,
22
+ reshape: bool = False,
23
+ device: str = "cpu",
24
+ ) -> Any:
25
+ tick = time()
26
+ pprint(settings.lcm_diffusion_setting.model_dump())
27
+ self.lcm_text_to_image.init(
28
+ settings.lcm_diffusion_setting.lcm_model_id,
29
+ settings.lcm_diffusion_setting.use_openvino,
30
+ device,
31
+ settings.lcm_diffusion_setting.use_offline_model,
32
+ )
33
+ images = self.lcm_text_to_image.generate(
34
+ settings.lcm_diffusion_setting,
35
+ reshape,
36
+ )
37
+ elapsed = time() - tick
38
+ ImageSaver.save_images(
39
+ settings.results_path,
40
+ images=images,
41
+ lcm_diffusion_setting=settings.lcm_diffusion_setting,
42
+ )
43
+ print(f"Elapsed time : {elapsed:.2f} seconds")
44
+ return images
frontend/__pycache__/utils.cpython-311.pyc ADDED
Binary file (1.49 kB).
 
frontend/gui/__pycache__/app_window.cpython-311.pyc ADDED
Binary file (29.9 kB).
 
frontend/gui/__pycache__/image_generator_worker.cpython-311.pyc ADDED
Binary file (2.54 kB).
 
frontend/gui/__pycache__/ui.cpython-311.pyc ADDED
Binary file (907 Bytes).
 
frontend/gui/app_window.py ADDED
@@ -0,0 +1,435 @@
1
+ from PyQt5.QtWidgets import (
2
+ QWidget,
3
+ QPushButton,
4
+ QHBoxLayout,
5
+ QVBoxLayout,
6
+ QLabel,
7
+ QLineEdit,
8
+ QMainWindow,
9
+ QSlider,
10
+ QTabWidget,
11
+ QSpacerItem,
12
+ QSizePolicy,
13
+ QComboBox,
14
+ QCheckBox,
15
+ QTextEdit,
16
+ QToolButton,
17
+ QFileDialog,
18
+ )
19
+
20
+ from PyQt5.QtGui import QPixmap, QDesktopServices
21
+ from PyQt5.QtCore import QSize, QThreadPool, Qt, QUrl
22
+
23
+ from PIL.ImageQt import ImageQt
24
+ from constants import (
25
+ LCM_DEFAULT_MODEL,
26
+ LCM_DEFAULT_MODEL_OPENVINO,
27
+ APP_NAME,
28
+ APP_VERSION,
29
+ )
30
+ from frontend.gui.image_generator_worker import ImageGeneratorWorker
31
+ from app_settings import AppSettings
32
+ from paths import FastStableDiffusionPaths
33
+ from frontend.utils import is_reshape_required
34
+ from context import Context
35
+ from models.interface_types import InterfaceType
36
+ from constants import DEVICE
37
+ from frontend.utils import enable_openvino_controls
38
+
39
+
40
+ class MainWindow(QMainWindow):
41
+ def __init__(self, config: AppSettings):
42
+ super().__init__()
43
+ self.setWindowTitle(APP_NAME)
44
+ self.setFixedSize(QSize(600, 620))
45
+ self.init_ui()
46
+ self.pipeline = None
47
+ self.threadpool = QThreadPool()
48
+ self.config = config
49
+ self.device = "cpu"
50
+ self.previous_width = 0
51
+ self.previous_height = 0
52
+ self.previous_model = ""
53
+ self.previous_num_of_images = 0
54
+ self.context = Context(InterfaceType.GUI)
55
+ self.init_ui_values()
56
+ self.gen_images = []
57
+ self.image_index = 0
58
+ print(f"Output path : { self.config.settings.results_path}")
59
+
60
+ def init_ui_values(self):
61
+ self.lcm_model.setEnabled(
62
+ not self.config.settings.lcm_diffusion_setting.use_openvino
63
+ )
64
+ self.guidance.setValue(
65
+ int(self.config.settings.lcm_diffusion_setting.guidance_scale * 10)
66
+ )
67
+ self.seed_value.setEnabled(self.config.settings.lcm_diffusion_setting.use_seed)
68
+ self.safety_checker.setChecked(
69
+ self.config.settings.lcm_diffusion_setting.use_safety_checker
70
+ )
71
+ self.use_openvino_check.setChecked(
72
+ self.config.settings.lcm_diffusion_setting.use_openvino
73
+ )
74
+ self.width.setCurrentText(
75
+ str(self.config.settings.lcm_diffusion_setting.image_width)
76
+ )
77
+ self.height.setCurrentText(
78
+ str(self.config.settings.lcm_diffusion_setting.image_height)
79
+ )
80
+ self.inference_steps.setValue(
81
+ int(self.config.settings.lcm_diffusion_setting.inference_steps)
82
+ )
83
+ self.seed_check.setChecked(self.config.settings.lcm_diffusion_setting.use_seed)
84
+ self.seed_value.setText(str(self.config.settings.lcm_diffusion_setting.seed))
85
+ self.use_local_model_folder.setChecked(
86
+ self.config.settings.lcm_diffusion_setting.use_offline_model
87
+ )
88
+ self.results_path.setText(self.config.settings.results_path)
89
+ self.num_images.setValue(
90
+ self.config.settings.lcm_diffusion_setting.number_of_images
91
+ )
92
+
93
+ def init_ui(self):
94
+ self.create_main_tab()
95
+ self.create_settings_tab()
96
+ self.create_about_tab()
97
+ self.show()
98
+
99
+ def create_main_tab(self):
100
+ self.img = QLabel("<<Image>>")
101
+ self.img.setAlignment(Qt.AlignCenter)
102
+ self.img.setFixedSize(QSize(512, 512))
103
+
104
+ self.prompt = QTextEdit()
105
+ self.prompt.setPlaceholderText("A fantasy landscape")
106
+ self.prompt.setAcceptRichText(False)
107
+ self.generate = QPushButton("Generate")
108
+ self.generate.clicked.connect(self.text_to_image)
109
+ self.prompt.setFixedHeight(35)
110
+ self.browse_results = QPushButton("...")
111
+ self.browse_results.setFixedWidth(30)
112
+ self.browse_results.clicked.connect(self.on_open_results_folder)
113
+ self.browse_results.setToolTip("Open output folder")
114
+
115
+ hlayout = QHBoxLayout()
116
+ hlayout.addWidget(self.prompt)
117
+ hlayout.addWidget(self.generate)
118
+ hlayout.addWidget(self.browse_results)
119
+
120
+ self.previous_img_btn = QToolButton()
121
+ self.previous_img_btn.setText("<")
122
+ self.previous_img_btn.clicked.connect(self.on_show_previous_image)
123
+ self.next_img_btn = QToolButton()
124
+ self.next_img_btn.setText(">")
125
+ self.next_img_btn.clicked.connect(self.on_show_next_image)
126
+ hlayout_nav = QHBoxLayout()
127
+ hlayout_nav.addWidget(self.previous_img_btn)
128
+ hlayout_nav.addWidget(self.img)
129
+ hlayout_nav.addWidget(self.next_img_btn)
130
+
131
+ vlayout = QVBoxLayout()
132
+ vlayout.addLayout(hlayout_nav)
133
+ vlayout.addLayout(hlayout)
134
+
135
+ self.tab_widget = QTabWidget(self)
136
+ self.tab_main = QWidget()
137
+ self.tab_settings = QWidget()
138
+ self.tab_about = QWidget()
139
+ self.tab_main.setLayout(vlayout)
140
+
141
+ self.tab_widget.addTab(self.tab_main, "Text to Image")
142
+ self.tab_widget.addTab(self.tab_settings, "Settings")
143
+ self.tab_widget.addTab(self.tab_about, "About")
144
+
145
+ self.setCentralWidget(self.tab_widget)
146
+ self.use_seed = False
147
+
148
+ def create_settings_tab(self):
149
+ model_hlayout = QHBoxLayout()
150
+ self.lcm_model_label = QLabel("Latent Consistency Model:")
151
+ self.lcm_model = QLineEdit(LCM_DEFAULT_MODEL)
152
+ model_hlayout.addWidget(self.lcm_model_label)
153
+ model_hlayout.addWidget(self.lcm_model)
154
+
155
+ self.inference_steps_value = QLabel("Number of inference steps: 4")
156
+ self.inference_steps = QSlider(orientation=Qt.Orientation.Horizontal)
157
+ self.inference_steps.setMaximum(25)
158
+ self.inference_steps.setMinimum(1)
159
+ self.inference_steps.setValue(4)
160
+ self.inference_steps.valueChanged.connect(self.update_steps_label)
161
+
162
+ self.num_images_value = QLabel("Number of images: 1")
163
+ self.num_images = QSlider(orientation=Qt.Orientation.Horizontal)
164
+ self.num_images.setMaximum(100)
165
+ self.num_images.setMinimum(1)
166
+ self.num_images.setValue(1)
167
+ self.num_images.valueChanged.connect(self.update_num_images_label)
168
+
169
+ self.guidance_value = QLabel("Guidance scale: 8")
170
+ self.guidance = QSlider(orientation=Qt.Orientation.Horizontal)
171
+ self.guidance.setMaximum(200)
172
+ self.guidance.setMinimum(10)
173
+ self.guidance.setValue(80)
174
+ self.guidance.valueChanged.connect(self.update_guidance_label)
175
+
176
+ self.width_value = QLabel("Width :")
177
+ self.width = QComboBox(self)
178
+ self.width.addItem("256")
179
+ self.width.addItem("512")
180
+ self.width.addItem("768")
181
+ self.width.setCurrentText("512")
182
+ self.width.currentIndexChanged.connect(self.on_width_changed)
183
+
184
+ self.height_value = QLabel("Height :")
185
+ self.height = QComboBox(self)
186
+ self.height.addItem("256")
187
+ self.height.addItem("512")
188
+ self.height.addItem("768")
189
+ self.height.setCurrentText("512")
190
+ self.height.currentIndexChanged.connect(self.on_height_changed)
191
+
192
+ self.seed_check = QCheckBox("Use seed")
193
+ self.seed_value = QLineEdit()
194
+ self.seed_value.setInputMask("9999999999")
195
+ self.seed_value.setText("123123")
196
+ self.seed_check.stateChanged.connect(self.seed_changed)
197
+
198
+ self.safety_checker = QCheckBox("Use safety checker")
199
+ self.safety_checker.setChecked(True)
200
+ self.safety_checker.stateChanged.connect(self.use_safety_checker_changed)
201
+
202
+ self.use_openvino_check = QCheckBox("Use OpenVINO")
203
+ self.use_openvino_check.setChecked(False)
204
+ self.use_local_model_folder = QCheckBox(
205
+ "Use locally cached model or downloaded model folder(offline)"
206
+ )
207
+ self.use_openvino_check.setEnabled(enable_openvino_controls())
208
+ self.use_local_model_folder.setChecked(False)
209
+ self.use_local_model_folder.stateChanged.connect(self.use_offline_model_changed)
210
+ self.use_openvino_check.stateChanged.connect(self.use_openvino_changed)
211
+
212
+ hlayout = QHBoxLayout()
213
+ hlayout.addWidget(self.seed_check)
214
+ hlayout.addWidget(self.seed_value)
215
+ hspacer = QSpacerItem(20, 10, QSizePolicy.Expanding, QSizePolicy.Minimum)
216
+ slider_hspacer = QSpacerItem(20, 10, QSizePolicy.Expanding, QSizePolicy.Minimum)
217
+
218
+ self.results_path_label = QLabel("Output path:")
219
+ self.results_path = QLineEdit()
220
+ self.results_path.textChanged.connect(self.on_path_changed)
221
+ self.browse_folder_btn = QToolButton()
222
+ self.browse_folder_btn.setText("...")
223
+ self.browse_folder_btn.clicked.connect(self.on_browse_folder)
224
+
225
+ self.reset = QPushButton("Reset All")
226
+ self.reset.clicked.connect(self.reset_all_settings)
227
+
228
+ vlayout = QVBoxLayout()
229
+ vspacer = QSpacerItem(20, 20, QSizePolicy.Minimum, QSizePolicy.Expanding)
230
+ vlayout.addItem(hspacer)
231
+ vlayout.addLayout(model_hlayout)
232
+ vlayout.addWidget(self.use_local_model_folder)
233
+ vlayout.addItem(slider_hspacer)
234
+ vlayout.addWidget(self.inference_steps_value)
235
+ vlayout.addWidget(self.inference_steps)
236
+ vlayout.addWidget(self.num_images_value)
237
+ vlayout.addWidget(self.num_images)
238
+ vlayout.addWidget(self.width_value)
239
+ vlayout.addWidget(self.width)
240
+ vlayout.addWidget(self.height_value)
241
+ vlayout.addWidget(self.height)
242
+ vlayout.addWidget(self.guidance_value)
243
+ vlayout.addWidget(self.guidance)
244
+ vlayout.addLayout(hlayout)
245
+ vlayout.addWidget(self.safety_checker)
246
+ vlayout.addWidget(self.use_openvino_check)
247
+ vlayout.addWidget(self.results_path_label)
248
+ hlayout_path = QHBoxLayout()
249
+ hlayout_path.addWidget(self.results_path)
250
+ hlayout_path.addWidget(self.browse_folder_btn)
251
+ vlayout.addLayout(hlayout_path)
252
+ self.tab_settings.setLayout(vlayout)
253
+ hlayout_reset = QHBoxLayout()
254
+ hspacer = QSpacerItem(20, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
255
+ hlayout_reset.addItem(hspacer)
256
+ hlayout_reset.addWidget(self.reset)
257
+ vlayout.addLayout(hlayout_reset)
258
+ vlayout.addItem(vspacer)
259
+
260
+ def create_about_tab(self):
261
+ self.label = QLabel()
262
+ self.label.setAlignment(Qt.AlignCenter)
263
+ self.label.setText(
264
+ f"""<h1>FastSD CPU {APP_VERSION}</h1>
265
+ <h3>(c)2023 - Rupesh Sreeraman</h3>
266
+ <h3>Faster stable diffusion on CPU</h3>
267
+ <h3>Based on Latent Consistency Models</h3>
268
+ <h3>GitHub : https://github.com/rupeshs/fastsdcpu/</h3>"""
269
+ )
270
+
271
+ vlayout = QVBoxLayout()
272
+ vlayout.addWidget(self.label)
273
+ self.tab_about.setLayout(vlayout)
274
+
275
+ def on_show_next_image(self):
276
+ if self.image_index != len(self.gen_images) - 1 and len(self.gen_images) > 0:
277
+ self.previous_img_btn.setEnabled(True)
278
+ self.image_index += 1
279
+ self.img.setPixmap(self.gen_images[self.image_index])
280
+ if self.image_index == len(self.gen_images) - 1:
281
+ self.next_img_btn.setEnabled(False)
282
+
283
+ def on_open_results_folder(self):
284
+ QDesktopServices.openUrl(QUrl.fromLocalFile(self.config.settings.results_path))
285
+
286
+ def on_show_previous_image(self):
287
+ if self.image_index != 0:
288
+ self.next_img_btn.setEnabled(True)
289
+ self.image_index -= 1
290
+ self.img.setPixmap(self.gen_images[self.image_index])
291
+ if self.image_index == 0:
292
+ self.previous_img_btn.setEnabled(False)
293
+
294
+ def on_path_changed(self, text):
295
+ self.config.settings.results_path = text
296
+
297
+ def on_browse_folder(self):
298
+ options = QFileDialog.Options()
299
+ options |= QFileDialog.ShowDirsOnly
300
+
301
+ folder_path = QFileDialog.getExistingDirectory(
302
+ self, "Select a Folder", "", options=options
303
+ )
304
+
305
+ if folder_path:
306
+ self.config.settings.results_path = folder_path
307
+ self.results_path.setText(folder_path)
308
+
309
+ def on_width_changed(self, index):
310
+ width_txt = self.width.itemText(index)
311
+ self.config.settings.lcm_diffusion_setting.image_width = int(width_txt)
312
+
313
+ def on_height_changed(self, index):
314
+ height_txt = self.height.itemText(index)
315
+ self.config.settings.lcm_diffusion_setting.image_height = int(height_txt)
316
+
317
+ def use_openvino_changed(self, state):
318
+ if state == 2:
319
+ self.lcm_model.setEnabled(False)
320
+ self.config.settings.lcm_diffusion_setting.use_openvino = True
321
+ else:
322
+ self.config.settings.lcm_diffusion_setting.use_openvino = False
323
+
324
+ def use_offline_model_changed(self, state):
325
+ if state == 2:
326
+ self.config.settings.lcm_diffusion_setting.use_offline_model = True
327
+ else:
328
+ self.config.settings.lcm_diffusion_setting.use_offline_model = False
329
+
330
+ def use_safety_checker_changed(self, state):
331
+ if state == 2:
332
+ self.config.settings.lcm_diffusion_setting.use_safety_checker = True
333
+ else:
334
+ self.config.settings.lcm_diffusion_setting.use_safety_checker = False
335
+
336
+ def update_steps_label(self, value):
337
+ self.inference_steps_value.setText(f"Number of inference steps: {value}")
338
+ self.config.settings.lcm_diffusion_setting.inference_steps = value
339
+
340
+ def update_num_images_label(self, value):
341
+ self.num_images_value.setText(f"Number of images: {value}")
342
+ self.config.settings.lcm_diffusion_setting.number_of_images = value
343
+
344
+ def update_guidance_label(self, value):
345
+ val = round(int(value) / 10, 1)
346
+ self.guidance_value.setText(f"Guidance scale: {val}")
347
+ self.config.settings.lcm_diffusion_setting.guidance_scale = val
348
+
349
+ def seed_changed(self, state):
350
+ if state == 2:
351
+ self.seed_value.setEnabled(True)
352
+ self.config.settings.lcm_diffusion_setting.use_seed = True
353
+ else:
354
+ self.seed_value.setEnabled(False)
355
+ self.config.settings.lcm_diffusion_setting.use_seed = False
356
+
357
+ def get_seed_value(self) -> int:
358
+ use_seed = self.config.settings.lcm_diffusion_setting.use_seed
359
+ seed_value = int(self.seed_value.text()) if use_seed else -1
360
+ return seed_value
361
+
362
+ def generate_image(self):
363
+ self.config.settings.lcm_diffusion_setting.seed = self.get_seed_value()
364
+ self.config.settings.lcm_diffusion_setting.prompt = self.prompt.toPlainText()
365
+
366
+ if self.config.settings.lcm_diffusion_setting.use_openvino:
367
+ model_id = LCM_DEFAULT_MODEL_OPENVINO
368
+ else:
369
+ model_id = self.lcm_model.text()
370
+
371
+ self.config.settings.lcm_diffusion_setting.lcm_model_id = model_id
372
+
373
+ reshape_required = False
374
+ if self.config.settings.lcm_diffusion_setting.use_openvino:
375
+ # Detect dimension change
376
+ reshape_required = is_reshape_required(
377
+ self.previous_width,
378
+ self.config.settings.lcm_diffusion_setting.image_width,
379
+ self.previous_height,
380
+ self.config.settings.lcm_diffusion_setting.image_height,
381
+ self.previous_model,
382
+ model_id,
383
+ self.previous_num_of_images,
384
+ self.config.settings.lcm_diffusion_setting.number_of_images,
385
+ )
386
+
387
+ images = self.context.generate_text_to_image(
388
+ self.config.settings,
389
+ reshape_required,
390
+ DEVICE,
391
+ )
392
+ self.image_index = 0
393
+ self.gen_images = []
394
+ for img in images:
395
+ im = ImageQt(img).copy()
396
+ pixmap = QPixmap.fromImage(im)
397
+ self.gen_images.append(pixmap)
398
+
399
+ if len(self.gen_images) > 1:
400
+ self.next_img_btn.setEnabled(True)
401
+ self.previous_img_btn.setEnabled(False)
402
+ else:
403
+ self.next_img_btn.setEnabled(False)
404
+ self.previous_img_btn.setEnabled(False)
405
+
406
+ self.img.setPixmap(self.gen_images[0])
407
+
408
+ self.previous_width = self.config.settings.lcm_diffusion_setting.image_width
409
+ self.previous_height = self.config.settings.lcm_diffusion_setting.image_height
410
+ self.previous_model = model_id
411
+ self.previous_num_of_images = (
412
+ self.config.settings.lcm_diffusion_setting.number_of_images
413
+ )
414
+
415
+ def text_to_image(self):
416
+ self.img.setText("Please wait...")
417
+ worker = ImageGeneratorWorker(self.generate_image)
418
+ self.threadpool.start(worker)
419
+
420
+ def closeEvent(self, event):
421
+ self.config.settings.lcm_diffusion_setting.seed = self.get_seed_value()
422
+ print(self.config.settings.lcm_diffusion_setting)
423
+ print("Saving settings")
424
+ self.config.save()
425
+
426
+ def reset_all_settings(self):
427
+ self.use_local_model_folder.setChecked(False)
428
+ self.width.setCurrentText("512")
429
+ self.height.setCurrentText("512")
430
+ self.inference_steps.setValue(4)
431
+ self.guidance.setValue(80)
432
+ self.use_openvino_check.setChecked(False)
433
+ self.seed_check.setChecked(False)
434
+ self.safety_checker.setChecked(True)
435
+ self.results_path.setText(FastStableDiffusionPaths().get_results_path())
frontend/gui/image_generator_worker.py ADDED
@@ -0,0 +1,37 @@
1
+ from PyQt5.QtCore import (
2
+ pyqtSlot,
3
+ QRunnable,
4
+ pyqtSignal,
6
+ )
7
+ from PyQt5.QtCore import QObject
8
+ import traceback
9
+ import sys
10
+
11
+
12
+ class WorkerSignals(QObject):
13
+ finished = pyqtSignal()
14
+ error = pyqtSignal(tuple)
15
+ result = pyqtSignal(object)
16
+
17
+
18
+ class ImageGeneratorWorker(QRunnable):
19
+ def __init__(self, fn, *args, **kwargs):
20
+ super(ImageGeneratorWorker, self).__init__()
21
+ self.fn = fn
22
+ self.args = args
23
+ self.kwargs = kwargs
24
+ self.signals = WorkerSignals()
25
+
26
+ @pyqtSlot()
27
+ def run(self):
28
+ try:
29
+ result = self.fn(*self.args, **self.kwargs)
30
+ except Exception:
31
+ traceback.print_exc()
32
+ exctype, value = sys.exc_info()[:2]
33
+ self.signals.error.emit((exctype, value, traceback.format_exc()))
34
+ else:
35
+ self.signals.result.emit(result)
36
+ finally:
37
+ self.signals.finished.emit()
frontend/gui/ui.py ADDED
@@ -0,0 +1,15 @@
1
+ from typing import List
2
+ from frontend.gui.app_window import MainWindow
3
+ from PyQt5.QtWidgets import QApplication
4
+ import sys
5
+ from app_settings import AppSettings
6
+
7
+
8
+ def start_gui(
9
+ argv: List[str],
10
+ app_settings: AppSettings,
11
+ ):
12
+ app = QApplication(sys.argv)
13
+ window = MainWindow(app_settings)
14
+ window.show()
15
+ app.exec()
frontend/utils.py ADDED
@@ -0,0 +1,32 @@
1
+ from constants import DEVICE
2
+ import platform
3
+
4
+
5
+ def is_reshape_required(
6
+ prev_width: int,
7
+ cur_width: int,
8
+ prev_height: int,
9
+ cur_height: int,
10
+ prev_model: str,
11
+ cur_model: str,
12
+ prev_num_of_images: int,
13
+ cur_num_of_images: int,
14
+ ) -> bool:
15
+ print(f"width - {prev_width} {cur_width}")
16
+ print(f"height - {prev_height} {cur_height}")
17
+ print(f"model - {prev_model} {cur_model}")
18
+ reshape_required = False
19
+ if (
20
+ prev_width != cur_width
21
+ or prev_height != cur_height
22
+ or prev_model != cur_model
23
+ or prev_num_of_images != cur_num_of_images
24
+ ):
25
+ print("Reshape and compile")
26
+ reshape_required = True
27
+
28
+ return reshape_required
29
+
30
+
31
+ def enable_openvino_controls() -> bool:
32
+ return DEVICE == "cpu" and platform.system().lower() != "darwin"
frontend/webui/css/style.css ADDED
@@ -0,0 +1,24 @@
1
+ footer {
2
+ visibility: hidden
3
+ }
4
+
5
+ #generate_button {
6
+ color: white;
7
+ border-color: #007bff;
8
+ background: #007bff;
9
+ width: 150px;
10
+ margin-top: 38px;
11
+ height: 80px;
12
+ }
13
+
14
+ #save_button {
15
+ color: white;
16
+ border-color: #028b40;
17
+ background: #01b97c;
18
+ width: 200px;
19
+ }
20
+
21
+ #settings_header {
22
+ background: rgb(245, 105, 105);
23
+
24
+ }
frontend/webui/text_to_image_ui.py ADDED
@@ -0,0 +1,179 @@
1
+ from typing import Any
2
+ import gradio as gr
3
+
4
+ from backend.models.lcmdiffusion_setting import LCMDiffusionSetting
5
+ from context import Context
6
+ from models.interface_types import InterfaceType
7
+ from app_settings import Settings
8
+ from constants import LCM_DEFAULT_MODEL, LCM_DEFAULT_MODEL_OPENVINO
9
+ from frontend.utils import is_reshape_required
10
+ from app_settings import AppSettings
11
+ from constants import DEVICE
12
+ from frontend.utils import enable_openvino_controls
13
+
14
+ random_enabled = True
15
+
16
+ context = Context(InterfaceType.WEBUI)
17
+ previous_width = 0
18
+ previous_height = 0
19
+ previous_model_id = ""
20
+ previous_num_of_images = 0
21
+
22
+
23
+ def generate_text_to_image(
24
+ prompt,
25
+ image_height,
26
+ image_width,
27
+ inference_steps,
28
+ guidance_scale,
29
+ num_images,
30
+ seed,
31
+ use_openvino,
32
+ use_safety_checker,
33
+ ) -> Any:
34
+ global previous_height, previous_width, previous_model_id, previous_num_of_images
35
+ model_id = LCM_DEFAULT_MODEL
36
+ if use_openvino:
37
+ model_id = LCM_DEFAULT_MODEL_OPENVINO
38
+
39
+ use_seed = True if seed != -1 else False
40
+
41
+ lcm_diffusion_settings = LCMDiffusionSetting(
42
+ lcm_model_id=model_id,
43
+ prompt=prompt,
44
+ image_height=image_height,
45
+ image_width=image_width,
46
+ inference_steps=inference_steps,
47
+ guidance_scale=guidance_scale,
48
+ number_of_images=num_images,
49
+ seed=seed,
50
+ use_openvino=use_openvino,
51
+ use_safety_checker=use_safety_checker,
52
+ use_seed=use_seed,
53
+ )
54
+ settings = Settings(
55
+ lcm_diffusion_setting=lcm_diffusion_settings,
56
+ )
57
+ reshape = False
58
+ if use_openvino:
59
+ reshape = is_reshape_required(
60
+ previous_width,
61
+ image_width,
62
+ previous_height,
63
+ image_height,
64
+ previous_model_id,
65
+ model_id,
66
+ previous_num_of_images,
67
+ num_images,
68
+ )
69
+ images = context.generate_text_to_image(
70
+ settings,
71
+ reshape,
72
+ DEVICE,
73
+ )
74
+ previous_width = image_width
75
+ previous_height = image_height
76
+ previous_model_id = model_id
77
+ previous_num_of_images = num_images
78
+
79
+ return images
80
+
81
+
82
+ def get_text_to_image_ui(app_settings: AppSettings) -> None:
83
+ with gr.Blocks():
84
+ with gr.Row():
85
+ with gr.Column():
86
+
87
+ def random_seed():
88
+ global random_enabled
89
+ random_enabled = not random_enabled
90
+ seed_val = -1
91
+ if not random_enabled:
92
+ seed_val = 42
93
+ return gr.Number.update(
94
+ interactive=not random_enabled, value=seed_val
95
+ )
96
+
97
+ with gr.Row():
98
+ prompt = gr.Textbox(
99
+ label="Describe the image you'd like to see",
100
+ lines=3,
101
+ placeholder="A fantasy landscape",
102
+ )
103
+
104
+ generate_btn = gr.Button(
105
+ "Generate",
106
+ elem_id="generate_button",
107
+ scale=0,
108
+ )
109
+ num_inference_steps = gr.Slider(
110
+ 1, 25, value=4, step=1, label="Inference Steps"
111
+ )
112
+ image_height = gr.Slider(
113
+ 256, 768, value=512, step=256, label="Image Height"
114
+ )
115
+ image_width = gr.Slider(
116
+ 256, 768, value=512, step=256, label="Image Width"
117
+ )
118
+ num_images = gr.Slider(
119
+ 1,
120
+ 50,
121
+ value=1,
122
+ step=1,
123
+ label="Number of images to generate",
124
+ )
125
+ with gr.Accordion("Advanced options", open=False):
126
+ guidance_scale = gr.Slider(
127
+ 1.0, 30.0, value=8, step=0.5, label="Guidance Scale"
128
+ )
129
+
130
+ seed = gr.Number(
131
+ label="Seed",
132
+ value=-1,
133
+ precision=0,
134
+ interactive=False,
135
+ )
136
+ seed_checkbox = gr.Checkbox(
137
+ label="Use random seed",
138
+ value=True,
139
+ interactive=True,
140
+ )
141
+
142
+ openvino_checkbox = gr.Checkbox(
143
+ label="Use OpenVINO",
144
+ value=False,
145
+ interactive=enable_openvino_controls(),
146
+ )
147
+
148
+ safety_checker_checkbox = gr.Checkbox(
149
+ label="Use Safety Checker",
150
+ value=True,
151
+ interactive=True,
152
+ )
153
+
154
+ input_params = [
155
+ prompt,
156
+ image_height,
157
+ image_width,
158
+ num_inference_steps,
159
+ guidance_scale,
160
+ num_images,
161
+ seed,
162
+ openvino_checkbox,
163
+ safety_checker_checkbox,
164
+ ]
165
+
166
+ with gr.Column():
167
+ output = gr.Gallery(
168
+ label="Generated images",
169
+ show_label=True,
170
+ elem_id="gallery",
171
+ columns=2,
172
+ )
173
+
174
+ seed_checkbox.change(fn=random_seed, outputs=seed)
175
+ generate_btn.click(
176
+ fn=generate_text_to_image,
177
+ inputs=input_params,
178
+ outputs=output,
179
+ )
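
`generate_btn.click` passes the component values to `generate_text_to_image` positionally, so the order of `input_params` must match the handler's signature. Calling the handler directly makes that mapping explicit; a hedged sketch, not part of this commit (it loads the LCM model on first use, so it is slow and needs the model available):

from frontend.webui.text_to_image_ui import generate_text_to_image

images = generate_text_to_image(
    prompt="A fantasy landscape",
    image_height=512,
    image_width=512,
    inference_steps=4,
    guidance_scale=8.0,
    num_images=1,
    seed=-1,                 # -1 keeps use_seed False, i.e. a random seed
    use_openvino=False,
    use_safety_checker=True,
)
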
frontend/webui/ui.py ADDED
@@ -0,0 +1,36 @@
+ import gradio as gr
+ from constants import APP_VERSION
+ from frontend.webui.text_to_image_ui import get_text_to_image_ui
+ from paths import FastStableDiffusionPaths
+ from app_settings import AppSettings
+
+
+ def _get_footer_message() -> str:
+     version = f"<center><p> v{APP_VERSION} "
+     footer_msg = version + (
+         ' © 2023 <a href="https://github.com/rupeshs">'
+         " Rupesh Sreeraman</a></p></center>"
+     )
+     return footer_msg
+
+
+ def get_web_ui(app_settings: AppSettings) -> gr.Blocks:
+     with gr.Blocks(
+         css=FastStableDiffusionPaths.get_css_path(),
+         title="FastSD CPU",
+     ) as fastsd_web_ui:
+         gr.HTML("<center><H1>FastSD CPU</H1></center>")
+         with gr.Tabs():
+             with gr.TabItem("Text to Image"):
+                 get_text_to_image_ui(app_settings)
+         gr.HTML(_get_footer_message())
+
+     return fastsd_web_ui
+
+
+ def start_webui(
+     app_settings: AppSettings,
+     share: bool = False,
+ ):
+     webui = get_web_ui(app_settings)
+     webui.launch(share=share)
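
A minimal sketch of serving the web UI on its own, not part of this commit (constructing `AppSettings` without arguments is an assumption):

from app_settings import AppSettings  # construction/loading details assumed
from frontend.webui.ui import get_web_ui

webui = get_web_ui(AppSettings())
webui.launch(share=False)  # share=True asks Gradio for a public link, as start_webui does
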
models/__pycache__/interface_types.cpython-311.pyc ADDED
Binary file (578 Bytes)
 
models/__pycache__/settings.cpython-311.pyc ADDED
Binary file (899 Bytes)
 
models/interface_types.py ADDED
@@ -0,0 +1,7 @@
+ from enum import Enum
+
+
+ class InterfaceType(Enum):
+     WEBUI = "Web User Interface"
+     GUI = "Graphical User Interface"
+     CLI = "Command Line Interface"
models/settings.py ADDED
@@ -0,0 +1,8 @@
+ from pydantic import BaseModel
+ from backend.models.lcmdiffusion_setting import LCMDiffusionSetting
+ from paths import FastStableDiffusionPaths
+
+
+ class Settings(BaseModel):
+     results_path: str = FastStableDiffusionPaths().get_results_path()
+     lcm_diffusion_setting: LCMDiffusionSetting = LCMDiffusionSetting()
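
Because `Settings` is a pydantic `BaseModel`, it validates its fields and serializes cleanly. A small sketch, not part of this commit (field names are taken from this commit; `model_dump` is the pydantic v2 API matching the pinned pydantic==2.4.2):

from backend.models.lcmdiffusion_setting import LCMDiffusionSetting
from models.settings import Settings

settings = Settings(
    lcm_diffusion_setting=LCMDiffusionSetting(prompt="A fantasy landscape")
)
print(settings.results_path)  # defaults to FastStableDiffusionPaths().get_results_path()
print(settings.model_dump())  # plain dict, ready for YAML/JSON serialization
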
paths.py ADDED
@@ -0,0 +1,48 @@
+ import os
+ import constants
+
+
+ def join_paths(
+     first_path: str,
+     second_path: str,
+ ) -> str:
+     return os.path.join(first_path, second_path)
+
+
+ def get_app_path():
+     app_dir = os.path.dirname(__file__)
+     work_dir = os.path.dirname(app_dir)
+     return work_dir
+
+
+ def get_configs_path() -> str:
+     config_path = join_paths(get_app_path(), constants.CONFIG_DIRECTORY)
+     return config_path
+
+
+ class FastStableDiffusionPaths:
+     @staticmethod
+     def get_app_settings_path() -> str:
+         configs_path = get_configs_path()
+         settings_path = join_paths(
+             configs_path,
+             constants.APP_SETTINGS_FILE,
+         )
+         return settings_path
+
+     @staticmethod
+     def get_results_path() -> str:
+         results_path = join_paths(get_app_path(), constants.RESULTS_DIRECTORY)
+         return results_path
+
+     @staticmethod
+     def get_css_path():
+         app_dir = os.path.dirname(__file__)
+         css_path = os.path.join(
+             app_dir,
+             "frontend",
+             "webui",
+             "css",
+             "style.css",
+         )
+         return css_path
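
These helpers resolve everything relative to the parent of the directory containing `paths.py` (note that `get_app_path` returns the parent directory, not the app directory itself), with names taken from `constants`. A usage sketch, not part of this commit (the printed values depend on `constants.CONFIG_DIRECTORY`, `constants.APP_SETTINGS_FILE` and `constants.RESULTS_DIRECTORY`, which are not shown here):

from paths import FastStableDiffusionPaths, get_app_path, get_configs_path

print(get_app_path())                                    # parent of the app directory
print(get_configs_path())                                # <app path>/<CONFIG_DIRECTORY>
print(FastStableDiffusionPaths.get_app_settings_path())  # <configs path>/<APP_SETTINGS_FILE>
print(FastStableDiffusionPaths.get_results_path())       # <app path>/<RESULTS_DIRECTORY>
print(FastStableDiffusionPaths.get_css_path())           # .../frontend/webui/css/style.css
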
requirements.txt ADDED
@@ -0,0 +1,13 @@
+ accelerate==0.23.0
+ diffusers==0.21.4
+ transformers==4.34.0
+ PyQt5
+ Pillow==9.4.0
+ openvino==2023.1.0
+ optimum-intel==1.11.0
+ onnx==1.14.1
+ onnxruntime==1.16.1
+ pydantic==2.4.2
+ typing-extensions==4.8.0
+ pyyaml
+ gradio==3.39.0
utils.py ADDED
@@ -0,0 +1,10 @@
+ import platform
+
+
+ def show_system_info():
+     try:
+         print(f"Running on {platform.system()} platform")
+         print(f"OS: {platform.platform()}")
+         print(f"Processor: {platform.processor()}")
+     except Exception as ex:
+         print(f"Error occurred while getting system information {ex}")