ehristoforu committed on
Commit
65efad1
1 Parent(s): be2a7dc

Upload folder using huggingface_hub

README.md CHANGED
@@ -1,10 +1,11 @@
  ---
  title: Rensor
- emoji: 🏃
- colorFrom: green
- colorTo: pink
+ emoji: 🐠
+ colorFrom: pink
+ colorTo: blue
  sdk: static
  pinned: false
+ license: other
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
_run-cpu.bat ADDED
@@ -0,0 +1,3 @@
+ @echo off
+
+ python app-cpu.py
_run-gpu.bat ADDED
@@ -0,0 +1,3 @@
+ @echo off
+
+ python app-gpu.py
app-cpu.py ADDED
@@ -0,0 +1,104 @@
+ import gradio as gr
+ import random
+ import requests
+ import time
+ import argparse
+ import os
+ from dotenv import load_dotenv
+
+ load_dotenv("config.txt")
+
+ from engine.generateCPU import cpugen
+ from engine.upscaler import upscale_image
+ from engine.promptGenerator import prompting
+
+
+ css = """
+ #container{
+ margin: 0 auto;
+ max-width: 40rem;
+ }
+ #intro{
+ max-width: 100%;
+ text-align: center;
+ margin: 0 auto;
+ }
+ #generate_button {
+ color: white;
+ border-color: #007bff;
+ background: #007bff;
+ width: 200px;
+ height: 50px;
+ }
+ footer {
+ visibility: hidden
+ }
+ """
+
+ with gr.Blocks(title="Rensor", css=css, theme="ehristoforu/RE_Theme") as webui:
+ with gr.Row():
+ with gr.Row(visible=False, variant="panel") as prompter:
+ with gr.Column(scale=1):
+ chatbot = gr.Textbox(show_label=False, interactive=False, max_lines=16, lines=14)
+ with gr.Row():
+ chat_text = gr.Textbox(show_label=False, placeholder="Enter short prompt", max_lines=2, lines=1, interactive=True, scale=30)
+ chat_submit = gr.Button(value="Prompt", scale=1)
+
+ chat_submit.click(fn=lambda x: gr.update(value="Prompting...", interactive=False), inputs=chat_submit, outputs=chat_submit).then(prompting, inputs=chat_text, outputs=chatbot).then(fn=lambda x: gr.update(value="Prompt", interactive=True), inputs=chat_submit, outputs=chat_submit)
+
+ with gr.Column(scale=3):
+ with gr.Row():
+ gallery = gr.Gallery(show_label=False, rows=2, columns=6, preview=True, value=["assets/favicon.png"])
+ work_time = gr.Markdown(visible=True)
+ dump_outputs = gr.Gallery(visible=False)
+ with gr.Row():
+ prompt = gr.Textbox(show_label=False, placeholder="Your amazing prompt...", max_lines=2, lines=2, interactive=True, scale=18)
+ button = gr.Button(value="Generate", variant="primary", interactive=True, scale=1)
+ with gr.Row():
+ advenced = gr.Checkbox(label="Advanced settings", value=False, interactive=True)
+ prompter_change = gr.Checkbox(label="Prompter", value=False, interactive=True)
+ with gr.Row(visible=False, variant="panel") as settings_tab:
+ with gr.Column(scale=1):
+ with gr.Tab("Settings"):
+ with gr.Row(scale=10):
+ mode = gr.Radio(label="Mode", choices=["High Quality", "Fast", "Super-fast"], value="Fast", info="Relationship between generation speed and quality.", interactive=True)
+ with gr.Row(scale=10):
+ guidance = gr.Slider(label="Guidance Scale", maximum=20.0, minimum=0.0, value=8.0, step=0.1, interactive=True)
+ with gr.Row(scale=10):
+ num_images = gr.Slider(label="Number of images", maximum=12, minimum=1, value=2, step=1, interactive=True)
+ with gr.Row(scale=1):
+ upscale_button = gr.Image(label="🚀 Upload image to 2x upscale", sources="upload", type="numpy", show_download_button=False, interactive=True)
+
+
+
+ button.click(fn=lambda x: gr.update(visible=False), inputs=work_time, outputs=work_time).then(fn=lambda x: gr.update(value="Generating...", variant="secondary", interactive=False), inputs=button, outputs=button).then(cpugen, inputs=[prompt, mode, guidance, num_images], outputs=[gallery, work_time]).then(fn=lambda x: gr.update(value="Generate", variant="primary", interactive=True), inputs=button, outputs=button).then(fn=lambda x: gr.update(visible=True), inputs=work_time, outputs=work_time)
+
+ upscale_button.upload(fn=lambda x: gr.update(visible=False), inputs=work_time, outputs=work_time).then(fn=lambda x: gr.update(label="🖼️ Image uploaded to 2x upscale", interactive=False), inputs=upscale_button, outputs=upscale_button).then(fn=lambda x: gr.update(value="Upscaling...", variant="secondary", interactive=False), inputs=button, outputs=button).then(upscale_image, inputs=upscale_button, outputs=[gallery, work_time]).then(fn=lambda x: gr.update(label="🚀 Upload image to 2x upscale", interactive=True), inputs=upscale_button, outputs=upscale_button).then(fn=lambda x: gr.update(value="Generate", variant="primary", interactive=True), inputs=button, outputs=button).then(fn=lambda x: gr.update(visible=True), inputs=work_time, outputs=work_time)
+
+ advenced.change(fn=lambda x: gr.update(visible=x), inputs=advenced, outputs=settings_tab, queue=False, api_name=False)
+ prompter_change.change(fn=lambda x: gr.update(visible=x), inputs=prompter_change, outputs=prompter, queue=False, api_name=False)
+
+
+ '''
+ advenced.change(fn=lambda x: gr.update(visible=x), inputs=advenced, outputs=img2img_change, queue=False, api_name=False)
+ advenced.change(fn=lambda x: gr.update(visible=x), inputs=advenced, outputs=i2i_strength, queue=False, api_name=False)
+ advenced.change(fn=lambda x: gr.update(visible=x), inputs=advenced, outputs=init_image, queue=False, api_name=False)
+ '''
+
+ '''
+ img2img_change.change(
+ fn=lambda x: gr.update(interactive=x),
+ inputs=img2img_change,
+ outputs=init_image,
+ queue=False,
+ api_name=False,
+ ).then(
+ fn=lambda x: gr.update(interactive=x),
+ inputs=img2img_change,
+ outputs=i2i_strength,
+ queue=False,
+ api_name=False,
+ )
+ '''
+
+ webui.queue(max_size=20).launch(debug=False, share=True, server_port=5555, quiet=True, show_api=False, favicon_path="assets/favicon.png", inbrowser=True)
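
Note: the `.click().then()` chains above are how the UI locks the Generate button while `cpugen` runs. A minimal, self-contained sketch of that chaining pattern (not part of this commit, for illustration only):

```python
import time
import gradio as gr

def slow_task(text):
    # stands in for cpugen(); any long-running function works here
    time.sleep(2)
    return f"done: {text}"

with gr.Blocks() as demo:
    box = gr.Textbox(label="Prompt")
    out = gr.Textbox(label="Result")
    btn = gr.Button("Generate")

    # step 1: disable the button, step 2: do the work, step 3: re-enable it
    btn.click(fn=lambda: gr.update(value="Generating...", interactive=False), outputs=btn) \
       .then(slow_task, inputs=box, outputs=out) \
       .then(fn=lambda: gr.update(value="Generate", interactive=True), outputs=btn)

if __name__ == "__main__":
    demo.launch()
```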
app-gpu.py ADDED
@@ -0,0 +1,132 @@
+ import gradio as gr
+ import random
+ import requests
+ import time
+ import os
+ import argparse
+ from dotenv import load_dotenv
+
+ load_dotenv("config.txt")
+
+ from engine.generate import gpugen
+ from engine.upscaler import upscale_image
+ from engine.promptGenerator import prompting
+
+
+ css = """
+ #container{
+ margin: 0 auto;
+ max-width: 40rem;
+ }
+ #intro{
+ max-width: 100%;
+ text-align: center;
+ margin: 0 auto;
+ }
+ #generate_button {
+ color: white;
+ border-color: #007bff;
+ background: #007bff;
+ width: 200px;
+ height: 50px;
+ }
+ footer {
+ visibility: hidden
+ }
+ """
+
+ with gr.Blocks(title="Rensor", css=css, theme="ehristoforu/RE_Theme") as webui:
+ with gr.Row():
+ with gr.Row(visible=False, variant="panel") as prompter:
+ with gr.Column(scale=1):
+ chatbot = gr.Textbox(show_label=False, interactive=False, max_lines=16, lines=14)
+ with gr.Row():
+ chat_text = gr.Textbox(show_label=False, placeholder="Enter short prompt", max_lines=2, lines=1, interactive=True, scale=20)
+ chat_submit = gr.Button(value="Prompt", scale=1)
+
+ chat_submit.click(fn=lambda x: gr.update(value="Prompting...", interactive=False), inputs=chat_submit, outputs=chat_submit).then(prompting, inputs=chat_text, outputs=chatbot).then(fn=lambda x: gr.update(value="Prompt", interactive=True), inputs=chat_submit, outputs=chat_submit)
+
+
+ with gr.Column(scale=3):
+ with gr.Row():
+ gallery = gr.Gallery(show_label=False, rows=2, columns=6, preview=True, value=["assets/favicon.png"])
+ work_time = gr.Markdown(visible=False)
+ with gr.Row():
+ prompt = gr.Textbox(show_label=False, placeholder="Your amazing prompt...", max_lines=3, lines=3, interactive=True, scale=18)
+ button = gr.Button(value="Generate", variant="primary", scale=1)
+ with gr.Row():
+ advenced = gr.Checkbox(label="Advanced inputs/settings", value=False, interactive=True)
+ prompter_change = gr.Checkbox(label="Prompter", value=False, interactive=True)
+
+
+ with gr.Row(visible=False, variant="panel") as settings_tab:
+ with gr.Column(scale=1):
+ with gr.Tab("Settings"):
+ with gr.Row(scale=10):
+ mode = gr.Radio(label="Mode", choices=["High Quality", "Fast", "Super-fast"], value="Fast", info="Relationship between generation speed and quality.", interactive=True, visible=True)
+ with gr.Row(scale=10):
+ width = gr.Slider(label="Width", maximum=2048, minimum=256, value=512, step=8, interactive=True, visible=True)
+ height = gr.Slider(label="Height", maximum=2048, minimum=256, value=512, step=8, interactive=True, visible=True)
+ with gr.Row(scale=10):
+ guidance = gr.Slider(label="Guidance Scale", maximum=20.0, minimum=0.0, value=8.0, step=0.1, interactive=True, visible=True)
+ with gr.Row(scale=10):
+ num_images = gr.Slider(label="Number of images", maximum=12, minimum=1, value=1, step=1, interactive=True, visible=True)
+ with gr.Row(scale=1):
+ upscale_button = gr.Image(label="🚀 Upload image to 2x upscale", sources="upload", type="numpy", show_download_button=False, interactive=True)
+
+ with gr.Tab("Init image"):
+ with gr.Row():
+ with gr.Column():
+ img2img_change = gr.Checkbox(label="Init Image", value=False, visible=True, interactive=True, scale=10)
+ i2i_strength = gr.Slider(label="Init Strength", minimum=0.01, maximum=2, step=0.01, value=0.70, interactive=False, visible=True)
+ init_image = gr.Image(label="Init image", type="pil", interactive=False, visible=True, scale=1)
+ with gr.Tab("Inpaint"):
+ with gr.Row():
+ with gr.Column():
+ inpaint_change = gr.Checkbox(label="Inpaint", value=False, visible=True, interactive=True, scale=4)
+ inpaint_strength = gr.Slider(label="Inpaint Strength", minimum=0.01, maximum=2, step=0.01, value=0.70, interactive=False, visible=True)
+ inpaint_image = gr.Image(label="Inpaint image", type="pil", interactive=False, visible=True, tool="sketch", scale=1)
+
+
+
+ button.click(fn=lambda x: gr.update(visible=False), inputs=work_time, outputs=work_time).then(fn=lambda x: gr.update(value="Generating...", variant="secondary", interactive=False), inputs=button, outputs=button).then(gpugen, inputs=[prompt, mode, guidance, width, height, num_images, i2i_strength, inpaint_strength, img2img_change, inpaint_change, init_image, inpaint_image], outputs=[gallery, work_time]).then(fn=lambda x: gr.update(visible=True), inputs=work_time, outputs=work_time).then(fn=lambda x: gr.update(value="Generate", variant="primary", interactive=True), inputs=button, outputs=button)
+
+ upscale_button.upload(fn=lambda x: gr.update(visible=False), inputs=work_time, outputs=work_time).then(fn=lambda x: gr.update(label="🖼️ Image uploaded to 2x upscale", interactive=False), inputs=upscale_button, outputs=upscale_button).then(fn=lambda x: gr.update(value="Upscaling...", variant="secondary", interactive=False), inputs=button, outputs=button).then(upscale_image, inputs=upscale_button, outputs=[gallery, work_time]).then(fn=lambda x: gr.update(label="🚀 Upload image to 2x upscale", interactive=True), inputs=upscale_button, outputs=upscale_button).then(fn=lambda x: gr.update(value="Generate", variant="primary", interactive=True), inputs=button, outputs=button).then(fn=lambda x: gr.update(visible=True), inputs=work_time, outputs=work_time)
+
+ advenced.change(fn=lambda x: gr.update(visible=x), inputs=advenced, outputs=settings_tab, queue=False, api_name=False)
+ prompter_change.change(fn=lambda x: gr.update(visible=x), inputs=prompter_change, outputs=prompter, queue=False, api_name=False)
+
+
+ img2img_change.change(
+ fn=lambda x: gr.update(interactive=x),
+ inputs=img2img_change,
+ outputs=init_image,
+ queue=False,
+ api_name=False,
+ ).then(
+ fn=lambda x: gr.update(interactive=x),
+ inputs=img2img_change,
+ outputs=i2i_strength,
+ queue=False,
+ api_name=False,
+ )
+
+
+ inpaint_change.change(
+ fn=lambda x: gr.update(interactive=x),
+ inputs=inpaint_change,
+ outputs=inpaint_image,
+ queue=False,
+ api_name=False,
+ ).then(
+ fn=lambda x: gr.update(interactive=x),
+ inputs=inpaint_change,
+ outputs=inpaint_strength,
+ queue=False,
+ api_name=False,
+ )
+
+
+
+
+ webui.queue(max_size=20).launch(debug=False, share=True, server_port=5555, quiet=True, show_api=False, favicon_path="assets/favicon.png", inbrowser=True)
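
Note: the `engine/` package imported by both apps is not included in this commit, so the exact signature of `gpugen` is not visible here. From the `button.click(...)` wiring above (12 inputs, `outputs=[gallery, work_time]`) it presumably looks roughly like the hypothetical sketch below; all names and return types are assumptions.

```python
import time

def gpugen(prompt, mode, guidance, width, height, num_images,
           i2i_strength, inpaint_strength, use_img2img, use_inpaint,
           init_image, inpaint_image):
    """Hypothetical outline of the generation entry point app-gpu.py expects."""
    start = time.time()
    images = []  # list of PIL images or file paths, consumed by gr.Gallery
    # ... run the diffusion pipeline here, branching on use_img2img / use_inpaint ...
    elapsed = f"Work time: {time.time() - start:.2f} s"  # markdown string for work_time
    return images, elapsed
```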
assets/favicon.png ADDED
config.txt ADDED
@@ -0,0 +1,5 @@
+ path_to_base_model = models/checkpoint/base/dreamdrop-v1.safetensors
+
+ path_to_inpaint_model = models/checkpoint/inpaint/dreamdrop-inpainting.safetensors
+
+ xl = "False"
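
Note: both apps call `load_dotenv("config.txt")`, so these keys end up as environment variables. A hedged sketch of how the engine code (not shown in this commit) would presumably read them:

```python
import os
from dotenv import load_dotenv

load_dotenv("config.txt")

base_model_path = os.getenv("path_to_base_model")        # e.g. models/checkpoint/base/dreamdrop-v1.safetensors
inpaint_model_path = os.getenv("path_to_inpaint_model")
# the value is written as "False" with quotes; strip them defensively before comparing
use_xl = os.getenv("xl", "False").strip('"') == "True"
```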
configs/lcm_ov_pipeline.py ADDED
@@ -0,0 +1,388 @@
1
+ import inspect
2
+
3
+ from pathlib import Path
4
+ from tempfile import TemporaryDirectory
5
+ from typing import List, Optional, Tuple, Union, Dict, Any, Callable, OrderedDict
6
+
7
+ import numpy as np
8
+ import openvino
9
+ import torch
10
+
11
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
12
+ from optimum.intel.openvino.modeling_diffusion import OVStableDiffusionPipeline, OVModelUnet, OVModelVaeDecoder, OVModelTextEncoder, OVModelVaeEncoder, VaeImageProcessor
13
+ from optimum.utils import (
14
+ DIFFUSION_MODEL_TEXT_ENCODER_2_SUBFOLDER,
15
+ DIFFUSION_MODEL_TEXT_ENCODER_SUBFOLDER,
16
+ DIFFUSION_MODEL_UNET_SUBFOLDER,
17
+ DIFFUSION_MODEL_VAE_DECODER_SUBFOLDER,
18
+ DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER,
19
+ )
20
+
21
+
22
+ from diffusers import logging
23
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
24
+
25
+ class LCMOVModelUnet(OVModelUnet):
26
+ def __call__(
27
+ self,
28
+ sample: np.ndarray,
29
+ timestep: np.ndarray,
30
+ encoder_hidden_states: np.ndarray,
31
+ timestep_cond: Optional[np.ndarray] = None,
32
+ text_embeds: Optional[np.ndarray] = None,
33
+ time_ids: Optional[np.ndarray] = None,
34
+ ):
35
+ self._compile()
36
+
37
+ inputs = {
38
+ "sample": sample,
39
+ "timestep": timestep,
40
+ "encoder_hidden_states": encoder_hidden_states,
41
+ }
42
+
43
+ if timestep_cond is not None:
44
+ inputs["timestep_cond"] = timestep_cond
45
+ if text_embeds is not None:
46
+ inputs["text_embeds"] = text_embeds
47
+ if time_ids is not None:
48
+ inputs["time_ids"] = time_ids
49
+
50
+ outputs = self.request(inputs, shared_memory=True)
51
+ return list(outputs.values())
52
+
53
+ class OVLatentConsistencyModelPipeline(OVStableDiffusionPipeline):
54
+
55
+ def __init__(
56
+ self,
57
+ vae_decoder: openvino.runtime.Model,
58
+ text_encoder: openvino.runtime.Model,
59
+ unet: openvino.runtime.Model,
60
+ config: Dict[str, Any],
61
+ tokenizer: "CLIPTokenizer",
62
+ scheduler: Union["DDIMScheduler", "PNDMScheduler", "LMSDiscreteScheduler"],
63
+ feature_extractor: Optional["CLIPFeatureExtractor"] = None,
64
+ vae_encoder: Optional[openvino.runtime.Model] = None,
65
+ text_encoder_2: Optional[openvino.runtime.Model] = None,
66
+ tokenizer_2: Optional["CLIPTokenizer"] = None,
67
+ device: str = "CPU",
68
+ dynamic_shapes: bool = True,
69
+ compile: bool = True,
70
+ ov_config: Optional[Dict[str, str]] = None,
71
+ model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None,
72
+ **kwargs,
73
+ ):
74
+ self._internal_dict = config
75
+ self._device = device.upper()
76
+ self.is_dynamic = dynamic_shapes
77
+ self.ov_config = ov_config if ov_config is not None else {}
78
+ self._model_save_dir = (
79
+ Path(model_save_dir.name) if isinstance(model_save_dir, TemporaryDirectory) else model_save_dir
80
+ )
81
+ self.vae_decoder = OVModelVaeDecoder(vae_decoder, self)
82
+ self.unet = LCMOVModelUnet(unet, self)
83
+ self.text_encoder = OVModelTextEncoder(text_encoder, self) if text_encoder is not None else None
84
+ self.text_encoder_2 = (
85
+ OVModelTextEncoder(text_encoder_2, self, model_name=DIFFUSION_MODEL_TEXT_ENCODER_2_SUBFOLDER)
86
+ if text_encoder_2 is not None
87
+ else None
88
+ )
89
+ self.vae_encoder = OVModelVaeEncoder(vae_encoder, self) if vae_encoder is not None else None
90
+
91
+ if "block_out_channels" in self.vae_decoder.config:
92
+ self.vae_scale_factor = 2 ** (len(self.vae_decoder.config["block_out_channels"]) - 1)
93
+ else:
94
+ self.vae_scale_factor = 8
95
+
96
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
97
+
98
+ self.tokenizer = tokenizer
99
+ self.tokenizer_2 = tokenizer_2
100
+ self.scheduler = scheduler
101
+ self.feature_extractor = feature_extractor
102
+ self.safety_checker = None
103
+ self.preprocessors = []
104
+
105
+ if self.is_dynamic:
106
+ self.reshape(batch_size=-1, height=-1, width=-1, num_images_per_prompt=-1)
107
+
108
+ if compile:
109
+ self.compile()
110
+
111
+ sub_models = {
112
+ DIFFUSION_MODEL_TEXT_ENCODER_SUBFOLDER: self.text_encoder,
113
+ DIFFUSION_MODEL_UNET_SUBFOLDER: self.unet,
114
+ DIFFUSION_MODEL_VAE_DECODER_SUBFOLDER: self.vae_decoder,
115
+ DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER: self.vae_encoder,
116
+ DIFFUSION_MODEL_TEXT_ENCODER_2_SUBFOLDER: self.text_encoder_2,
117
+ }
118
+ for name in sub_models.keys():
119
+ self._internal_dict[name] = (
120
+ ("optimum", sub_models[name].__class__.__name__) if sub_models[name] is not None else (None, None)
121
+ )
122
+
123
+ self._internal_dict.pop("vae", None)
124
+
125
+ def _reshape_unet(
126
+ self,
127
+ model: openvino.runtime.Model,
128
+ batch_size: int = -1,
129
+ height: int = -1,
130
+ width: int = -1,
131
+ num_images_per_prompt: int = -1,
132
+ tokenizer_max_length: int = -1,
133
+ ):
134
+ if batch_size == -1 or num_images_per_prompt == -1:
135
+ batch_size = -1
136
+ else:
137
+ batch_size = batch_size * num_images_per_prompt
138
+
139
+ height = height // self.vae_scale_factor if height > 0 else height
140
+ width = width // self.vae_scale_factor if width > 0 else width
141
+ shapes = {}
142
+ for inputs in model.inputs:
143
+ shapes[inputs] = inputs.get_partial_shape()
144
+ if inputs.get_any_name() == "timestep":
145
+ shapes[inputs][0] = 1
146
+ elif inputs.get_any_name() == "sample":
147
+ in_channels = self.unet.config.get("in_channels", None)
148
+ if in_channels is None:
149
+ in_channels = shapes[inputs][1]
150
+ if in_channels.is_dynamic:
151
+ logger.warning(
152
+ "Could not identify `in_channels` from the unet configuration, to statically reshape the unet please provide a configuration."
153
+ )
154
+ self.is_dynamic = True
155
+
156
+ shapes[inputs] = [batch_size, in_channels, height, width]
157
+ elif inputs.get_any_name() == "timestep_cond":
158
+ shapes[inputs] = [batch_size, inputs.get_partial_shape()[1]]
159
+ elif inputs.get_any_name() == "text_embeds":
160
+ shapes[inputs] = [batch_size, self.text_encoder_2.config["projection_dim"]]
161
+ elif inputs.get_any_name() == "time_ids":
162
+ shapes[inputs] = [batch_size, inputs.get_partial_shape()[1]]
163
+ else:
164
+ shapes[inputs][0] = batch_size
165
+ shapes[inputs][1] = tokenizer_max_length
166
+ model.reshape(shapes)
167
+ return model
168
+
169
+ def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=np.float32):
170
+ """
171
+ see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
172
+ Args:
173
+ timesteps: np.array: generate embedding vectors at these timesteps
174
+ embedding_dim: int: dimension of the embeddings to generate
175
+ dtype: data type of the generated embeddings
176
+
177
+ Returns:
178
+ embedding vectors with shape `(len(timesteps), embedding_dim)`
179
+ """
180
+ assert len(w.shape) == 1
181
+ w = w * 1000.
182
+
183
+ half_dim = embedding_dim // 2
184
+ emb = np.log(np.array(10000.)) / (half_dim - 1)
185
+ emb = np.exp(np.arange(half_dim, dtype=dtype) * -emb)
186
+ emb = w.astype(dtype)[:, None] * emb[None, :]
187
+ emb = np.concatenate([np.sin(emb), np.cos(emb)], axis=1)
188
+ if embedding_dim % 2 == 1: # zero pad
189
+ emb = np.pad(emb, (0, 1))
190
+ assert emb.shape == (w.shape[0], embedding_dim)
191
+ return emb
192
+
193
+ # Adapted from https://github.com/huggingface/optimum/blob/15b8d1eed4d83c5004d3b60f6b6f13744b358f01/optimum/pipelines/diffusers/pipeline_stable_diffusion.py#L201
194
+ def __call__(
195
+ self,
196
+ prompt: Optional[Union[str, List[str]]] = None,
197
+ height: Optional[int] = None,
198
+ width: Optional[int] = None,
199
+ num_inference_steps: int = 4,
200
+ original_inference_steps: int = None,
201
+ guidance_scale: float = 7.5,
202
+ num_images_per_prompt: int = 1,
203
+ eta: float = 0.0,
204
+ generator: Optional[np.random.RandomState] = None,
205
+ latents: Optional[np.ndarray] = None,
206
+ prompt_embeds: Optional[np.ndarray] = None,
207
+ output_type: str = "pil",
208
+ return_dict: bool = True,
209
+ callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
210
+ callback_steps: int = 1,
211
+ guidance_rescale: float = 0.0,
212
+ ):
213
+ r"""
214
+ Function invoked when calling the pipeline for generation.
215
+
216
+ Args:
217
+ prompt (`Optional[Union[str, List[str]]]`, defaults to None):
218
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
219
+ instead.
220
+ height (`Optional[int]`, defaults to None):
221
+ The height in pixels of the generated image.
222
+ width (`Optional[int]`, defaults to None):
223
+ The width in pixels of the generated image.
224
+ num_inference_steps (`int`, defaults to 4):
225
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
226
+ expense of slower inference.
227
+ original_inference_steps (`int`, *optional*):
228
+ The original number of inference steps use to generate a linearly-spaced timestep schedule, from which
229
+ we will draw `num_inference_steps` evenly spaced timesteps from as our final timestep schedule,
230
+ following the Skipping-Step method in the paper (see Section 4.3). If not set this will default to the
231
+ scheduler's `original_inference_steps` attribute.
232
+ guidance_scale (`float`, defaults to 7.5):
233
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
234
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
235
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
236
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
237
+ usually at the expense of lower image quality.
238
+ num_images_per_prompt (`int`, defaults to 1):
239
+ The number of images to generate per prompt.
240
+ eta (`float`, defaults to 0.0):
241
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
242
+ [`schedulers.DDIMScheduler`], will be ignored for others.
243
+ generator (`Optional[np.random.RandomState]`, defaults to `None`)::
244
+ A np.random.RandomState to make generation deterministic.
245
+ latents (`Optional[np.ndarray]`, defaults to `None`):
246
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
247
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
248
+ tensor will ge generated by sampling using the supplied random `generator`.
249
+ prompt_embeds (`Optional[np.ndarray]`, defaults to `None`):
250
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
251
+ provided, text embeddings will be generated from `prompt` input argument.
252
+ output_type (`str`, defaults to `"pil"`):
253
+ The output format of the generate image. Choose between
254
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
255
+ return_dict (`bool`, defaults to `True`):
256
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
257
+ plain tuple.
258
+ callback (Optional[Callable], defaults to `None`):
259
+ A function that will be called every `callback_steps` steps during inference. The function will be
260
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
261
+ callback_steps (`int`, defaults to 1):
262
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
263
+ called at every step.
264
+ guidance_rescale (`float`, defaults to 0.0):
265
+ Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
266
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of
267
+ [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
268
+ Guidance rescale factor should fix overexposure when using zero terminal SNR.
269
+
270
+ Returns:
271
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
272
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
273
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
274
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
275
+ (nsfw) content, according to the `safety_checker`.
276
+ """
277
+ height = height or self.unet.config.get("sample_size", 64) * self.vae_scale_factor
278
+ width = width or self.unet.config.get("sample_size", 64) * self.vae_scale_factor
279
+
280
+ # check inputs. Raise error if not correct
281
+ self.check_inputs(
282
+ prompt, height, width, callback_steps, None, prompt_embeds, None
283
+ )
284
+
285
+ # define call parameters
286
+ if isinstance(prompt, str):
287
+ batch_size = 1
288
+ elif isinstance(prompt, list):
289
+ batch_size = len(prompt)
290
+ else:
291
+ batch_size = prompt_embeds.shape[0]
292
+
293
+ if generator is None:
294
+ generator = np.random
295
+
296
+ # Create torch.Generator instance with same state as np.random.RandomState
297
+ torch_generator = torch.Generator().manual_seed(int(generator.get_state()[1][0]))
298
+
299
+ #do_classifier_free_guidance = guidance_scale > 1.0
300
+
301
+ # NOTE: when a LCM is distilled from an LDM via latent consistency distillation (Algorithm 1) with guided
302
+ # distillation, the forward pass of the LCM learns to approximate sampling from the LDM using CFG with the
303
+ # unconditional prompt "" (the empty string). Due to this, LCMs currently do not support negative prompts.
304
+ prompt_embeds = self._encode_prompt(
305
+ prompt,
306
+ num_images_per_prompt,
307
+ False,
308
+ negative_prompt=None,
309
+ prompt_embeds=prompt_embeds,
310
+ negative_prompt_embeds=None,
311
+ )
312
+
313
+ # set timesteps
314
+ self.scheduler.set_timesteps(num_inference_steps, "cpu", original_inference_steps=original_inference_steps)
315
+ timesteps = self.scheduler.timesteps
316
+
317
+ latents = self.prepare_latents(
318
+ batch_size * num_images_per_prompt,
319
+ self.unet.config.get("in_channels", 4),
320
+ height,
321
+ width,
322
+ prompt_embeds.dtype,
323
+ generator,
324
+ latents,
325
+ )
326
+
327
+ # Get Guidance Scale Embedding
328
+ w = np.tile(guidance_scale - 1, batch_size * num_images_per_prompt)
329
+ w_embedding = self.get_guidance_scale_embedding(w, embedding_dim=self.unet.config.get("time_cond_proj_dim", 256))
330
+
331
+ # Adapted from diffusers to extend it for other runtimes than ORT
332
+ timestep_dtype = self.unet.input_dtype.get("timestep", np.float32)
333
+
334
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
335
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
336
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
337
+ # and should be between [0, 1]
338
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
339
+ extra_step_kwargs = {}
340
+ if accepts_eta:
341
+ extra_step_kwargs["eta"] = eta
342
+
343
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
344
+ if accepts_generator:
345
+ extra_step_kwargs["generator"] = torch_generator
346
+
347
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
348
+ for i, t in enumerate(self.progress_bar(timesteps)):
349
+
350
+ # predict the noise residual
351
+ timestep = np.array([t], dtype=timestep_dtype)
352
+
353
+ noise_pred = self.unet(sample=latents, timestep=timestep, timestep_cond = w_embedding, encoder_hidden_states=prompt_embeds)[0]
354
+
355
+ # compute the previous noisy sample x_t -> x_t-1
356
+ latents, denoised = self.scheduler.step(
357
+ torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs, return_dict = False
358
+ )
359
+
360
+ latents, denoised = latents.numpy(), denoised.numpy()
361
+
362
+ # call the callback, if provided
363
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
364
+ if callback is not None and i % callback_steps == 0:
365
+ callback(i, t, latents)
366
+
367
+ if output_type == "latent":
368
+ image = latents
369
+ has_nsfw_concept = None
370
+ else:
371
+ denoised /= self.vae_decoder.config.get("scaling_factor", 0.18215)
372
+ # it seems likes there is a strange result for using half-precision vae decoder if batchsize>1
373
+ image = np.concatenate(
374
+ [self.vae_decoder(latent_sample=denoised[i : i + 1])[0] for i in range(latents.shape[0])]
375
+ )
376
+ image, has_nsfw_concept = self.run_safety_checker(image)
377
+
378
+ if has_nsfw_concept is None:
379
+ do_denormalize = [True] * image.shape[0]
380
+ else:
381
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
382
+
383
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
384
+
385
+ if not return_dict:
386
+ return (image, has_nsfw_concept)
387
+
388
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
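
Note: a hedged usage sketch for this pipeline (not part of the commit). It assumes an OpenVINO export produced by optimum-intel (for example via dev-tools/convert_to_openvino.py), that `from_pretrained` is inherited unchanged from `OVStableDiffusionPipeline`, and a hypothetical model path.

```python
from configs.lcm_ov_pipeline import OVLatentConsistencyModelPipeline
from configs.lcm_scheduler import LCMScheduler

# hypothetical path to an exported OpenVINO model directory
pipe = OVLatentConsistencyModelPipeline.from_pretrained("models/lcm-dreamdrop-ov", compile=True)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

result = pipe(
    prompt="a photo of an astronaut riding a horse",
    num_inference_steps=4,   # consistency models need only a handful of steps
    guidance_scale=8.0,
    height=512,
    width=512,
)
result.images[0].save("output.png")
```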
configs/lcm_scheduler.py ADDED
@@ -0,0 +1,529 @@
1
+ # Copyright 2023 Stanford University Team and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
16
+ # and https://github.com/hojonathanho/diffusion
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import torch
24
+
25
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
26
+ from diffusers.utils import BaseOutput, logging
27
+ from diffusers.utils.torch_utils import randn_tensor
28
+ from diffusers.schedulers.scheduling_utils import SchedulerMixin
29
+
30
+
31
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
32
+
33
+
34
+ @dataclass
35
+ class LCMSchedulerOutput(BaseOutput):
36
+ """
37
+ Output class for the scheduler's `step` function output.
38
+
39
+ Args:
40
+ prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
41
+ Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
42
+ denoising loop.
43
+ pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
44
+ The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
45
+ `pred_original_sample` can be used to preview progress or for guidance.
46
+ """
47
+
48
+ prev_sample: torch.FloatTensor
49
+ denoised: Optional[torch.FloatTensor] = None
50
+
51
+
52
+ # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
53
+ def betas_for_alpha_bar(
54
+ num_diffusion_timesteps,
55
+ max_beta=0.999,
56
+ alpha_transform_type="cosine",
57
+ ):
58
+ """
59
+ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
60
+ (1-beta) over time from t = [0,1].
61
+
62
+ Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
63
+ to that part of the diffusion process.
64
+
65
+
66
+ Args:
67
+ num_diffusion_timesteps (`int`): the number of betas to produce.
68
+ max_beta (`float`): the maximum beta to use; use values lower than 1 to
69
+ prevent singularities.
70
+ alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
71
+ Choose from `cosine` or `exp`
72
+
73
+ Returns:
74
+ betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
75
+ """
76
+ if alpha_transform_type == "cosine":
77
+
78
+ def alpha_bar_fn(t):
79
+ return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
80
+
81
+ elif alpha_transform_type == "exp":
82
+
83
+ def alpha_bar_fn(t):
84
+ return math.exp(t * -12.0)
85
+
86
+ else:
87
+ raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
88
+
89
+ betas = []
90
+ for i in range(num_diffusion_timesteps):
91
+ t1 = i / num_diffusion_timesteps
92
+ t2 = (i + 1) / num_diffusion_timesteps
93
+ betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
94
+ return torch.tensor(betas, dtype=torch.float32)
95
+
96
+
97
+ # Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr
98
+ def rescale_zero_terminal_snr(betas: torch.FloatTensor) -> torch.FloatTensor:
99
+ """
100
+ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
101
+
102
+
103
+ Args:
104
+ betas (`torch.FloatTensor`):
105
+ the betas that the scheduler is being initialized with.
106
+
107
+ Returns:
108
+ `torch.FloatTensor`: rescaled betas with zero terminal SNR
109
+ """
110
+ # Convert betas to alphas_bar_sqrt
111
+ alphas = 1.0 - betas
112
+ alphas_cumprod = torch.cumprod(alphas, dim=0)
113
+ alphas_bar_sqrt = alphas_cumprod.sqrt()
114
+
115
+ # Store old values.
116
+ alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
117
+ alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
118
+
119
+ # Shift so the last timestep is zero.
120
+ alphas_bar_sqrt -= alphas_bar_sqrt_T
121
+
122
+ # Scale so the first timestep is back to the old value.
123
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
124
+
125
+ # Convert alphas_bar_sqrt to betas
126
+ alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
127
+ alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
128
+ alphas = torch.cat([alphas_bar[0:1], alphas])
129
+ betas = 1 - alphas
130
+
131
+ return betas
132
+
133
+
134
+ class LCMScheduler(SchedulerMixin, ConfigMixin):
135
+ """
136
+ `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
137
+ non-Markovian guidance.
138
+
139
+ This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. [`~ConfigMixin`] takes care of storing all config
140
+ attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be
141
+ accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving
142
+ functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions.
143
+
144
+ Args:
145
+ num_train_timesteps (`int`, defaults to 1000):
146
+ The number of diffusion steps to train the model.
147
+ beta_start (`float`, defaults to 0.0001):
148
+ The starting `beta` value of inference.
149
+ beta_end (`float`, defaults to 0.02):
150
+ The final `beta` value.
151
+ beta_schedule (`str`, defaults to `"linear"`):
152
+ The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
153
+ `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
154
+ trained_betas (`np.ndarray`, *optional*):
155
+ Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
156
+ original_inference_steps (`int`, *optional*, defaults to 50):
157
+ The default number of inference steps used to generate a linearly-spaced timestep schedule, from which we
158
+ will ultimately take `num_inference_steps` evenly spaced timesteps to form the final timestep schedule.
159
+ clip_sample (`bool`, defaults to `True`):
160
+ Clip the predicted sample for numerical stability.
161
+ clip_sample_range (`float`, defaults to 1.0):
162
+ The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
163
+ set_alpha_to_one (`bool`, defaults to `True`):
164
+ Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
165
+ there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
166
+ otherwise it uses the alpha value at step 0.
167
+ steps_offset (`int`, defaults to 0):
168
+ An offset added to the inference steps. You can use a combination of `offset=1` and
169
+ `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable
170
+ Diffusion.
171
+ prediction_type (`str`, defaults to `epsilon`, *optional*):
172
+ Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
173
+ `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
174
+ Video](https://imagen.research.google/video/paper.pdf) paper).
175
+ thresholding (`bool`, defaults to `False`):
176
+ Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
177
+ as Stable Diffusion.
178
+ dynamic_thresholding_ratio (`float`, defaults to 0.995):
179
+ The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
180
+ sample_max_value (`float`, defaults to 1.0):
181
+ The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
182
+ timestep_spacing (`str`, defaults to `"leading"`):
183
+ The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
184
+ Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
185
+ rescale_betas_zero_snr (`bool`, defaults to `False`):
186
+ Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
187
+ dark samples instead of limiting it to samples with medium brightness. Loosely related to
188
+ [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
189
+ """
190
+
191
+ order = 1
192
+
193
+ @register_to_config
194
+ def __init__(
195
+ self,
196
+ num_train_timesteps: int = 1000,
197
+ beta_start: float = 0.00085,
198
+ beta_end: float = 0.012,
199
+ beta_schedule: str = "scaled_linear",
200
+ trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
201
+ original_inference_steps: int = 50,
202
+ clip_sample: bool = False,
203
+ clip_sample_range: float = 1.0,
204
+ set_alpha_to_one: bool = True,
205
+ steps_offset: int = 0,
206
+ prediction_type: str = "epsilon",
207
+ thresholding: bool = False,
208
+ dynamic_thresholding_ratio: float = 0.995,
209
+ sample_max_value: float = 1.0,
210
+ timestep_spacing: str = "leading",
211
+ rescale_betas_zero_snr: bool = False,
212
+ ):
213
+ if trained_betas is not None:
214
+ self.betas = torch.tensor(trained_betas, dtype=torch.float32)
215
+ elif beta_schedule == "linear":
216
+ self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
217
+ elif beta_schedule == "scaled_linear":
218
+ # this schedule is very specific to the latent diffusion model.
219
+ self.betas = (
220
+ torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
221
+ )
222
+ elif beta_schedule == "squaredcos_cap_v2":
223
+ # Glide cosine schedule
224
+ self.betas = betas_for_alpha_bar(num_train_timesteps)
225
+ else:
226
+ raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
227
+
228
+ # Rescale for zero SNR
229
+ if rescale_betas_zero_snr:
230
+ self.betas = rescale_zero_terminal_snr(self.betas)
231
+
232
+ self.alphas = 1.0 - self.betas
233
+ self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
234
+
235
+ # At every step in ddim, we are looking into the previous alphas_cumprod
236
+ # For the final step, there is no previous alphas_cumprod because we are already at 0
237
+ # `set_alpha_to_one` decides whether we set this parameter simply to one or
238
+ # whether we use the final alpha of the "non-previous" one.
239
+ self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
240
+
241
+ # standard deviation of the initial noise distribution
242
+ self.init_noise_sigma = 1.0
243
+
244
+ # setable values
245
+ self.num_inference_steps = None
246
+ self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
247
+
248
+ self._step_index = None
249
+
250
+ # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index
251
+ def _init_step_index(self, timestep):
252
+ if isinstance(timestep, torch.Tensor):
253
+ timestep = timestep.to(self.timesteps.device)
254
+
255
+ index_candidates = (self.timesteps == timestep).nonzero()
256
+
257
+ # The sigma index that is taken for the **very** first `step`
258
+ # is always the second index (or the last index if there is only 1)
259
+ # This way we can ensure we don't accidentally skip a sigma in
260
+ # case we start in the middle of the denoising schedule (e.g. for image-to-image)
261
+ if len(index_candidates) > 1:
262
+ step_index = index_candidates[1]
263
+ else:
264
+ step_index = index_candidates[0]
265
+
266
+ self._step_index = step_index.item()
267
+
268
+ @property
269
+ def step_index(self):
270
+ return self._step_index
271
+
272
+ def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
273
+ """
274
+ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
275
+ current timestep.
276
+
277
+ Args:
278
+ sample (`torch.FloatTensor`):
279
+ The input sample.
280
+ timestep (`int`, *optional*):
281
+ The current timestep in the diffusion chain.
282
+ Returns:
283
+ `torch.FloatTensor`:
284
+ A scaled input sample.
285
+ """
286
+ return sample
287
+
288
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
289
+ def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
290
+ """
291
+ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
292
+ prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
293
+ s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
294
+ pixels from saturation at each step. We find that dynamic thresholding results in significantly better
295
+ photorealism as well as better image-text alignment, especially when using very large guidance weights."
296
+
297
+ https://arxiv.org/abs/2205.11487
298
+ """
299
+ dtype = sample.dtype
300
+ batch_size, channels, *remaining_dims = sample.shape
301
+
302
+ if dtype not in (torch.float32, torch.float64):
303
+ sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
304
+
305
+ # Flatten sample for doing quantile calculation along each image
306
+ sample = sample.reshape(batch_size, channels * np.prod(remaining_dims))
307
+
308
+ abs_sample = sample.abs() # "a certain percentile absolute pixel value"
309
+
310
+ s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
311
+ s = torch.clamp(
312
+ s, min=1, max=self.config.sample_max_value
313
+ ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
314
+ s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
315
+ sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
316
+
317
+ sample = sample.reshape(batch_size, channels, *remaining_dims)
318
+ sample = sample.to(dtype)
319
+
320
+ return sample
321
+
322
+ def set_timesteps(
323
+ self,
324
+ num_inference_steps: int,
325
+ device: Union[str, torch.device] = None,
326
+ original_inference_steps: Optional[int] = None,
327
+ ):
328
+ """
329
+ Sets the discrete timesteps used for the diffusion chain (to be run before inference).
330
+
331
+ Args:
332
+ num_inference_steps (`int`):
333
+ The number of diffusion steps used when generating samples with a pre-trained model.
334
+ device (`str` or `torch.device`, *optional*):
335
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
336
+ original_inference_steps (`int`, *optional*):
337
+ The original number of inference steps, which will be used to generate a linearly-spaced timestep
338
+ schedule (which is different from the standard `diffusers` implementation). We will then take
339
+ `num_inference_steps` timesteps from this schedule, evenly spaced in terms of indices, and use that as
340
+ our final timestep schedule. If not set, this will default to the `original_inference_steps` attribute.
341
+ """
342
+
343
+ if num_inference_steps > self.config.num_train_timesteps:
344
+ raise ValueError(
345
+ f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
346
+ f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
347
+ f" maximal {self.config.num_train_timesteps} timesteps."
348
+ )
349
+
350
+ self.num_inference_steps = num_inference_steps
351
+ original_steps = (
352
+ original_inference_steps if original_inference_steps is not None else self.original_inference_steps
353
+ )
354
+
355
+ if original_steps > self.config.num_train_timesteps:
356
+ raise ValueError(
357
+ f"`original_steps`: {original_steps} cannot be larger than `self.config.train_timesteps`:"
358
+ f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
359
+ f" maximal {self.config.num_train_timesteps} timesteps."
360
+ )
361
+
362
+ if num_inference_steps > original_steps:
363
+ raise ValueError(
364
+ f"`num_inference_steps`: {num_inference_steps} cannot be larger than `original_inference_steps`:"
365
+ f" {original_steps} because the final timestep schedule will be a subset of the"
366
+ f" `original_inference_steps`-sized initial timestep schedule."
367
+ )
368
+
369
+ # LCM Timesteps Setting
370
+ # Currently, only linear spacing is supported.
371
+ c = self.config.num_train_timesteps // original_steps
372
+ # LCM Training Steps Schedule
373
+ lcm_origin_timesteps = np.asarray(list(range(1, original_steps + 1))) * c - 1
374
+ skipping_step = len(lcm_origin_timesteps) // num_inference_steps
375
+ # LCM Inference Steps Schedule
376
+ timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps]
377
+
378
+ self.timesteps = torch.from_numpy(timesteps.copy()).to(device=device, dtype=torch.long)
379
+
380
+ self._step_index = None
381
+
382
+ def get_scalings_for_boundary_condition_discrete(self, t):
383
+ self.sigma_data = 0.5 # Default: 0.5
384
+
385
+ # By dividing 0.1: This is almost a delta function at t=0.
386
+ c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2)
387
+ c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5
388
+ return c_skip, c_out
389
+
390
+ def step(
391
+ self,
392
+ model_output: torch.FloatTensor,
393
+ timestep: int,
394
+ sample: torch.FloatTensor,
395
+ generator: Optional[torch.Generator] = None,
396
+ return_dict: bool = True,
397
+ ) -> Union[LCMSchedulerOutput, Tuple]:
398
+ """
399
+ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
400
+ process from the learned model outputs (most often the predicted noise).
401
+
402
+ Args:
403
+ model_output (`torch.FloatTensor`):
404
+ The direct output from learned diffusion model.
405
+ timestep (`float`):
406
+ The current discrete timestep in the diffusion chain.
407
+ sample (`torch.FloatTensor`):
408
+ A current instance of a sample created by the diffusion process.
409
+ generator (`torch.Generator`, *optional*):
410
+ A random number generator.
411
+ return_dict (`bool`, *optional*, defaults to `True`):
412
+ Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.
413
+ Returns:
414
+ [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:
415
+ If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a
416
+ tuple is returned where the first element is the sample tensor.
417
+ """
418
+ if self.num_inference_steps is None:
419
+ raise ValueError(
420
+ "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
421
+ )
422
+
423
+ if self.step_index is None:
424
+ self._init_step_index(timestep)
425
+
426
+ # 1. get previous step value
427
+ prev_step_index = self.step_index + 1
428
+ if prev_step_index < len(self.timesteps):
429
+ prev_timestep = self.timesteps[prev_step_index]
430
+ else:
431
+ prev_timestep = timestep
432
+
433
+ # 2. compute alphas, betas
434
+ alpha_prod_t = self.alphas_cumprod[timestep]
435
+ alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
436
+
437
+ beta_prod_t = 1 - alpha_prod_t
438
+ beta_prod_t_prev = 1 - alpha_prod_t_prev
439
+
440
+ # 3. Get scalings for boundary conditions
441
+ c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)
442
+
443
+ # 4. Compute the predicted original sample x_0 based on the model parameterization
444
+ if self.config.prediction_type == "epsilon": # noise-prediction
445
+ predicted_original_sample = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()
446
+ elif self.config.prediction_type == "sample": # x-prediction
447
+ predicted_original_sample = model_output
448
+ elif self.config.prediction_type == "v_prediction": # v-prediction
449
+ predicted_original_sample = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output
450
+ else:
451
+ raise ValueError(
452
+ f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"
453
+ " `v_prediction` for `LCMScheduler`."
454
+ )
455
+
456
+ # 5. Clip or threshold "predicted x_0"
457
+ if self.config.thresholding:
458
+ predicted_original_sample = self._threshold_sample(predicted_original_sample)
459
+ elif self.config.clip_sample:
460
+ predicted_original_sample = predicted_original_sample.clamp(
461
+ -self.config.clip_sample_range, self.config.clip_sample_range
462
+ )
463
+
464
+ # 6. Denoise model output using boundary conditions
465
+ denoised = c_out * predicted_original_sample + c_skip * sample
466
+
467
+ # 7. Sample and inject noise z ~ N(0, I) for MultiStep Inference
468
+ # Noise is not used for one-step sampling.
469
+ if len(self.timesteps) > 1:
470
+ noise = randn_tensor(model_output.shape, generator=generator, device=model_output.device)
471
+ prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise
472
+ else:
473
+ prev_sample = denoised
474
+
475
+ # upon completion increase step index by one
476
+ self._step_index += 1
477
+
478
+ if not return_dict:
479
+ return (prev_sample, denoised)
480
+
481
+ return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)
482
+
483
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
484
+ def add_noise(
485
+ self,
486
+ original_samples: torch.FloatTensor,
487
+ noise: torch.FloatTensor,
488
+ timesteps: torch.IntTensor,
489
+ ) -> torch.FloatTensor:
490
+ # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
491
+ alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
492
+ timesteps = timesteps.to(original_samples.device)
493
+
494
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
495
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
496
+ while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
497
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
498
+
499
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
500
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
501
+ while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
502
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
503
+
504
+ noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
505
+ return noisy_samples
506
+
507
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
508
+ def get_velocity(
509
+ self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor
510
+ ) -> torch.FloatTensor:
511
+ # Make sure alphas_cumprod and timestep have same device and dtype as sample
512
+ alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
513
+ timesteps = timesteps.to(sample.device)
514
+
515
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
516
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
517
+ while len(sqrt_alpha_prod.shape) < len(sample.shape):
518
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
519
+
520
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
521
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
522
+ while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
523
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
524
+
525
+ velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
526
+ return velocity
527
+
528
+ def __len__(self):
529
+ return self.config.num_train_timesteps
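
The step() method above is what a short LCM denoising loop calls once per timestep. A minimal sketch of such a loop, assuming a diffusers UNet2DConditionModel is loaded as `unet`, this scheduler as `scheduler`, and `latents`, `prompt_embeds` and `w_embedding` tensors already exist (the exact set_timesteps() arguments depend on the scheduler revision):

scheduler.set_timesteps(4)  # LCM typically runs with very few steps
for t in scheduler.timesteps:
    model_pred = unet(
        latents,
        t,
        timestep_cond=w_embedding,            # guidance-scale embedding used by LCM UNets
        encoder_hidden_states=prompt_embeds,
    ).sample
    # step() applies the boundary conditions above and, while more steps remain,
    # re-noises the denoised estimate before the next iteration
    latents, denoised = scheduler.step(model_pred, t, latents, return_dict=False)
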
dev-tools/convert_to_openvino.py ADDED
@@ -0,0 +1,135 @@
1
+ from typing import Dict, Optional, Tuple, OrderedDict
2
+ from transformers import CLIPTextConfig
3
+ from diffusers import UNet2DConditionModel
4
+
5
+ import torch
6
+ import random  # used by CustomDummyTimestepInputGenerator below
7
+ from optimum.exporters.onnx.model_configs import VisionOnnxConfig, NormalizedConfig, DummyVisionInputGenerator, DummyTimestepInputGenerator, DummySeq2SeqDecoderTextInputGenerator
8
+ from optimum.exporters.openvino import main_export
9
+ from optimum.utils.input_generators import DummyInputGenerator, DEFAULT_DUMMY_SHAPES
10
+ from optimum.utils.normalized_config import NormalizedTextConfig
11
+
12
+ # IMPORTANT: you need to put a scheduler config in the downloaded model cache folder to avoid errors
13
+
14
+ class CustomDummyTimestepInputGenerator(DummyInputGenerator):
15
+ """
16
+ Generates dummy time step inputs.
17
+ """
18
+
19
+ SUPPORTED_INPUT_NAMES = (
20
+ "timestep",
21
+ "timestep_cond",
22
+ "text_embeds",
23
+ "time_ids",
24
+ )
25
+
26
+ def __init__(
27
+ self,
28
+ task: str,
29
+ normalized_config: NormalizedConfig,
30
+ batch_size: int = DEFAULT_DUMMY_SHAPES["batch_size"],
31
+ time_cond_proj_dim: int = 256,
32
+ random_batch_size_range: Optional[Tuple[int, int]] = None,
33
+ **kwargs,
34
+ ):
35
+ self.task = task
36
+ self.vocab_size = normalized_config.vocab_size
37
+ self.text_encoder_projection_dim = normalized_config.text_encoder_projection_dim
38
+ self.time_ids = 5 if normalized_config.requires_aesthetics_score else 6
39
+ if random_batch_size_range:
40
+ low, high = random_batch_size_range
41
+ self.batch_size = random.randint(low, high)
42
+ else:
43
+ self.batch_size = batch_size
44
+ self.time_cond_proj_dim = normalized_config.get("time_cond_proj_dim", time_cond_proj_dim)
45
+
46
+ def generate(self, input_name: str, framework: str = "pt", int_dtype: str = "int64", float_dtype: str = "fp32"):
47
+ shape = [self.batch_size]
48
+
49
+ if input_name == "timestep":
50
+ return self.random_int_tensor(shape, max_value=self.vocab_size, framework=framework, dtype=int_dtype)
51
+
52
+ if input_name == "timestep_cond":
53
+ shape.append(self.time_cond_proj_dim)
54
+ return self.random_float_tensor(shape, min_value=-1.0, max_value=1.0, framework=framework, dtype=float_dtype)
55
+
56
+
57
+ shape.append(self.text_encoder_projection_dim if input_name == "text_embeds" else self.time_ids)
58
+ return self.random_float_tensor(shape, max_value=self.vocab_size, framework=framework, dtype=float_dtype)
59
+
60
+ class LCMUNetOnnxConfig(VisionOnnxConfig):
61
+ ATOL_FOR_VALIDATION = 1e-3
62
+ # The ONNX export of a CLIPText architecture, another Stable Diffusion component, needs the Trilu
63
+ # operator support, available since opset 14
64
+ DEFAULT_ONNX_OPSET = 14
65
+
66
+ NORMALIZED_CONFIG_CLASS = NormalizedConfig.with_args(
67
+ image_size="sample_size",
68
+ num_channels="in_channels",
69
+ hidden_size="cross_attention_dim",
70
+ vocab_size="norm_num_groups",
71
+ allow_new=True,
72
+ )
73
+
74
+ DUMMY_INPUT_GENERATOR_CLASSES = (
75
+ DummyVisionInputGenerator,
76
+ CustomDummyTimestepInputGenerator,
77
+ DummySeq2SeqDecoderTextInputGenerator,
78
+ )
79
+
80
+ @property
81
+ def inputs(self) -> Dict[str, Dict[int, str]]:
82
+ common_inputs = OrderedDict({
83
+ "sample": {0: "batch_size", 1: "num_channels", 2: "height", 3: "width"},
84
+ "timestep": {0: "steps"},
85
+ "encoder_hidden_states": {0: "batch_size", 1: "sequence_length"},
86
+ "timestep_cond": {0: "batch_size"},
87
+ })
88
+
89
+ # TODO : add text_image, image and image_embeds
90
+ if getattr(self._normalized_config, "addition_embed_type", None) == "text_time":
91
+ common_inputs["text_embeds"] = {0: "batch_size"}
92
+ common_inputs["time_ids"] = {0: "batch_size"}
93
+
94
+ return common_inputs
95
+
96
+ @property
97
+ def outputs(self) -> Dict[str, Dict[int, str]]:
98
+ return {
99
+ "out_sample": {0: "batch_size", 1: "num_channels", 2: "height", 3: "width"},
100
+ }
101
+
102
+ @property
103
+ def torch_to_onnx_output_map(self) -> Dict[str, str]:
104
+ return {
105
+ "sample": "out_sample",
106
+ }
107
+
108
+ def generate_dummy_inputs(self, framework: str = "pt", **kwargs):
109
+ dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs)
110
+ dummy_inputs["encoder_hidden_states"] = dummy_inputs["encoder_hidden_states"][0]
111
+
112
+ if getattr(self._normalized_config, "addition_embed_type", None) == "text_time":
113
+ dummy_inputs["added_cond_kwargs"] = {
114
+ "text_embeds": dummy_inputs.pop("text_embeds"),
115
+ "time_ids": dummy_inputs.pop("time_ids"),
116
+ }
117
+
118
+ return dummy_inputs
119
+
120
+ def ordered_inputs(self, model) -> Dict[str, Dict[int, str]]:
121
+ return self.inputs  # the default ordering breaks when timestep_cond is involved, so keep the original inputs mapping
122
+
123
+ model_id = "SimianLuo/LCM_Dreamshaper_v7"
124
+
125
+ text_encoder_config = CLIPTextConfig.from_pretrained(model_id, subfolder = "text_encoder")
126
+ unet_config = UNet2DConditionModel.from_pretrained(model_id, subfolder = "unet").config
127
+
128
+ unet_config.text_encoder_projection_dim = text_encoder_config.projection_dim
129
+ unet_config.requires_aesthetics_score = False
130
+
131
+ custom_onnx_configs = {
132
+ "unet": LCMUNetOnnxConfig(config = unet_config, task = "semantic-segmentation")
133
+ }
134
+
135
+ main_export(model_name_or_path = model_id, output = "./", task = "stable-diffusion", fp16 = False, int8 = False, custom_onnx_configs = custom_onnx_configs)
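
Once main_export() finishes, the exported folder can be loaded the same way this commit loads its bundled CPU model. A rough sketch, assuming the export landed in the current directory, a 512x512 static shape, and a scheduler config placed next to the export as the note at the top of the script requires:

from configs.lcm_ov_pipeline import OVLatentConsistencyModelPipeline
from configs.lcm_scheduler import LCMScheduler

scheduler = LCMScheduler.from_pretrained("./", subfolder="scheduler")
pipe = OVLatentConsistencyModelPipeline.from_pretrained("./", scheduler=scheduler, compile=False)
pipe.reshape(batch_size=1, width=512, height=512, num_images_per_prompt=1)  # OpenVINO wants static shapes
pipe.compile()
image = pipe(prompt="a lighthouse at dawn", num_inference_steps=4, guidance_scale=8.0).images[0]
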
engine/__pycache__/generateCPU.cpython-311.pyc ADDED
Binary file (2.95 kB).
 
engine/__pycache__/promptGenerator.cpython-311.pyc ADDED
Binary file (2.48 kB).
 
engine/__pycache__/upscaler.cpython-311.pyc ADDED
Binary file (766 Bytes).
 
engine/generate.py ADDED
@@ -0,0 +1,120 @@
1
+ import random
2
+ import requests
3
+ import torch
4
+ import time
5
+ import gradio as gr
6
+ from io import BytesIO
7
+ from PIL import Image
8
+ import imageio
9
+ from dotenv import load_dotenv
10
+ import os
11
+
12
+ load_dotenv("config.txt")
13
+
14
+ path_to_base_model = os.getenv("path_to_base_model")
15
+ path_to_inpaint_model = os.getenv("path_to_inpaint_model")
16
+
17
+ xl = os.getenv("xl")
18
+
19
+ if xl == "True":
20
+ from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline
21
+ pipe_t2i = StableDiffusionXLPipeline.from_single_file(path_to_base_model, torch_dtype=torch.float16, use_safetensors=True)
22
+ pipe_t2i = pipe_t2i.to("cuda")
23
+
24
+ pipe_i2i = StableDiffusionXLImg2ImgPipeline.from_single_file(path_to_base_model, torch_dtype=torch.float16, use_safetensors=True)
25
+ pipe_i2i = pipe_i2i.to("cuda")
26
+
27
+ pipe_inpaint = StableDiffusionXLInpaintPipeline.from_single_file(path_to_inpaint_model, torch_dtype=torch.float16, use_safetensors=True)
28
+ pipe_inpaint = pipe_inpaint.to("cuda")
29
+ else:
30
+ from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline
31
+ pipe_t2i = StableDiffusionPipeline.from_single_file(path_to_base_model, torch_dtype=torch.float16, use_safetensors=True)
32
+ pipe_t2i = pipe_t2i.to("cuda")
33
+
34
+ pipe_i2i = StableDiffusionImg2ImgPipeline.from_single_file(path_to_base_model, torch_dtype=torch.float16, use_safetensors=True)
35
+ pipe_i2i = pipe_i2i.to("cuda")
36
+
37
+ pipe_inpaint = StableDiffusionInpaintPipeline.from_single_file(path_to_inpaint_model, torch_dtype=torch.float16, use_safetensors=True)
38
+ pipe_inpaint = pipe_inpaint.to("cuda")
39
+
40
+
41
+ pipe_t2i.load_lora_weights(pretrained_model_name_or_path_or_dict="models/lora", weight_name="epic_noiseoffset.safetensors")
42
+ pipe_t2i.fuse_lora(lora_scale=0.1)
43
+
44
+ pipe_i2i.load_lora_weights(pretrained_model_name_or_path_or_dict="models/lora", weight_name="epic_noiseoffset.safetensors")
45
+ pipe_i2i.fuse_lora(lora_scale=0.1)
46
+
47
+ pipe_inpaint.load_lora_weights(pretrained_model_name_or_path_or_dict="models/lora", weight_name="epic_noiseoffset.safetensors")
48
+ pipe_inpaint.fuse_lora(lora_scale=0.1)
49
+
50
+
51
+ def gpugen(prompt, mode, guidance, width, height, num_images, i2i_strength, inpaint_strength, i2i_change, inpaint_change, init=None, inpaint_image=None, progress = gr.Progress(track_tqdm=True)):
52
+ if mode == "Fast":
53
+ steps = 30
54
+ elif mode == "High Quality":
55
+ steps = 45
56
+ else:
57
+ steps = 20
58
+ results = []
59
+ seed = random.randint(1, 9999999)
60
+ if not i2i_change and not inpaint_change:
61
+ num = random.randint(100, 99999)
62
+ start_time = time.time()
63
+ for _ in range(num_images):
64
+ image = pipe_t2i(
65
+ prompt=f"{prompt}, epic realistic, faded, ((neutral colors)), art, (hdr:1.5), (muted colors:1.2), pastel, hyperdetailed, (artstation:1.5), warm lights, dramatic light, (intricate details:1.2), vignette, complex background, rutkowski",
66
+ negative_prompt="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
67
+ num_inference_steps=steps,
68
+ guidance_scale=guidance,
69
+ width=width, height=height,
70
+ generator=torch.Generator("cuda").manual_seed(seed),
71
+ ).images
72
+ image[0].save(f"outputs/{num}_txt2img_gpu{_}.jpg")
73
+ results.append(image[0])
74
+ end_time = time.time()
75
+ execution_time = end_time - start_time
76
+ return results, f"Time taken: {execution_time} sec."
77
+ elif inpaint_change and not i2i_change:
78
+ imageio.imwrite("output_image.png", inpaint_image["mask"])
79
+
80
+ num = random.randint(100, 99999)
81
+ start_time = time.time()
82
+ for _ in range(num_images):
83
+ image = pipe_inpaint(
84
+ prompt=f"{prompt}, epic realistic, faded, ((neutral colors)), art, (hdr:1.5), (muted colors:1.2), pastel, hyperdetailed, (artstation:1.5), warm lights, dramatic light, (intricate details:1.2), vignette, complex background, rutkowski",
85
+ image=inpaint_image["image"],
86
+ mask_image=inpaint_image["mask"],
87
+ negative_prompt="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
88
+ num_inference_steps=steps,
89
+ guidance_scale=guidance,
90
+ strength=inpaint_strength,
91
+ width=width, height=height,
92
+ generator=torch.Generator("cuda").manual_seed(seed),
93
+ ).images
94
+ image[0].save(f"outputs/{num}_inpaint_gpu{_}.jpg")
95
+ results.append(image[0])
96
+ end_time = time.time()
97
+ execution_time = end_time - start_time
98
+ return results, f"Time taken: {execution_time} sec."
99
+
100
+ else:
101
+ num = random.randint(100, 99999)
102
+ start_time = time.time()
103
+ for _ in range(num_images):
104
+ image = pipe_i2i(
105
+ prompt=f"{prompt}, epic realistic, faded, ((neutral colors)), art, (hdr:1.5), (muted colors:1.2), pastel, hyperdetailed, (artstation:1.5), warm lights, dramatic light, (intricate details:1.2), vignette, complex background, rutkowski",
106
+ negative_prompt="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
107
+ image=init,
108
+ num_inference_steps=steps,
109
+ guidance_scale=guidance,
111
+ strength=i2i_strength,
112
+ generator=torch.Generator("cuda").manual_seed(seed),
113
+ ).images
114
+ image[0].save(f"outputs/{num}_img2img_gpu{_}.jpg")
115
+ results.append(image[0])
116
+ end_time = time.time()
117
+ execution_time = end_time - start_time
118
+ return results, f"Time taken: {execution_time} sec."
119
+
120
+
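
For reference, a plain text-to-image call into gpugen() looks like the sketch below; the values are illustrative, leaving both flags False selects the txt2img branch, and the images are also written to outputs/:

from engine.generate import gpugen

images, timing = gpugen(
    prompt="a cozy cabin in a snowy forest",
    mode="Fast",              # "High Quality" = 45 steps, "Fast" = 30, anything else = 20
    guidance=7.0,
    width=768,
    height=512,
    num_images=1,
    i2i_strength=0.6,
    inpaint_strength=0.6,
    i2i_change=False,
    inpaint_change=False,
)
print(timing)                 # "Time taken: ... sec."
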
engine/generateCPU.py ADDED
@@ -0,0 +1,85 @@
1
+ from configs.lcm_ov_pipeline import OVLatentConsistencyModelPipeline
2
+ from configs.lcm_scheduler import LCMScheduler
3
+ import random
4
+ import requests
5
+ import gradio as gr
6
+ import torch
7
+ import time
8
+ from PIL import Image
9
+ from io import BytesIO
10
+ import os
11
+ from dotenv import load_dotenv
12
+
13
+ load_dotenv("config.txt")
14
+
15
+ scheduler = LCMScheduler.from_pretrained("models/checkpoint/cpu-model", subfolder = "scheduler")
16
+
17
+ pipe_t2i = OVLatentConsistencyModelPipeline.from_pretrained(
18
+ "models/checkpoint/cpu-model",
19
+ scheduler=scheduler,
20
+ compile = False,
21
+ )
22
+
23
+ width = int(input('Enter width: '))
24
+ height = int(input('Enter height: '))
25
+
26
+ pipe_t2i.reshape(batch_size=1, width=width, height=height, num_images_per_prompt=1)
27
+ pipe_t2i.compile()
28
+
29
+ print("[PIPE COMPILED]")
30
+
31
+ def cpugen(prompt, mode, guidance, num_images, progress = gr.Progress(track_tqdm=True)):
32
+ img2img_change=False
33
+ results = []
34
+ if mode == "Fast":
35
+ steps = 6
36
+ elif mode == "High Quality":
37
+ steps = 10
38
+ else:
39
+ steps = 4
40
+ seed = random.randint(1, 99999999)
41
+ num = random.randint(100, 99999)
42
+ #name = f"outputs/{num}_txt2img_cpu.jpg"
43
+ if not img2img_change:
44
+ start_time = time.time()
45
+ for _ in range(num_images):
46
+ image = pipe_t2i(
47
+ prompt=f"{prompt}, epic realistic, faded, ((neutral colors)), art, (hdr:1.5), (muted colors:1.2), pastel, hyperdetailed, (artstation:1.5), warm lights, dramatic light, (intricate details:1.2), vignette, complex background, rutkowski",
48
+ #negative_prompt="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
49
+ width=width,
50
+ height=height,
51
+ num_inference_steps=steps,
52
+ guidance_scale=guidance,
53
+ output_type="pil"
54
+ ).images
55
+ image[0].save(f"outputs/{num}_txt2img_cpu{_}.jpg")
56
+ results.append(image[0])
57
+ #results[_].save(name)
58
+ end_time = time.time()
59
+ execution_time = end_time - start_time
60
+ '''
61
+ else:
62
+ init_image = init.resize((width, height))
63
+ start_time = time.time()
64
+ for _ in range(num_images):
65
+ image = pipe_i2i(
66
+ prompt=prompt,
67
+ image=init_image,
68
+ #negative_prompt="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
69
+ width=width,
70
+ height=height,
71
+ num_inference_steps=steps,
72
+ guidance_scale=guidance,
73
+ output_type="pil"
74
+ ).images
75
+ image[0].save(f"outputs/{num}_img2img_cpu{_}.jpg")
76
+ results.append(image[0])
77
+ #results[_].save(name)
78
+ end_time = time.time()
79
+ execution_time = end_time - start_time
80
+ '''
81
+
82
+
83
+
84
+
85
+ return results, f"Time taken: {execution_time} sec."
engine/promptGenerator.py ADDED
@@ -0,0 +1,31 @@
1
+ from transformers import pipeline, set_seed
2
+ import random
3
+ import time
4
+ import re
5
+
6
+ gpt2_pipe = pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion', tokenizer='gpt2')
7
+ with open("source/prompt-ideas.txt", "r") as f:
8
+ line = f.readlines()
9
+
10
+
11
+ def prompting(starting_text, history=None):
12
+ seed = random.randint(100, 1000000)
13
+ set_seed(seed)
14
+
15
+ if starting_text == "":
16
+ starting_text: str = line[random.randrange(0, len(line))].replace("\n", "").lower().capitalize()
17
+ starting_text: str = re.sub(r"[,:\-–.!;?_]", '', starting_text)
18
+
19
+ response = gpt2_pipe(starting_text, max_length=(len(starting_text) + random.randint(60, 90)), num_return_sequences=1)
20
+ response_list = []
21
+ for x in response:
22
+ resp = x['generated_text'].strip()
23
+ if resp != starting_text and len(resp) > (len(starting_text) + 4) and resp.endswith((":", "-", "—")) is False:
24
+ response_list.append(resp+'\n')
25
+
26
+ response_end = "\n".join(response_list)
27
+ response_end = re.sub('[^ ]+\.[^ ]+','', response_end)
28
+ response_end = response_end.replace("<", "").replace(">", "")
29
+
30
+ if response_end != "":
31
+ return response_end
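
A small usage sketch for the prompt expander; the MagicPrompt GPT-2 checkpoint is downloaded the first time the module is imported, and the function returns None when every sampled continuation gets filtered out:

from engine.promptGenerator import prompting

expanded = prompting("ancient library lit by candles", history=None)
print(expanded)
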
engine/upscaler.py ADDED
@@ -0,0 +1,14 @@
1
+ import cv2
2
+ import numpy as np
3
+ import time
4
+
5
+ def upscale_image(input_image):
6
+ start_time = time.time()
7
+
8
+ upscale_factor = 2
9
+ output_image = cv2.resize(input_image, None, fx = upscale_factor, fy = upscale_factor, interpolation = cv2.INTER_CUBIC)
10
+
11
+ end_time = time.time()
12
+ execution_time = end_time - start_time
13
+
14
+ return [output_image], f"Time taken: {execution_time} sec."
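
The upscaler is a plain 2x bicubic resize and expects a numpy array (what cv2.imread or Gradio's numpy image components produce), not a PIL image. An illustrative call:

import cv2
from engine.upscaler import upscale_image

img = cv2.imread("outputs/example.jpg")            # any existing image path
upscaled, timing = upscale_image(img)
cv2.imwrite("outputs/example_x2.jpg", upscaled[0])
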
first-run.bat ADDED
@@ -0,0 +1,3 @@
1
+ @echo off
2
+
3
+ pip install -q -r pip/requirements.txt
for_colab/engine/generate.py ADDED
@@ -0,0 +1,120 @@
1
+ import random
2
+ import requests
3
+ import torch
4
+ import time
5
+ import gradio as gr
6
+ from io import BytesIO
7
+ from PIL import Image
8
+ import imageio
9
+ from dotenv import load_dotenv
10
+ import os
11
+
12
+ load_dotenv("config.txt")
13
+
14
+ path_to_base_model = "models/checkpoint/gpu-model/base/dreamdrop-v1.safetensors"
15
+ path_to_inpaint_model = "models/checkpoint/gpu-model/inpaint/dreamdrop-inpainting.safetensors"
16
+
17
+ xl = os.getenv("xl")
18
+
19
+ if xl == "True":
20
+ from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline
21
+ pipe_t2i = StableDiffusionXLPipeline.from_single_file(path_to_base_model, torch_dtype=torch.float16, use_safetensors=True)
22
+ pipe_t2i = pipe_t2i.to("cuda")
23
+
24
+ pipe_i2i = StableDiffusionXLImg2ImgPipeline.from_single_file(path_to_base_model, torch_dtype=torch.float16, use_safetensors=True)
25
+ pipe_i2i = pipe_i2i.to("cuda")
26
+
27
+ pipe_inpaint = StableDiffusionXLInpaintPipeline.from_single_file(path_to_inpaint_model, torch_dtype=torch.float16, use_safetensors=True)
28
+ pipe_inpaint = pipe_inpaint.to("cuda")
29
+ else:
30
+ from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline
31
+ pipe_t2i = StableDiffusionPipeline.from_single_file(path_to_base_model, torch_dtype=torch.float16, use_safetensors=True)
32
+ pipe_t2i = pipe_t2i.to("cuda")
33
+
34
+ pipe_i2i = StableDiffusionImg2ImgPipeline.from_single_file(path_to_base_model, torch_dtype=torch.float16, use_safetensors=True)
35
+ pipe_i2i = pipe_i2i.to("cuda")
36
+
37
+ pipe_inpaint = StableDiffusionInpaintPipeline.from_single_file(path_to_inpaint_model, torch_dtype=torch.float16, use_safetensors=True)
38
+ pipe_inpaint = pipe_inpaint.to("cuda")
39
+
40
+
41
+ pipe_t2i.load_lora_weights(pretrained_model_name_or_path_or_dict="models/lora", weight_name="epic_noiseoffset.safetensors")
42
+ pipe_t2i.fuse_lora(lora_scale=0.1)
43
+
44
+ pipe_i2i.load_lora_weights(pretrained_model_name_or_path_or_dict="models/lora", weight_name="epic_noiseoffset.safetensors")
45
+ pipe_i2i.fuse_lora(lora_scale=0.1)
46
+
47
+ pipe_inpaint.load_lora_weights(pretrained_model_name_or_path_or_dict="models/lora", weight_name="epic_noiseoffset.safetensors")
48
+ pipe_inpaint.fuse_lora(lora_scale=0.1)
49
+
50
+
51
+ def gpugen(prompt, mode, guidance, width, height, num_images, i2i_strength, inpaint_strength, i2i_change, inpaint_change, init=None, inpaint_image=None, progress = gr.Progress(track_tqdm=True)):
52
+ if mode == "Fast":
53
+ steps = 30
54
+ elif mode == "High Quality":
55
+ steps = 45
56
+ else:
57
+ steps = 20
58
+ results = []
59
+ seed = random.randint(1, 9999999)
60
+ if not i2i_change and not inpaint_change:
61
+ num = random.randint(100, 99999)
62
+ start_time = time.time()
63
+ for _ in range(num_images):
64
+ image = pipe_t2i(
65
+ prompt=prompt,
66
+ negative_prompt="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
67
+ num_inference_steps=steps,
68
+ guidance_scale=guidance,
69
+ width=width, height=height,
70
+ generator=torch.Generator("cuda").manual_seed(seed),
71
+ ).images
72
+ image[0].save(f"outputs/{num}_txt2img_gpu{_}.jpg")
73
+ results.append(image[0])
74
+ end_time = time.time()
75
+ execution_time = end_time - start_time
76
+ return results, f"Time taken: {execution_time} sec."
77
+ elif inpaint_change and not i2i_change:
78
+ imageio.imwrite("output_image.png", inpaint_image["mask"])
79
+
80
+ num = random.randint(100, 99999)
81
+ start_time = time.time()
82
+ for _ in range(num_images):
83
+ image = pipe_inpaint(
84
+ prompt=prompt,
85
+ image=inpaint_image["image"],
86
+ mask_image=inpaint_image["mask"],
87
+ negative_prompt="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
88
+ num_inference_steps=steps,
89
+ guidance_scale=guidance,
90
+ strength=inpaint_strength,
91
+ width=width, height=height,
92
+ generator=torch.Generator("cuda").manual_seed(seed),
93
+ ).images
94
+ image[0].save(f"outputs/{num}_inpaint_gpu{_}.jpg")
95
+ results.append(image[0])
96
+ end_time = time.time()
97
+ execution_time = end_time - start_time
98
+ return results, f"Time taken: {execution_time} sec."
99
+
100
+ else:
101
+ num = random.randint(100, 99999)
102
+ start_time = time.time()
103
+ for _ in range(num_images):
104
+ image = pipe_i2i(
105
+ prompt=prompt,
106
+ negative_prompt="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
107
+ image=init,
108
+ num_inference_steps=steps,
109
+ guidance_scale=guidance,
111
+ strength=i2i_strength,
112
+ generator=torch.Generator("cuda").manual_seed(seed),
113
+ ).images
114
+ image[0].save(f"outputs/{num}_img2img_gpu{_}.jpg")
115
+ results.append(image[0])
116
+ end_time = time.time()
117
+ execution_time = end_time - start_time
118
+ return results, f"Time taken: {execution_time} sec."
119
+
120
+
index.html CHANGED
@@ -3,12 +3,12 @@
3
  <head>
4
  <meta charset="utf-8" />
5
  <meta name="viewport" content="width=device-width" />
6
- <title>My static Space</title>
7
  <link rel="stylesheet" href="style.css" />
8
  </head>
9
  <body>
10
  <div class="card">
11
- <h1>Welcome to your static Space!</h1>
12
  <p>You can modify this app directly by editing <i>index.html</i> in the Files and versions tab.</p>
13
  <p>
14
  Also don't forget to check the
 
3
  <head>
4
  <meta charset="utf-8" />
5
  <meta name="viewport" content="width=device-width" />
6
+ <title>Rensor</title>
7
  <link rel="stylesheet" href="style.css" />
8
  </head>
9
  <body>
10
  <div class="card">
11
+ <h1>Rensor Diffusion</h1>
12
  <p>You can modify this app directly by editing <i>index.html</i> in the Files and versions tab.</p>
13
  <p>
14
  Also don't forget to check the
install-model-cpu.bat ADDED
@@ -0,0 +1,3 @@
1
+ @echo off
2
+
3
+ python install-model-cpu.py
install-model-cpu.py ADDED
@@ -0,0 +1,18 @@
1
+ from configs.lcm_ov_pipeline import OVLatentConsistencyModelPipeline
2
+ from configs.lcm_scheduler import LCMScheduler
3
+ #from optimum.intel import OVStableDiffusionImg2ImgPipeline
4
+ import random
5
+ import requests
6
+ import torch
7
+ from PIL import Image
8
+ from io import BytesIO
9
+
10
+ scheduler = LCMScheduler.from_pretrained("deinferno/LCM_Dreamshaper_v7-openvino", subfolder = "scheduler")
11
+
12
+ pipe = OVLatentConsistencyModelPipeline.from_pretrained(
13
+ "deinferno/LCM_Dreamshaper_v7-openvino",
14
+ scheduler=scheduler,
15
+ compile = False,
16
+ )
17
+
18
+ pipe.save_pretrained(save_directory="models/checkpoint/cpu-model")
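
engine/generateCPU.py later loads the scheduler from models/checkpoint/cpu-model/scheduler, so if save_pretrained() above does not already write that subfolder, the scheduler can be saved alongside the pipeline with one extra line (a sketch, not part of this script):

scheduler.save_pretrained("models/checkpoint/cpu-model/scheduler")
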
install-model-gpu.bat ADDED
@@ -0,0 +1,5 @@
1
+ @echo off
2
+
3
+ python requests/request-to-model-gpu.py
4
+
5
+ python install-model-gpu.py
install-model-gpu.py ADDED
@@ -0,0 +1,29 @@
1
+ import random
2
+ import requests
3
+ import torch
4
+ from io import BytesIO
5
+ from PIL import Image
6
+ from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, DiffusionPipeline
7
+ import os
8
+ import time
9
+ from dotenv import load_dotenv
10
+
11
+ load_dotenv("config.txt")
12
+
13
+ xl = os.getenv("xl")
14
+
15
+ with torch.no_grad():
16
+ pipe = StableDiffusionPipeline.from_single_file(
17
+ "models/checkpoint/gpu-model/base/dreamdrop-v1.safetensors",
18
+ use_safetensors=True,
19
+ cache_dir="models/checkpoint/gpu-model/base/cache_dir",
20
+ scheduler_type="euler-ancestral"
21
+ )
22
+ time.sleep(20)
23
+ pipe_inpaint = StableDiffusionInpaintPipeline.from_single_file(
24
+ "models/checkpoint/gpu-model/inpaint/dreamdrop-inpainting.safetensors",
25
+ use_safetensors=True,
26
+ cache_dir="models/checkpoint/gpu-model/inpaint/cache_dir",
27
+ scheduler_type="euler-ancestral"
28
+ )
29
+
models/checkpoint/cpu-model/_Base model for CPU ADDED
File without changes
models/checkpoint/gpu-model/base/_Base models for GPU ADDED
File without changes
models/checkpoint/gpu-model/inpaint/_Inpaint models for GPU ADDED
File without changes
models/lora/_There the best loras for generation ADDED
File without changes
models/lora/epic_noiseoffset.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:81680c064e9f50dfcc11ec5e25da1832f523ec84afd544f372c7786f3ddcbbac
3
+ size 81479800
outputs/_All generated images saving there ADDED
File without changes
pip/_If error try run .bat file ADDED
File without changes
pip/install-or-update.bat ADDED
@@ -0,0 +1,3 @@
1
+ @echo off
2
+
3
+ pip install -q -r requirements.txt
pip/requirements.txt ADDED
@@ -0,0 +1,21 @@
1
+ optimum-intel
2
+ openvino
3
+ diffusers
4
+ onnx
5
+ gradio==3.41.2
6
+ spaces
7
+ huggingface_hub
8
+ transformers
9
+ pillow
10
+ torch
11
+ requests
12
+ argparse
13
+ numpy
14
+ opencv-python
15
+ rembg
16
+ imageio
17
+ python-dotenv
18
+ jinja2
19
+ sentencepiece
20
+ httpx==0.24.1
21
+ omegaconf
requests/request-to-model-gpu.py ADDED
@@ -0,0 +1,15 @@
1
+ import subprocess
2
+
3
+ def download_file_with_wget(url, save_directory):
4
+ try:
5
+ command = ["wget", url, "-P", save_directory]
6
+
7
+ subprocess.run(command, check=True)
8
+
9
+ except subprocess.CalledProcessError as e:
10
+ print(f"Error: {e}")
11
+
12
+
13
+ download_file_with_wget("https://huggingface.co/ehristoforu/dreamdrop/resolve/main/dreamdrop-v1.safetensors", "models/checkpoint/gpu-model/base")
14
+
15
+ download_file_with_wget("https://huggingface.co/ehristoforu/dreamdrop-inpainting/resolve/main/dreamdrop-inpainting.safetensors", "models/checkpoint/gpu-model/inpaint")
source/_All source files saving there ADDED
File without changes
source/prompt-ideas.txt ADDED
The diff for this file is too large to render.
 
theme/ui-theme.json ADDED
@@ -0,0 +1 @@
1
+ {"theme": {"_font": [{"__gradio_font__": true, "name": "Source Sans Pro", "class": "google"}, {"__gradio_font__": true, "name": "ui-sans-serif", "class": "font"}, {"__gradio_font__": true, "name": "system-ui", "class": "font"}, {"__gradio_font__": true, "name": "sans-serif", "class": "font"}], "_font_mono": [{"__gradio_font__": true, "name": "IBM Plex Mono", "class": "google"}, {"__gradio_font__": true, "name": "ui-monospace", "class": "font"}, {"__gradio_font__": true, "name": "Consolas", "class": "font"}, {"__gradio_font__": true, "name": "monospace", "class": "font"}], "_stylesheets": ["https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap", "https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;600&display=swap"], "background_fill_primary": "white", "background_fill_primary_dark": "*neutral_950", "background_fill_secondary": "*neutral_50", "background_fill_secondary_dark": "*neutral_900", "block_background_fill": "*background_fill_primary", "block_background_fill_dark": "*neutral_800", "block_border_color": "*border_color_primary", "block_border_color_dark": "*border_color_primary", "block_border_width": "1px", "block_border_width_dark": "1px", "block_info_text_color": "*body_text_color_subdued", "block_info_text_color_dark": "*body_text_color_subdued", "block_info_text_size": "*text_sm", "block_info_text_weight": "400", "block_label_background_fill": "*background_fill_primary", "block_label_background_fill_dark": "*background_fill_secondary", "block_label_border_color": "*border_color_primary", "block_label_border_color_dark": "*border_color_primary", "block_label_border_width": "1px", "block_label_border_width_dark": "1px", "block_label_margin": "0", "block_label_padding": "*spacing_sm *spacing_lg", "block_label_radius": "calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px) 0", "block_label_right_radius": "0 calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px)", "block_label_shadow": "*block_shadow", "block_label_text_color": "*neutral_500", "block_label_text_color_dark": "*neutral_200", "block_label_text_size": "*text_sm", "block_label_text_weight": "400", "block_padding": "*spacing_xl calc(*spacing_xl + 2px)", "block_radius": "*radius_lg", "block_shadow": "none", "block_shadow_dark": "none", "block_title_background_fill": "none", "block_title_background_fill_dark": "none", "block_title_border_color": "none", "block_title_border_color_dark": "none", "block_title_border_width": "0px", "block_title_border_width_dark": "0px", "block_title_padding": "0", "block_title_radius": "none", "block_title_text_color": "*neutral_500", "block_title_text_color_dark": "*neutral_200", "block_title_text_size": "*text_md", "block_title_text_weight": "400", "body_background_fill": "*background_fill_primary", "body_background_fill_dark": "*background_fill_primary", "body_text_color": "*neutral_800", "body_text_color_dark": "*neutral_100", "body_text_color_subdued": "*neutral_400", "body_text_color_subdued_dark": "*neutral_400", "body_text_size": "*text_md", "body_text_weight": "400", "border_color_accent": "*primary_300", "border_color_accent_dark": "*neutral_600", "border_color_accent_subdued": "*border_color_accent", "border_color_accent_subdued_dark": "*border_color_accent", "border_color_primary": "*neutral_200", "border_color_primary_dark": "*neutral_700", "button_border_width": "*input_border_width", "button_border_width_dark": "*input_border_width", "button_cancel_background_fill": "*button_secondary_background_fill", "button_cancel_background_fill_dark": 
"*button_secondary_background_fill", "button_cancel_background_fill_hover": "*button_cancel_background_fill", "button_cancel_background_fill_hover_dark": "*button_cancel_background_fill", "button_cancel_border_color": "*button_secondary_border_color", "button_cancel_border_color_dark": "*button_secondary_border_color", "button_cancel_border_color_hover": "*button_cancel_border_color", "button_cancel_border_color_hover_dark": "*button_cancel_border_color", "button_cancel_text_color": "*button_secondary_text_color", "button_cancel_text_color_dark": "*button_secondary_text_color", "button_cancel_text_color_hover": "*button_cancel_text_color", "button_cancel_text_color_hover_dark": "*button_cancel_text_color", "button_large_padding": "*spacing_lg calc(2 * *spacing_lg)", "button_large_radius": "*radius_lg", "button_large_text_size": "*text_lg", "button_large_text_weight": "600", "button_primary_background_fill": "*primary_200", "button_primary_background_fill_dark": "*primary_700", "button_primary_background_fill_hover": "*button_primary_background_fill", "button_primary_background_fill_hover_dark": "*button_primary_background_fill", "button_primary_border_color": "*primary_200", "button_primary_border_color_dark": "*primary_600", "button_primary_border_color_hover": "*button_primary_border_color", "button_primary_border_color_hover_dark": "*button_primary_border_color", "button_primary_text_color": "*primary_600", "button_primary_text_color_dark": "white", "button_primary_text_color_hover": "*button_primary_text_color", "button_primary_text_color_hover_dark": "*button_primary_text_color", "button_secondary_background_fill": "*neutral_200", "button_secondary_background_fill_dark": "*neutral_600", "button_secondary_background_fill_hover": "*button_secondary_background_fill", "button_secondary_background_fill_hover_dark": "*button_secondary_background_fill", "button_secondary_border_color": "*neutral_200", "button_secondary_border_color_dark": "*neutral_600", "button_secondary_border_color_hover": "*button_secondary_border_color", "button_secondary_border_color_hover_dark": "*button_secondary_border_color", "button_secondary_text_color": "*neutral_700", "button_secondary_text_color_dark": "white", "button_secondary_text_color_hover": "*button_secondary_text_color", "button_secondary_text_color_hover_dark": "*button_secondary_text_color", "button_shadow": "none", "button_shadow_active": "none", "button_shadow_hover": "none", "button_small_padding": "*spacing_sm calc(2 * *spacing_sm)", "button_small_radius": "*radius_lg", "button_small_text_size": "*text_md", "button_small_text_weight": "400", "button_transition": "background-color 0.2s ease", "checkbox_background_color": "*background_fill_primary", "checkbox_background_color_dark": "*neutral_800", "checkbox_background_color_focus": "*checkbox_background_color", "checkbox_background_color_focus_dark": "*checkbox_background_color", "checkbox_background_color_hover": "*checkbox_background_color", "checkbox_background_color_hover_dark": "*checkbox_background_color", "checkbox_background_color_selected": "*secondary_600", "checkbox_background_color_selected_dark": "*secondary_600", "checkbox_border_color": "*neutral_300", "checkbox_border_color_dark": "*neutral_700", "checkbox_border_color_focus": "*secondary_500", "checkbox_border_color_focus_dark": "*secondary_500", "checkbox_border_color_hover": "*neutral_300", "checkbox_border_color_hover_dark": "*neutral_600", "checkbox_border_color_selected": "*secondary_600", 
"checkbox_border_color_selected_dark": "*secondary_600", "checkbox_border_radius": "*radius_sm", "checkbox_border_width": "*input_border_width", "checkbox_border_width_dark": "*input_border_width", "checkbox_check": "url(\"data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e\")", "checkbox_label_background_fill": "*button_secondary_background_fill", "checkbox_label_background_fill_dark": "*button_secondary_background_fill", "checkbox_label_background_fill_hover": "*button_secondary_background_fill_hover", "checkbox_label_background_fill_hover_dark": "*button_secondary_background_fill_hover", "checkbox_label_background_fill_selected": "*checkbox_label_background_fill", "checkbox_label_background_fill_selected_dark": "*checkbox_label_background_fill", "checkbox_label_border_color": "*border_color_primary", "checkbox_label_border_color_dark": "*border_color_primary", "checkbox_label_border_color_hover": "*checkbox_label_border_color", "checkbox_label_border_color_hover_dark": "*checkbox_label_border_color", "checkbox_label_border_width": "*input_border_width", "checkbox_label_border_width_dark": "*input_border_width", "checkbox_label_gap": "*spacing_lg", "checkbox_label_padding": "*spacing_md calc(2 * *spacing_md)", "checkbox_label_shadow": "none", "checkbox_label_text_color": "*body_text_color", "checkbox_label_text_color_dark": "*body_text_color", "checkbox_label_text_color_selected": "*checkbox_label_text_color", "checkbox_label_text_color_selected_dark": "*checkbox_label_text_color", "checkbox_label_text_size": "*text_md", "checkbox_label_text_weight": "400", "checkbox_shadow": "*input_shadow", "code_background_fill": "*neutral_100", "code_background_fill_dark": "*neutral_800", "color_accent": "*primary_500", "color_accent_soft": "*primary_50", "color_accent_soft_dark": "*neutral_700", "container_radius": "*radius_lg", "embed_radius": "*radius_lg", "error_background_fill": "#fef2f2", "error_background_fill_dark": "*background_fill_primary", "error_border_color": "#b91c1c", "error_border_color_dark": "#ef4444", "error_border_width": "1px", "error_border_width_dark": "1px", "error_icon_color": "#b91c1c", "error_icon_color_dark": "#ef4444", "error_text_color": "#b91c1c", "error_text_color_dark": "#fef2f2", "font": "'Source Sans Pro', 'ui-sans-serif', 'system-ui', sans-serif", "font_mono": "'IBM Plex Mono', 'ui-monospace', 'Consolas', monospace", "form_gap_width": "0px", "input_background_fill": "*neutral_100", "input_background_fill_dark": "*neutral_700", "input_background_fill_focus": "*secondary_500", "input_background_fill_focus_dark": "*secondary_600", "input_background_fill_hover": "*input_background_fill", "input_background_fill_hover_dark": "*input_background_fill", "input_border_color": "*border_color_primary", "input_border_color_dark": "*border_color_primary", "input_border_color_focus": "*secondary_300", "input_border_color_focus_dark": "*neutral_700", "input_border_color_hover": "*input_border_color", "input_border_color_hover_dark": "*input_border_color", "input_border_width": "0px", "input_border_width_dark": "0px", "input_padding": "*spacing_xl", "input_placeholder_color": "*neutral_400", "input_placeholder_color_dark": "*neutral_500", "input_radius": "*radius_lg", "input_shadow": "none", "input_shadow_dark": "none", "input_shadow_focus": "*input_shadow", "input_shadow_focus_dark": 
"*input_shadow", "input_text_size": "*text_md", "input_text_weight": "400", "layout_gap": "*spacing_xxl", "link_text_color": "*secondary_600", "link_text_color_active": "*secondary_600", "link_text_color_active_dark": "*secondary_500", "link_text_color_dark": "*secondary_500", "link_text_color_hover": "*secondary_700", "link_text_color_hover_dark": "*secondary_400", "link_text_color_visited": "*secondary_500", "link_text_color_visited_dark": "*secondary_600", "loader_color": "*color_accent", "loader_color_dark": "*color_accent", "name": "base", "neutral_100": "#f3f4f6", "neutral_200": "#e5e7eb", "neutral_300": "#d1d5db", "neutral_400": "#9ca3af", "neutral_50": "#f9fafb", "neutral_500": "#6b7280", "neutral_600": "#4b5563", "neutral_700": "#374151", "neutral_800": "#1f2937", "neutral_900": "#111827", "neutral_950": "#0b0f19", "panel_background_fill": "*background_fill_secondary", "panel_background_fill_dark": "*background_fill_secondary", "panel_border_color": "*border_color_primary", "panel_border_color_dark": "*border_color_primary", "panel_border_width": "0", "panel_border_width_dark": "0", "primary_100": "#ede9fe", "primary_200": "#ddd6fe", "primary_300": "#c4b5fd", "primary_400": "#a78bfa", "primary_50": "#f5f3ff", "primary_500": "#8b5cf6", "primary_600": "#7c3aed", "primary_700": "#6d28d9", "primary_800": "#5b21b6", "primary_900": "#4c1d95", "primary_950": "#431d7f", "prose_header_text_weight": "600", "prose_text_size": "*text_md", "prose_text_weight": "400", "radio_circle": "url(\"data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e\")", "radius_lg": "12px", "radius_md": "8px", "radius_sm": "6px", "radius_xl": "16px", "radius_xs": "4px", "radius_xxl": "24px", "radius_xxs": "2px", "secondary_100": "#dcfce7", "secondary_200": "#bbf7d0", "secondary_300": "#86efac", "secondary_400": "#4ade80", "secondary_50": "#f0fdf4", "secondary_500": "#22c55e", "secondary_600": "#16a34a", "secondary_700": "#15803d", "secondary_800": "#166534", "secondary_900": "#14532d", "secondary_950": "#134e28", "section_header_text_size": "*text_md", "section_header_text_weight": "400", "shadow_drop": "rgba(0,0,0,0.05) 0px 1px 2px 0px", "shadow_drop_lg": "0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1)", "shadow_inset": "rgba(0,0,0,0.05) 0px 2px 4px 0px inset", "shadow_spread": "3px", "shadow_spread_dark": "1px", "slider_color": "#2563eb", "slider_color_dark": "#2563eb", "spacing_lg": "6px", "spacing_md": "4px", "spacing_sm": "2px", "spacing_xl": "9px", "spacing_xs": "1px", "spacing_xxl": "12px", "spacing_xxs": "1px", "stat_background_fill": "*primary_300", "stat_background_fill_dark": "*primary_500", "table_border_color": "*neutral_300", "table_border_color_dark": "*neutral_700", "table_even_background_fill": "white", "table_even_background_fill_dark": "*neutral_950", "table_odd_background_fill": "*neutral_50", "table_odd_background_fill_dark": "*neutral_900", "table_radius": "*radius_lg", "table_row_focus": "*color_accent_soft", "table_row_focus_dark": "*color_accent_soft", "text_lg": "16px", "text_md": "13px", "text_sm": "11px", "text_xl": "20px", "text_xs": "9px", "text_xxl": "24px", "text_xxs": "8px"}, "version": "0.0.1"}