toto10 committed on
Commit
4cca094
1 Parent(s): c12e284

d26e48afae727a16149600d9a8cdb5607e526b5ef6a156db24a34aad6e706740

Browse files
Files changed (50) hide show
  1. modules/__pycache__/scripts_postprocessing.cpython-310.pyc +0 -0
  2. modules/__pycache__/sd_disable_initialization.cpython-310.pyc +0 -0
  3. modules/__pycache__/sd_hijack.cpython-310.pyc +0 -0
  4. modules/__pycache__/sd_hijack_checkpoint.cpython-310.pyc +0 -0
  5. modules/__pycache__/sd_hijack_clip.cpython-310.pyc +0 -0
  6. modules/__pycache__/sd_hijack_inpainting.cpython-310.pyc +0 -0
  7. modules/__pycache__/sd_hijack_open_clip.cpython-310.pyc +0 -0
  8. modules/__pycache__/sd_hijack_optimizations.cpython-310.pyc +0 -0
  9. modules/__pycache__/sd_hijack_unet.cpython-310.pyc +0 -0
  10. modules/__pycache__/sd_hijack_utils.cpython-310.pyc +0 -0
  11. modules/__pycache__/sd_hijack_xlmr.cpython-310.pyc +0 -0
  12. modules/__pycache__/sd_models.cpython-310.pyc +0 -0
  13. modules/__pycache__/sd_models_config.cpython-310.pyc +0 -0
  14. modules/__pycache__/sd_models_xl.cpython-310.pyc +0 -0
  15. modules/__pycache__/sd_samplers.cpython-310.pyc +0 -0
  16. modules/__pycache__/sd_samplers_common.cpython-310.pyc +0 -0
  17. modules/__pycache__/sd_samplers_compvis.cpython-310.pyc +0 -0
  18. modules/__pycache__/sd_samplers_kdiffusion.cpython-310.pyc +0 -0
  19. modules/__pycache__/sd_unet.cpython-310.pyc +0 -0
  20. modules/__pycache__/sd_vae.cpython-310.pyc +0 -0
  21. modules/__pycache__/sd_vae_approx.cpython-310.pyc +0 -0
  22. modules/__pycache__/sd_vae_taesd.cpython-310.pyc +0 -0
  23. modules/__pycache__/shared.cpython-310.pyc +0 -0
  24. modules/__pycache__/shared_items.cpython-310.pyc +0 -0
  25. modules/__pycache__/styles.cpython-310.pyc +0 -0
  26. modules/__pycache__/sub_quadratic_attention.cpython-310.pyc +0 -0
  27. modules/__pycache__/sysinfo.cpython-310.pyc +0 -0
  28. modules/__pycache__/timer.cpython-310.pyc +0 -0
  29. modules/__pycache__/txt2img.cpython-310.pyc +0 -0
  30. modules/__pycache__/ui.cpython-310.pyc +0 -0
  31. modules/__pycache__/ui_common.cpython-310.pyc +0 -0
  32. modules/__pycache__/ui_components.cpython-310.pyc +0 -0
  33. modules/__pycache__/ui_extensions.cpython-310.pyc +0 -0
  34. modules/__pycache__/ui_extra_networks.cpython-310.pyc +0 -0
  35. modules/__pycache__/ui_extra_networks_checkpoints.cpython-310.pyc +0 -0
  36. modules/__pycache__/ui_extra_networks_hypernets.cpython-310.pyc +0 -0
  37. modules/__pycache__/ui_extra_networks_textual_inversion.cpython-310.pyc +0 -0
  38. modules/__pycache__/ui_extra_networks_user_metadata.cpython-310.pyc +0 -0
  39. modules/__pycache__/ui_gradio_extensions.cpython-310.pyc +0 -0
  40. modules/__pycache__/ui_loadsave.cpython-310.pyc +0 -0
  41. modules/__pycache__/ui_postprocessing.cpython-310.pyc +0 -0
  42. modules/__pycache__/ui_settings.cpython-310.pyc +0 -0
  43. modules/__pycache__/ui_tempdir.cpython-310.pyc +0 -0
  44. modules/__pycache__/upscaler.cpython-310.pyc +0 -0
  45. modules/__pycache__/xlmr.cpython-310.pyc +0 -0
  46. modules/api/__pycache__/api.cpython-310.pyc +0 -0
  47. modules/api/__pycache__/models.cpython-310.pyc +0 -0
  48. modules/api/api.py +742 -0
  49. modules/api/models.py +314 -0
  50. modules/cache.py +120 -0
modules/__pycache__/scripts_postprocessing.cpython-310.pyc ADDED
Binary file (5.58 kB). View file
 
modules/__pycache__/sd_disable_initialization.cpython-310.pyc ADDED
Binary file (4.67 kB). View file
 
modules/__pycache__/sd_hijack.cpython-310.pyc ADDED
Binary file (11.3 kB). View file
 
modules/__pycache__/sd_hijack_checkpoint.cpython-310.pyc ADDED
Binary file (1.36 kB). View file
 
modules/__pycache__/sd_hijack_clip.cpython-310.pyc ADDED
Binary file (12.8 kB). View file
 
modules/__pycache__/sd_hijack_inpainting.cpython-310.pyc ADDED
Binary file (3.42 kB). View file
 
modules/__pycache__/sd_hijack_open_clip.cpython-310.pyc ADDED
Binary file (3.46 kB). View file
 
modules/__pycache__/sd_hijack_optimizations.cpython-310.pyc ADDED
Binary file (19.3 kB). View file
 
modules/__pycache__/sd_hijack_unet.cpython-310.pyc ADDED
Binary file (5.13 kB). View file
 
modules/__pycache__/sd_hijack_utils.cpython-310.pyc ADDED
Binary file (1.54 kB). View file
 
modules/__pycache__/sd_hijack_xlmr.cpython-310.pyc ADDED
Binary file (1.61 kB). View file
 
modules/__pycache__/sd_models.cpython-310.pyc ADDED
Binary file (18.5 kB). View file
 
modules/__pycache__/sd_models_config.cpython-310.pyc ADDED
Binary file (4.16 kB). View file
 
modules/__pycache__/sd_models_xl.cpython-310.pyc ADDED
Binary file (4.26 kB). View file
 
modules/__pycache__/sd_samplers.cpython-310.pyc ADDED
Binary file (1.78 kB). View file
 
modules/__pycache__/sd_samplers_common.cpython-310.pyc ADDED
Binary file (3.6 kB). View file
 
modules/__pycache__/sd_samplers_compvis.cpython-310.pyc ADDED
Binary file (8.05 kB). View file
 
modules/__pycache__/sd_samplers_kdiffusion.cpython-310.pyc ADDED
Binary file (17.2 kB). View file
 
modules/__pycache__/sd_unet.cpython-310.pyc ADDED
Binary file (2.89 kB). View file
 
modules/__pycache__/sd_vae.cpython-310.pyc ADDED
Binary file (5.2 kB). View file
 
modules/__pycache__/sd_vae_approx.cpython-310.pyc ADDED
Binary file (2.89 kB). View file
 
modules/__pycache__/sd_vae_taesd.cpython-310.pyc ADDED
Binary file (3.59 kB). View file
 
modules/__pycache__/shared.cpython-310.pyc ADDED
Binary file (47 kB). View file
 
modules/__pycache__/shared_items.cpython-310.pyc ADDED
Binary file (2.26 kB). View file
 
modules/__pycache__/styles.cpython-310.pyc ADDED
Binary file (5 kB). View file
 
modules/__pycache__/sub_quadratic_attention.cpython-310.pyc ADDED
Binary file (6.35 kB). View file
 
modules/__pycache__/sysinfo.cpython-310.pyc ADDED
Binary file (5.59 kB). View file
 
modules/__pycache__/timer.cpython-310.pyc ADDED
Binary file (3.5 kB). View file
 
modules/__pycache__/txt2img.cpython-310.pyc ADDED
Binary file (2.7 kB). View file
 
modules/__pycache__/ui.cpython-310.pyc ADDED
Binary file (73.7 kB). View file
 
modules/__pycache__/ui_common.cpython-310.pyc ADDED
Binary file (7.77 kB). View file
 
modules/__pycache__/ui_components.cpython-310.pyc ADDED
Binary file (3.38 kB). View file
 
modules/__pycache__/ui_extensions.cpython-310.pyc ADDED
Binary file (20.5 kB). View file
 
modules/__pycache__/ui_extra_networks.cpython-310.pyc ADDED
Binary file (18 kB). View file
 
modules/__pycache__/ui_extra_networks_checkpoints.cpython-310.pyc ADDED
Binary file (2.25 kB). View file
 
modules/__pycache__/ui_extra_networks_hypernets.cpython-310.pyc ADDED
Binary file (1.95 kB). View file
 
modules/__pycache__/ui_extra_networks_textual_inversion.cpython-310.pyc ADDED
Binary file (2.08 kB). View file
 
modules/__pycache__/ui_extra_networks_user_metadata.cpython-310.pyc ADDED
Binary file (7.37 kB). View file
 
modules/__pycache__/ui_gradio_extensions.cpython-310.pyc ADDED
Binary file (2.36 kB). View file
 
modules/__pycache__/ui_loadsave.cpython-310.pyc ADDED
Binary file (7.4 kB). View file
 
modules/__pycache__/ui_postprocessing.cpython-310.pyc ADDED
Binary file (3.08 kB). View file
 
modules/__pycache__/ui_settings.cpython-310.pyc ADDED
Binary file (10.3 kB). View file
 
modules/__pycache__/ui_tempdir.cpython-310.pyc ADDED
Binary file (2.59 kB). View file
 
modules/__pycache__/upscaler.cpython-310.pyc ADDED
Binary file (4.92 kB). View file
 
modules/__pycache__/xlmr.cpython-310.pyc ADDED
Binary file (4.87 kB). View file
 
modules/api/__pycache__/api.cpython-310.pyc ADDED
Binary file (28.5 kB). View file
 
modules/api/__pycache__/models.cpython-310.pyc ADDED
Binary file (17.4 kB). View file
 
modules/api/api.py ADDED
@@ -0,0 +1,742 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import io
3
+ import os
4
+ import time
5
+ import datetime
6
+ import uvicorn
7
+ import gradio as gr
8
+ from threading import Lock
9
+ from io import BytesIO
10
+ from fastapi import APIRouter, Depends, FastAPI, Request, Response
11
+ from fastapi.security import HTTPBasic, HTTPBasicCredentials
12
+ from fastapi.exceptions import HTTPException
13
+ from fastapi.responses import JSONResponse
14
+ from fastapi.encoders import jsonable_encoder
15
+ from secrets import compare_digest
16
+
17
+ import modules.shared as shared
18
+ from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart
19
+ from modules.api import models
20
+ from modules.shared import opts
21
+ from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
22
+ from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
23
+ from modules.textual_inversion.preprocess import preprocess
24
+ from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
25
+ from PIL import PngImagePlugin,Image
26
+ from modules.sd_models import checkpoints_list, unload_model_weights, reload_model_weights, checkpoint_aliases
27
+ from modules.sd_vae import vae_dict
28
+ from modules.sd_models_config import find_checkpoint_config_near_filename
29
+ from modules.realesrgan_model import get_realesrgan_models
30
+ from modules import devices
31
+ from typing import Dict, List, Any
32
+ import piexif
33
+ import piexif.helper
34
+ from contextlib import closing
35
+
36
+
37
def script_name_to_index(name, scripts):
    """Locate *name* among *scripts* by case-insensitive title.

    Returns the position of the first script whose ``title()`` matches,
    or raises HTTPException(422) when there is no match.
    """
    try:
        lowered_titles = [script.title().lower() for script in scripts]
        return lowered_titles.index(name.lower())
    except Exception as e:
        raise HTTPException(status_code=422, detail=f"Script '{name}' not found") from e
42
+
43
+
44
def validate_sampler_name(name):
    """Ensure *name* refers to a known sampler and return it unchanged.

    Raises HTTPException(404) when the sampler map has no entry for it.
    """
    if sd_samplers.all_samplers_map.get(name) is None:
        raise HTTPException(status_code=404, detail="Sampler not found")
    return name
50
+
51
+
52
def setUpscalers(req: dict):
    """Rename a request's upscaler fields to the names the extras pipeline expects.

    Mutates and returns the request object's attribute dict, replacing
    'upscaler_1'/'upscaler_2' with 'extras_upscaler_1'/'extras_upscaler_2'
    (defaulting to None when absent).
    """
    fields = vars(req)
    for n in (1, 2):
        fields[f'extras_upscaler_{n}'] = fields.pop(f'upscaler_{n}', None)
    return fields
57
+
58
+
59
def decode_base64_to_image(encoding):
    """Decode a base64 string (optionally a ``data:image/...`` URI) into a PIL image.

    Raises HTTPException(500) when the payload cannot be decoded as an image.
    """
    if encoding.startswith("data:image/"):
        # strip the "data:image/<type>;base64," header, keeping only the payload
        encoding = encoding.split(";")[1].split(",")[1]
    try:
        return Image.open(BytesIO(base64.b64decode(encoding)))
    except Exception as e:
        raise HTTPException(status_code=500, detail="Invalid encoded image") from e
67
+
68
+
69
def encode_pil_to_base64(image):
    """Serialize *image* per ``opts.samples_format`` and return the bytes base64-encoded.

    PNG output carries over string metadata from ``image.info`` as text chunks;
    JPEG/WEBP embed the 'parameters' info entry as an EXIF UserComment.
    Raises HTTPException(500) for any other configured format.
    """
    fmt = opts.samples_format.lower()
    with io.BytesIO() as output_bytes:
        if fmt == 'png':
            # copy textual metadata (e.g. generation parameters) into the PNG
            metadata = PngImagePlugin.PngInfo()
            use_metadata = False
            for key, value in image.info.items():
                if isinstance(key, str) and isinstance(value, str):
                    metadata.add_text(key, value)
                    use_metadata = True
            image.save(output_bytes, format="PNG", pnginfo=(metadata if use_metadata else None), quality=opts.jpeg_quality)
        elif fmt in ("jpg", "jpeg", "webp"):
            if image.mode == "RGBA":
                # these formats cannot store an alpha channel
                image = image.convert("RGB")
            parameters = image.info.get('parameters', None)
            exif_bytes = piexif.dump({
                "Exif": {piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(parameters or "", encoding="unicode")}
            })
            target_format = "JPEG" if fmt in ("jpg", "jpeg") else "WEBP"
            image.save(output_bytes, format=target_format, exif=exif_bytes, quality=opts.jpeg_quality)
        else:
            raise HTTPException(status_code=500, detail="Invalid image format")

        bytes_data = output_bytes.getvalue()

    return base64.b64encode(bytes_data)
99
+
100
+
101
def api_middleware(app: FastAPI):
    """Install request logging and exception-handling middleware on *app*.

    Registers: a timing/logging middleware, a catch-all exception middleware,
    and FastAPI exception handlers so every error is returned as a JSON body.
    """
    # Rich tracebacks are opt-in via the WEBUI_RICH_EXCEPTIONS env var; if the
    # imports fail we silently fall back to plain error reporting.
    rich_available = False
    try:
        if os.environ.get('WEBUI_RICH_EXCEPTIONS', None) is not None:
            import anyio # importing just so it can be placed on silent list
            import starlette # importing just so it can be placed on silent list
            from rich.console import Console
            console = Console()
            rich_available = True
    except Exception:
        pass

    @app.middleware("http")
    async def log_and_time(req: Request, call_next):
        # Measure wall-clock handling time and expose it in a response header.
        ts = time.time()
        res: Response = await call_next(req)
        duration = str(round(time.time() - ts, 4))
        res.headers["X-Process-Time"] = duration
        endpoint = req.scope.get('path', 'err')
        # Only log API traffic (paths under /sdapi), and only when enabled.
        if shared.cmd_opts.api_log and endpoint.startswith('/sdapi'):
            print('API {t} {code} {prot}/{ver} {method} {endpoint} {cli} {duration}'.format(
                t=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
                code=res.status_code,
                ver=req.scope.get('http_version', '0.0'),
                cli=req.scope.get('client', ('0:0.0.0', 0))[0],
                prot=req.scope.get('scheme', 'err'),
                method=req.scope.get('method', 'err'),
                endpoint=endpoint,
                duration=duration,
            ))
        return res

    def handle_exception(request: Request, e: Exception):
        # Build a JSON error body; vars(e) lets us pick up HTTPException's
        # 'detail'/'status_code' attributes without a type check.
        err = {
            "error": type(e).__name__,
            "detail": vars(e).get('detail', ''),
            "body": vars(e).get('body', ''),
            "errors": str(e),
        }
        if not isinstance(e, HTTPException): # do not print backtrace on known httpexceptions
            message = f"API error: {request.method}: {request.url} {err}"
            if rich_available:
                print(message)
                # suppress=[anyio, starlette] hides framework frames from the trace
                console.print_exception(show_locals=True, max_frames=2, extra_lines=1, suppress=[anyio, starlette], word_wrap=False, width=min([console.width, 200]))
            else:
                errors.report(message, exc_info=True)
        # Non-HTTP exceptions have no status_code attribute, so default to 500.
        return JSONResponse(status_code=vars(e).get('status_code', 500), content=jsonable_encoder(err))

    @app.middleware("http")
    async def exception_handling(request: Request, call_next):
        # Catch anything raised below the middleware stack.
        try:
            return await call_next(request)
        except Exception as e:
            return handle_exception(request, e)

    @app.exception_handler(Exception)
    async def fastapi_exception_handler(request: Request, e: Exception):
        return handle_exception(request, e)

    @app.exception_handler(HTTPException)
    async def http_exception_handler(request: Request, e: HTTPException):
        return handle_exception(request, e)
163
+
164
+
165
+ class Api:
166
+ def __init__(self, app: FastAPI, queue_lock: Lock):
167
+ if shared.cmd_opts.api_auth:
168
+ self.credentials = {}
169
+ for auth in shared.cmd_opts.api_auth.split(","):
170
+ user, password = auth.split(":")
171
+ self.credentials[user] = password
172
+
173
+ self.router = APIRouter()
174
+ self.app = app
175
+ self.queue_lock = queue_lock
176
+ api_middleware(self.app)
177
+ self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=models.TextToImageResponse)
178
+ self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=models.ImageToImageResponse)
179
+ self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=models.ExtrasSingleImageResponse)
180
+ self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=models.ExtrasBatchImagesResponse)
181
+ self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=models.PNGInfoResponse)
182
+ self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=models.ProgressResponse)
183
+ self.add_api_route("/sdapi/v1/interrogate", self.interrogateapi, methods=["POST"])
184
+ self.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"])
185
+ self.add_api_route("/sdapi/v1/skip", self.skip, methods=["POST"])
186
+ self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=models.OptionsModel)
187
+ self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"])
188
+ self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel)
189
+ self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[models.SamplerItem])
190
+ self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[models.UpscalerItem])
191
+ self.add_api_route("/sdapi/v1/latent-upscale-modes", self.get_latent_upscale_modes, methods=["GET"], response_model=List[models.LatentUpscalerModeItem])
192
+ self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[models.SDModelItem])
193
+ self.add_api_route("/sdapi/v1/sd-vae", self.get_sd_vaes, methods=["GET"], response_model=List[models.SDVaeItem])
194
+ self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[models.HypernetworkItem])
195
+ self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[models.FaceRestorerItem])
196
+ self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[models.RealesrganItem])
197
+ self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[models.PromptStyleItem])
198
+ self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=models.EmbeddingsResponse)
199
+ self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"])
200
+ self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse)
201
+ self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse)
202
+ self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=models.PreprocessResponse)
203
+ self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=models.TrainResponse)
204
+ self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=models.TrainResponse)
205
+ self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=models.MemoryResponse)
206
+ self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"])
207
+ self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"])
208
+ self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList)
209
+ self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=List[models.ScriptInfo])
210
+
211
+ if shared.cmd_opts.api_server_stop:
212
+ self.add_api_route("/sdapi/v1/server-kill", self.kill_webui, methods=["POST"])
213
+ self.add_api_route("/sdapi/v1/server-restart", self.restart_webui, methods=["POST"])
214
+ self.add_api_route("/sdapi/v1/server-stop", self.stop_webui, methods=["POST"])
215
+
216
+ self.default_script_arg_txt2img = []
217
+ self.default_script_arg_img2img = []
218
+
219
+ def add_api_route(self, path: str, endpoint, **kwargs):
220
+ if shared.cmd_opts.api_auth:
221
+ return self.app.add_api_route(path, endpoint, dependencies=[Depends(self.auth)], **kwargs)
222
+ return self.app.add_api_route(path, endpoint, **kwargs)
223
+
224
+ def auth(self, credentials: HTTPBasicCredentials = Depends(HTTPBasic())):
225
+ if credentials.username in self.credentials:
226
+ if compare_digest(credentials.password, self.credentials[credentials.username]):
227
+ return True
228
+
229
+ raise HTTPException(status_code=401, detail="Incorrect username or password", headers={"WWW-Authenticate": "Basic"})
230
+
231
+ def get_selectable_script(self, script_name, script_runner):
232
+ if script_name is None or script_name == "":
233
+ return None, None
234
+
235
+ script_idx = script_name_to_index(script_name, script_runner.selectable_scripts)
236
+ script = script_runner.selectable_scripts[script_idx]
237
+ return script, script_idx
238
+
239
+ def get_scripts_list(self):
240
+ t2ilist = [script.name for script in scripts.scripts_txt2img.scripts if script.name is not None]
241
+ i2ilist = [script.name for script in scripts.scripts_img2img.scripts if script.name is not None]
242
+
243
+ return models.ScriptsList(txt2img=t2ilist, img2img=i2ilist)
244
+
245
+ def get_script_info(self):
246
+ res = []
247
+
248
+ for script_list in [scripts.scripts_txt2img.scripts, scripts.scripts_img2img.scripts]:
249
+ res += [script.api_info for script in script_list if script.api_info is not None]
250
+
251
+ return res
252
+
253
+ def get_script(self, script_name, script_runner):
254
+ if script_name is None or script_name == "":
255
+ return None, None
256
+
257
+ script_idx = script_name_to_index(script_name, script_runner.scripts)
258
+ return script_runner.scripts[script_idx]
259
+
260
+ def init_default_script_args(self, script_runner):
261
+ #find max idx from the scripts in runner and generate a none array to init script_args
262
+ last_arg_index = 1
263
+ for script in script_runner.scripts:
264
+ if last_arg_index < script.args_to:
265
+ last_arg_index = script.args_to
266
+ # None everywhere except position 0 to initialize script args
267
+ script_args = [None]*last_arg_index
268
+ script_args[0] = 0
269
+
270
+ # get default values
271
+ with gr.Blocks(): # will throw errors calling ui function without this
272
+ for script in script_runner.scripts:
273
+ if script.ui(script.is_img2img):
274
+ ui_default_values = []
275
+ for elem in script.ui(script.is_img2img):
276
+ ui_default_values.append(elem.value)
277
+ script_args[script.args_from:script.args_to] = ui_default_values
278
+ return script_args
279
+
280
+ def init_script_args(self, request, default_script_args, selectable_scripts, selectable_idx, script_runner):
281
+ script_args = default_script_args.copy()
282
+ # position 0 in script_arg is the idx+1 of the selectable script that is going to be run when using scripts.scripts_*2img.run()
283
+ if selectable_scripts:
284
+ script_args[selectable_scripts.args_from:selectable_scripts.args_to] = request.script_args
285
+ script_args[0] = selectable_idx + 1
286
+
287
+ # Now check for always on scripts
288
+ if request.alwayson_scripts:
289
+ for alwayson_script_name in request.alwayson_scripts.keys():
290
+ alwayson_script = self.get_script(alwayson_script_name, script_runner)
291
+ if alwayson_script is None:
292
+ raise HTTPException(status_code=422, detail=f"always on script {alwayson_script_name} not found")
293
+ # Selectable script in always on script param check
294
+ if alwayson_script.alwayson is False:
295
+ raise HTTPException(status_code=422, detail="Cannot have a selectable script in the always on scripts params")
296
+ # always on script with no arg should always run so you don't really need to add them to the requests
297
+ if "args" in request.alwayson_scripts[alwayson_script_name]:
298
+ # min between arg length in scriptrunner and arg length in the request
299
+ for idx in range(0, min((alwayson_script.args_to - alwayson_script.args_from), len(request.alwayson_scripts[alwayson_script_name]["args"]))):
300
+ script_args[alwayson_script.args_from + idx] = request.alwayson_scripts[alwayson_script_name]["args"][idx]
301
+ return script_args
302
+
303
+ def text2imgapi(self, txt2imgreq: models.StableDiffusionTxt2ImgProcessingAPI):
304
+ script_runner = scripts.scripts_txt2img
305
+ if not script_runner.scripts:
306
+ script_runner.initialize_scripts(False)
307
+ ui.create_ui()
308
+ if not self.default_script_arg_txt2img:
309
+ self.default_script_arg_txt2img = self.init_default_script_args(script_runner)
310
+ selectable_scripts, selectable_script_idx = self.get_selectable_script(txt2imgreq.script_name, script_runner)
311
+
312
+ populate = txt2imgreq.copy(update={ # Override __init__ params
313
+ "sampler_name": validate_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index),
314
+ "do_not_save_samples": not txt2imgreq.save_images,
315
+ "do_not_save_grid": not txt2imgreq.save_images,
316
+ })
317
+ if populate.sampler_name:
318
+ populate.sampler_index = None # prevent a warning later on
319
+
320
+ args = vars(populate)
321
+ args.pop('script_name', None)
322
+ args.pop('script_args', None) # will refeed them to the pipeline directly after initializing them
323
+ args.pop('alwayson_scripts', None)
324
+
325
+ script_args = self.init_script_args(txt2imgreq, self.default_script_arg_txt2img, selectable_scripts, selectable_script_idx, script_runner)
326
+
327
+ send_images = args.pop('send_images', True)
328
+ args.pop('save_images', None)
329
+
330
+ with self.queue_lock:
331
+ with closing(StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **args)) as p:
332
+ p.scripts = script_runner
333
+ p.outpath_grids = opts.outdir_txt2img_grids
334
+ p.outpath_samples = opts.outdir_txt2img_samples
335
+
336
+ try:
337
+ shared.state.begin(job="scripts_txt2img")
338
+ if selectable_scripts is not None:
339
+ p.script_args = script_args
340
+ processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
341
+ else:
342
+ p.script_args = tuple(script_args) # Need to pass args as tuple here
343
+ processed = process_images(p)
344
+ finally:
345
+ shared.state.end()
346
+
347
+ b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
348
+
349
+ return models.TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js())
350
+
351
+ def img2imgapi(self, img2imgreq: models.StableDiffusionImg2ImgProcessingAPI):
352
+ init_images = img2imgreq.init_images
353
+ if init_images is None:
354
+ raise HTTPException(status_code=404, detail="Init image not found")
355
+
356
+ mask = img2imgreq.mask
357
+ if mask:
358
+ mask = decode_base64_to_image(mask)
359
+
360
+ script_runner = scripts.scripts_img2img
361
+ if not script_runner.scripts:
362
+ script_runner.initialize_scripts(True)
363
+ ui.create_ui()
364
+ if not self.default_script_arg_img2img:
365
+ self.default_script_arg_img2img = self.init_default_script_args(script_runner)
366
+ selectable_scripts, selectable_script_idx = self.get_selectable_script(img2imgreq.script_name, script_runner)
367
+
368
+ populate = img2imgreq.copy(update={ # Override __init__ params
369
+ "sampler_name": validate_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index),
370
+ "do_not_save_samples": not img2imgreq.save_images,
371
+ "do_not_save_grid": not img2imgreq.save_images,
372
+ "mask": mask,
373
+ })
374
+ if populate.sampler_name:
375
+ populate.sampler_index = None # prevent a warning later on
376
+
377
+ args = vars(populate)
378
+ args.pop('include_init_images', None) # this is meant to be done by "exclude": True in model, but it's for a reason that I cannot determine.
379
+ args.pop('script_name', None)
380
+ args.pop('script_args', None) # will refeed them to the pipeline directly after initializing them
381
+ args.pop('alwayson_scripts', None)
382
+
383
+ script_args = self.init_script_args(img2imgreq, self.default_script_arg_img2img, selectable_scripts, selectable_script_idx, script_runner)
384
+
385
+ send_images = args.pop('send_images', True)
386
+ args.pop('save_images', None)
387
+
388
+ with self.queue_lock:
389
+ with closing(StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args)) as p:
390
+ p.init_images = [decode_base64_to_image(x) for x in init_images]
391
+ p.scripts = script_runner
392
+ p.outpath_grids = opts.outdir_img2img_grids
393
+ p.outpath_samples = opts.outdir_img2img_samples
394
+
395
+ try:
396
+ shared.state.begin(job="scripts_img2img")
397
+ if selectable_scripts is not None:
398
+ p.script_args = script_args
399
+ processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
400
+ else:
401
+ p.script_args = tuple(script_args) # Need to pass args as tuple here
402
+ processed = process_images(p)
403
+ finally:
404
+ shared.state.end()
405
+
406
+ b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
407
+
408
+ if not img2imgreq.include_init_images:
409
+ img2imgreq.init_images = None
410
+ img2imgreq.mask = None
411
+
412
+ return models.ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js())
413
+
414
+ def extras_single_image_api(self, req: models.ExtrasSingleImageRequest):
415
+ reqDict = setUpscalers(req)
416
+
417
+ reqDict['image'] = decode_base64_to_image(reqDict['image'])
418
+
419
+ with self.queue_lock:
420
+ result = postprocessing.run_extras(extras_mode=0, image_folder="", input_dir="", output_dir="", save_output=False, **reqDict)
421
+
422
+ return models.ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1])
423
+
424
+ def extras_batch_images_api(self, req: models.ExtrasBatchImagesRequest):
425
+ reqDict = setUpscalers(req)
426
+
427
+ image_list = reqDict.pop('imageList', [])
428
+ image_folder = [decode_base64_to_image(x.data) for x in image_list]
429
+
430
+ with self.queue_lock:
431
+ result = postprocessing.run_extras(extras_mode=1, image_folder=image_folder, image="", input_dir="", output_dir="", save_output=False, **reqDict)
432
+
433
+ return models.ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1])
434
+
435
+ def pnginfoapi(self, req: models.PNGInfoRequest):
436
+ if(not req.image.strip()):
437
+ return models.PNGInfoResponse(info="")
438
+
439
+ image = decode_base64_to_image(req.image.strip())
440
+ if image is None:
441
+ return models.PNGInfoResponse(info="")
442
+
443
+ geninfo, items = images.read_info_from_image(image)
444
+ if geninfo is None:
445
+ geninfo = ""
446
+
447
+ items = {**{'parameters': geninfo}, **items}
448
+
449
+ return models.PNGInfoResponse(info=geninfo, items=items)
450
+
451
def progressapi(self, req: models.ProgressRequest = Depends()):
    """Report overall progress (0..1), relative ETA in seconds, a state snapshot
    and, unless skipped, the current preview image as base64."""
    # copy from check_progress_call of ui.py

    if shared.state.job_count == 0:
        # nothing is running at all
        return models.ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo)

    # avoid dividing zero
    progress = 0.01

    if shared.state.job_count > 0:
        # whole jobs completed so far
        progress += shared.state.job_no / shared.state.job_count
    if shared.state.sampling_steps > 0:
        # fractional progress of the job currently sampling
        progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps

    # ETA is extrapolated from elapsed time at the current progress rate
    time_since_start = time.time() - shared.state.time_start
    eta = (time_since_start/progress)
    eta_relative = eta-time_since_start

    progress = min(progress, 1)

    # ask the backend to refresh its preview image before reading it
    shared.state.set_current_image()

    current_image = None
    if shared.state.current_image and not req.skip_current_image:
        current_image = encode_pil_to_base64(shared.state.current_image)

    return models.ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo)
478
+
479
def interrogateapi(self, interrogatereq: models.InterrogateRequest):
    """Produce a caption for a base64 image using the requested interrogation model."""
    b64 = interrogatereq.image
    if b64 is None:
        raise HTTPException(status_code=404, detail="Image not found")

    pil_image = decode_base64_to_image(b64).convert('RGB')

    # Override object param
    model_name = interrogatereq.model
    with self.queue_lock:
        if model_name == "clip":
            caption = shared.interrogator.interrogate(pil_image)
        elif model_name == "deepdanbooru":
            caption = deepbooru.model.tag(pil_image)
        else:
            raise HTTPException(status_code=404, detail="Model not found")

    return models.InterrogateResponse(caption=caption)
497
+
498
def interruptapi(self):
    """Ask the backend to interrupt the currently running job; returns an empty object."""
    shared.state.interrupt()

    return {}
502
+
503
def unloadapi(self):
    """Unload the current checkpoint weights from memory; returns an empty object."""
    unload_model_weights()

    return {}
507
+
508
def reloadapi(self):
    """Reload the checkpoint weights previously unloaded; returns an empty object."""
    reload_model_weights()

    return {}
512
+
513
def skip(self):
    """Skip the current item of the running job without interrupting the whole job."""
    shared.state.skip()
515
+
516
def get_config(self):
    """Return every stored option value, keyed by option name.

    For options that have declared metadata the declared default is used as
    the lookup fallback; unknown keys fall back to None.
    """
    options = {}
    for key in shared.opts.data:
        label = shared.opts.data_labels.get(key)
        fallback = label.default if label is not None else None
        options[key] = shared.opts.data.get(key, fallback)

    return options
526
+
527
def set_config(self, req: Dict[str, Any]):
    """Apply a dict of option overrides and persist them to the config file.

    Raises RuntimeError if a requested checkpoint name is unknown, before
    any option is changed.
    """
    checkpoint_name = req.get("sd_model_checkpoint", None)
    if checkpoint_name is not None and checkpoint_name not in checkpoint_aliases:
        raise RuntimeError(f"model {checkpoint_name!r} not found")

    for option, value in req.items():
        shared.opts.set(option, value)

    shared.opts.save(shared.config_filename)
537
+
538
def get_cmd_flags(self):
    """Return the parsed command-line flags as a plain dict."""
    return vars(shared.cmd_opts)
540
+
541
def get_samplers(self):
    """List every registered sampler with its aliases and option flags."""
    result = []
    for sampler in sd_samplers.all_samplers:
        # sampler tuple layout: (name, constructor, aliases, options)
        result.append({"name": sampler[0], "aliases": sampler[2], "options": sampler[3]})
    return result
543
+
544
def get_upscalers(self):
    """Describe each configured upscaler (name, model, path, scale)."""
    infos = []
    for upscaler in shared.sd_upscalers:
        infos.append({
            "name": upscaler.name,
            "model_name": upscaler.scaler.model_name,
            "model_path": upscaler.data_path,
            "model_url": None,
            "scale": upscaler.scale,
        })
    return infos
555
+
556
def get_latent_upscale_modes(self):
    """List the names of the available latent-space upscale modes."""
    modes = shared.latent_upscale_modes or {}
    return [{"name": mode_name} for mode_name in modes]
563
+
564
def get_sd_models(self):
    """Describe every checkpoint known to the model list."""
    entries = []
    for ckpt in checkpoints_list.values():
        entries.append({
            "title": ckpt.title,
            "model_name": ckpt.model_name,
            "hash": ckpt.shorthash,
            "sha256": ckpt.sha256,
            "filename": ckpt.filename,
            "config": find_checkpoint_config_near_filename(ckpt),
        })
    return entries
566
+
567
def get_sd_vaes(self):
    """List the available VAE files as name/filename pairs."""
    return [{"model_name": name, "filename": path} for name, path in vae_dict.items()]
569
+
570
def get_hypernetworks(self):
    """List known hypernetworks as name/path pairs."""
    return [{"name": name, "path": path} for name, path in shared.hypernetworks.items()]
572
+
573
def get_face_restorers(self):
    """List the face-restorer backends; cmd_dir is optional per implementation."""
    restorers = []
    for restorer in shared.face_restorers:
        restorers.append({"name": restorer.name(), "cmd_dir": getattr(restorer, "cmd_dir", None)})
    return restorers
575
+
576
def get_realesrgan_models(self):
    """List RealESRGAN models with their paths and upscale factors.

    NOTE(review): the call below resolves to the module-level
    get_realesrgan_models imported at the top of the file, not to this method
    (a method's own name is not in scope inside its body), so this is not
    recursion.
    """
    return [{"name":x.name,"path":x.data_path, "scale":x.scale} for x in get_realesrgan_models(None)]
578
+
579
def get_prompt_styles(self):
    """List the saved prompt styles as name/prompt/negative_prompt records."""
    styles = []
    for style in shared.prompt_styles.styles.values():
        # style tuple layout: (name, prompt, negative_prompt)
        styles.append({"name": style[0], "prompt": style[1], "negative_prompt": style[2]})

    return styles
586
+
587
def get_embeddings(self):
    """Report textual-inversion embeddings, split into loaded and skipped sets."""
    db = sd_hijack.model_hijack.embedding_db

    def describe(embedding):
        # Per-embedding metadata exposed over the API.
        return {
            "step": embedding.step,
            "sd_checkpoint": embedding.sd_checkpoint,
            "sd_checkpoint_name": embedding.sd_checkpoint_name,
            "shape": embedding.shape,
            "vectors": embedding.vectors,
        }

    def describe_all(collection):
        return {emb.name: describe(emb) for emb in collection.values()}

    return {
        "loaded": describe_all(db.word_embeddings),
        "skipped": describe_all(db.skipped_embeddings),
    }
606
+
607
def refresh_checkpoints(self):
    """Rescan the checkpoint directory; serialized behind the queue lock."""
    with self.queue_lock:
        shared.refresh_checkpoints()
610
+
611
def create_embedding(self, args: dict):
    """Create a new, empty textual-inversion embedding.

    args are forwarded verbatim to the module-level create_embedding.
    Returns a CreateResponse on success; invalid arguments (AssertionError)
    are reported via a TrainResponse carrying the error text (kept as-is
    for backward compatibility with existing clients).
    """
    try:
        shared.state.begin(job="create_embedding")
        filename = create_embedding(**args)  # create empty embedding
        sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()  # reload embeddings so new one can be immediately used
        # BUGFIX: the filename placeholder was missing from the message.
        return models.CreateResponse(info=f"create embedding filename: {filename}")
    except AssertionError as e:
        return models.TrainResponse(info=f"create embedding error: {e}")
    finally:
        shared.state.end()
621
+
622
+
623
def create_hypernetwork(self, args: dict):
    """Create a new, empty hypernetwork.

    args are forwarded verbatim to the module-level create_hypernetwork.
    Invalid arguments (AssertionError) are reported via a TrainResponse
    carrying the error text (kept as-is for backward compatibility).
    """
    try:
        shared.state.begin(job="create_hypernetwork")
        filename = create_hypernetwork(**args)  # create empty hypernetwork
        # BUGFIX: the filename placeholder was missing from the message.
        return models.CreateResponse(info=f"create hypernetwork filename: {filename}")
    except AssertionError as e:
        return models.TrainResponse(info=f"create hypernetwork error: {e}")
    finally:
        shared.state.end()
632
+
633
def preprocess(self, args: dict):
    """Run dataset preprocessing.

    Quick unless BLIP/booru interrogation is enabled. All errors are
    reported through the PreprocessResponse info text rather than raised.
    """
    try:
        shared.state.begin(job="preprocess")
        preprocess(**args)  # quick operation unless blip/booru interrogation is enabled
        return models.PreprocessResponse(info='preprocess complete')
    except KeyError as e:
        return models.PreprocessResponse(info=f"preprocess error: invalid token: {e}")
    except Exception as e:
        return models.PreprocessResponse(info=f"preprocess error: {e}")
    finally:
        # BUGFIX: state.end() used to be called twice on the success path
        # (explicitly before the return and again here); finally alone suffices.
        shared.state.end()
645
+
646
def train_embedding(self, args: dict):
    """Train a textual-inversion embedding; may run for a long time.

    Training errors are captured and reported in the response text, so the
    endpoint always returns a TrainResponse. Cross-attention optimizations
    are temporarily undone when training requires it, and restored after.
    """
    try:
        shared.state.begin(job="train_embedding")
        apply_optimizations = shared.opts.training_xattention_optimizations
        error = None
        filename = ''
        if not apply_optimizations:
            sd_hijack.undo_optimizations()
        try:
            embedding, filename = train_embedding(**args)  # can take a long time to complete
        except Exception as e:
            error = e
        finally:
            if not apply_optimizations:
                sd_hijack.apply_optimizations()
        # BUGFIX: the filename placeholder was missing from the message.
        return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
    except Exception as msg:
        return models.TrainResponse(info=f"train embedding error: {msg}")
    finally:
        shared.state.end()
666
+
667
def train_hypernetwork(self, args: dict):
    """Train a hypernetwork; may run for a long time.

    Training errors are captured and reported in the response text, so the
    endpoint always returns a TrainResponse. Model stages are moved back to
    the inference device and optimizations restored even on failure.
    """
    try:
        shared.state.begin(job="train_hypernetwork")
        shared.loaded_hypernetworks = []
        apply_optimizations = shared.opts.training_xattention_optimizations
        error = None
        filename = ''
        if not apply_optimizations:
            sd_hijack.undo_optimizations()
        try:
            hypernetwork, filename = train_hypernetwork(**args)
        except Exception as e:
            error = e
        finally:
            # move the model parts used during training back to the inference device
            shared.sd_model.cond_stage_model.to(devices.device)
            shared.sd_model.first_stage_model.to(devices.device)
            if not apply_optimizations:
                sd_hijack.apply_optimizations()
            shared.state.end()
        # BUGFIX: filename placeholder restored; messages previously said "embedding".
        return models.TrainResponse(info=f"train hypernetwork complete: filename: {filename} error: {error}")
    except Exception as exc:
        return models.TrainResponse(info=f"train hypernetwork error: {exc}")
    finally:
        shared.state.end()
691
+
692
def get_memory(self):
    """Report process RAM usage (via psutil) and CUDA memory stats (via torch).

    Each section independently degrades to {'error': ...} when its backing
    library or hardware is unavailable, so the endpoint never raises.
    """
    try:
        import os
        import psutil
        process = psutil.Process(os.getpid())
        res = process.memory_info()  # only rss is cross-platform guaranteed so we dont rely on other values
        ram_total = 100 * res.rss / process.memory_percent()  # and total memory is calculated as actual value is not cross-platform safe
        ram = { 'free': ram_total - res.rss, 'used': res.rss, 'total': ram_total }
    except Exception as err:
        ram = { 'error': f'{err}' }
    try:
        import torch
        if torch.cuda.is_available():
            s = torch.cuda.mem_get_info()
            system = { 'free': s[0], 'used': s[1] - s[0], 'total': s[1] }
            # s is reused: now the full allocator statistics dict for the device
            s = dict(torch.cuda.memory_stats(shared.device))
            allocated = { 'current': s['allocated_bytes.all.current'], 'peak': s['allocated_bytes.all.peak'] }
            reserved = { 'current': s['reserved_bytes.all.current'], 'peak': s['reserved_bytes.all.peak'] }
            active = { 'current': s['active_bytes.all.current'], 'peak': s['active_bytes.all.peak'] }
            inactive = { 'current': s['inactive_split_bytes.all.current'], 'peak': s['inactive_split_bytes.all.peak'] }
            warnings = { 'retries': s['num_alloc_retries'], 'oom': s['num_ooms'] }
            cuda = {
                'system': system,
                'active': active,
                'allocated': allocated,
                'reserved': reserved,
                'inactive': inactive,
                'events': warnings,
            }
        else:
            cuda = {'error': 'unavailable'}
    except Exception as err:
        cuda = {'error': f'{err}'}
    return models.MemoryResponse(ram=ram, cuda=cuda)
726
+
727
def launch(self, server_name, port, root_path):
    """Mount the API router and run uvicorn; blocks until the server stops."""
    self.app.include_router(self.router)
    uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=shared.cmd_opts.timeout_keep_alive, root_path=root_path)
730
+
731
def kill_webui(self):
    """Terminate the whole program via the restart helper."""
    restart.stop_program()
733
+
734
def restart_webui(self):
    """Restart the program when the launcher supports it; otherwise reply 501.

    When restart_program() succeeds the process is replaced, so the 501
    response is only ever reached in the non-restartable case.
    """
    if restart.is_restartable():
        restart.restart_program()
    return Response(status_code=501)
738
+
739
def stop_webui(request):
    # NOTE(review): declared without `self`; when invoked as a bound method the
    # instance arrives in `request` — confirm the route registration before
    # renaming or adding parameters.
    shared.state.server_command = "stop"
    return Response("Stopping.")
742
+
modules/api/models.py ADDED
@@ -0,0 +1,314 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+
3
+ from pydantic import BaseModel, Field, create_model
4
+ from typing import Any, Optional
5
+ from typing_extensions import Literal
6
+ from inflection import underscore
7
+ from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
8
+ from modules.shared import sd_upscalers, opts, parser
9
+ from typing import Dict, List
10
+
11
# Constructor parameters of the processing classes that must never be exposed
# as API request fields (internal state, output paths, live object references).
API_NOT_ALLOWED = [
    "self",
    "kwargs",
    "sd_model",
    "outpath_samples",
    "outpath_grids",
    "sampler_index",
    # "do_not_save_samples",
    # "do_not_save_grid",
    "extra_generation_params",
    "overlay_images",
    "do_not_reload_embeddings",
    "seed_enable_extras",
    "prompt_for_display",
    "sampler_noise_scheduler_override",
    "ddim_discretize"
]
28
+
29
class ModelDef(BaseModel):
    """Assistance Class for Pydantic Dynamic Model Generation"""

    # snake_case name used on the generated model
    field: str
    # original (alias) name as found on the processing class
    field_alias: str
    field_type: Any
    # default value for the generated field
    field_value: Any
    # when True the field is excluded from serialized output
    field_exclude: bool = False
+
38
+
39
class PydanticModelGenerator:
    """
    Takes in created classes and stubs them out in a way FastAPI/Pydantic is happy about:
    source_data is a snapshot of the default values produced by the class
    params are the names of the actual keys required by __init__
    """

    def __init__(
        self,
        model_name: str = None,
        class_instance = None,
        additional_fields = None,
    ):
        def field_type_generator(k, v):
            # field_type = str if not overrides.get(k) else overrides[k]["type"]
            # print(k, v.annotation, v.default)
            field_type = v.annotation

            # every generated field is optional so partial requests validate
            return Optional[field_type]

        def merge_class_params(class_):
            # collect __init__ parameters across the whole MRO (subclass wins)
            all_classes = list(filter(lambda x: x is not object, inspect.getmro(class_)))
            parameters = {}
            for classes in all_classes:
                parameters = {**parameters, **inspect.signature(classes.__init__).parameters}
            return parameters


        self._model_name = model_name
        self._class_data = merge_class_params(class_instance)

        # one ModelDef per constructor parameter, minus the blocked names
        self._model_def = [
            ModelDef(
                field=underscore(k),
                field_alias=k,
                field_type=field_type_generator(k, v),
                field_value=v.default
            )
            for (k,v) in self._class_data.items() if k not in API_NOT_ALLOWED
        ]

        # API-only fields (sampler_index, script_args, ...) appended afterwards
        for fields in additional_fields:
            self._model_def.append(ModelDef(
                field=underscore(fields["key"]),
                field_alias=fields["key"],
                field_type=fields["type"],
                field_value=fields["default"],
                field_exclude=fields["exclude"] if "exclude" in fields else False))

    def generate_model(self):
        """
        Creates a pydantic BaseModel
        from the json and overrides provided at initialization
        """
        fields = {
            d.field: (d.field_type, Field(default=d.field_value, alias=d.field_alias, exclude=d.field_exclude)) for d in self._model_def
        }
        DynamicModel = create_model(self._model_name, **fields)
        # NOTE(review): pydantic v1 config access; both field names and aliases
        # are accepted on input, and instances stay mutable.
        DynamicModel.__config__.allow_population_by_field_name = True
        DynamicModel.__config__.allow_mutation = True
        return DynamicModel
+
101
# Request model for txt2img: every StableDiffusionProcessingTxt2Img constructor
# argument (minus API_NOT_ALLOWED) plus these API-only fields.
StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator(
    "StableDiffusionProcessingTxt2Img",
    StableDiffusionProcessingTxt2Img,
    [
        {"key": "sampler_index", "type": str, "default": "Euler"},
        {"key": "script_name", "type": str, "default": None},
        {"key": "script_args", "type": list, "default": []},
        {"key": "send_images", "type": bool, "default": True},
        {"key": "save_images", "type": bool, "default": False},
        {"key": "alwayson_scripts", "type": dict, "default": {}},
    ]
).generate_model()
+
114
# Request model for img2img: like the txt2img model, with extra fields for the
# source images and mask; include_init_images is excluded from responses.
StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator(
    "StableDiffusionProcessingImg2Img",
    StableDiffusionProcessingImg2Img,
    [
        {"key": "sampler_index", "type": str, "default": "Euler"},
        {"key": "init_images", "type": list, "default": None},
        {"key": "denoising_strength", "type": float, "default": 0.75},
        {"key": "mask", "type": str, "default": None},
        {"key": "include_init_images", "type": bool, "default": False, "exclude" : True},
        {"key": "script_name", "type": str, "default": None},
        {"key": "script_args", "type": list, "default": []},
        {"key": "send_images", "type": bool, "default": True},
        {"key": "save_images", "type": bool, "default": False},
        {"key": "alwayson_scripts", "type": dict, "default": {}},
    ]
).generate_model()
+
131
class TextToImageResponse(BaseModel):
    """Response model for text-to-image generation."""
    images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
    parameters: dict
    info: str

class ImageToImageResponse(BaseModel):
    """Response model for image-to-image generation."""
    images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
    parameters: dict
    info: str
+
141
class ExtrasBaseRequest(BaseModel):
    """Common parameters for all extras (postprocessing/upscaling) requests."""
    resize_mode: Literal[0, 1] = Field(default=0, title="Resize Mode", description="Sets the resize mode: 0 to upscale by upscaling_resize amount, 1 to upscale up to upscaling_resize_h x upscaling_resize_w.")
    show_extras_results: bool = Field(default=True, title="Show results", description="Should the backend return the generated image?")
    gfpgan_visibility: float = Field(default=0, title="GFPGAN Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of GFPGAN, values should be between 0 and 1.")
    codeformer_visibility: float = Field(default=0, title="CodeFormer Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of CodeFormer, values should be between 0 and 1.")
    codeformer_weight: float = Field(default=0, title="CodeFormer Weight", ge=0, le=1, allow_inf_nan=False, description="Sets the weight of CodeFormer, values should be between 0 and 1.")
    upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=8, description="By how much to upscale the image, only used when resize_mode=0.")
    upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.")
    upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. Only used when resize_mode=1.")
    upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the chosen size?")
    upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
    upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
    extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.")
    upscale_first: bool = Field(default=False, title="Upscale first", description="Should the upscaler run before restoring faces?")
+
156
class ExtraBaseResponse(BaseModel):
    """Common part of all extras responses."""
    html_info: str = Field(title="HTML info", description="A series of HTML tags containing the process info.")

class ExtrasSingleImageRequest(ExtrasBaseRequest):
    """Extras request operating on one base64 image."""
    image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.")

class ExtrasSingleImageResponse(ExtraBaseResponse):
    """Extras response carrying one base64 image."""
    image: str = Field(default=None, title="Image", description="The generated image in base64 format.")

class FileData(BaseModel):
    """One uploaded file: base64 payload plus its name."""
    data: str = Field(title="File data", description="Base64 representation of the file")
    name: str = Field(title="File name")

class ExtrasBatchImagesRequest(ExtrasBaseRequest):
    """Extras request operating on a batch of uploaded images."""
    imageList: List[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings")

class ExtrasBatchImagesResponse(ExtraBaseResponse):
    """Extras response carrying a batch of base64 images."""
    images: List[str] = Field(title="Images", description="The generated images in base64 format.")
+
175
class PNGInfoRequest(BaseModel):
    """Request carrying a base64 PNG whose embedded metadata should be read."""
    image: str = Field(title="Image", description="The base64 encoded PNG image")

class PNGInfoResponse(BaseModel):
    """Embedded generation parameters extracted from a PNG."""
    info: str = Field(title="Image info", description="A string with the parameters used to generate the image")
    items: dict = Field(title="Items", description="An object containing all the info the image had")

class ProgressRequest(BaseModel):
    """Query parameters for the progress endpoint."""
    skip_current_image: bool = Field(default=False, title="Skip current image", description="Skip current image serialization")
+
185
class ProgressResponse(BaseModel):
    """Progress snapshot of the currently running job."""
    progress: float = Field(title="Progress", description="The progress with a range of 0 to 1")
    eta_relative: float = Field(title="ETA in secs")
    state: dict = Field(title="State", description="The current state snapshot")
    current_image: str = Field(default=None, title="Current image", description="The current image in base64 format. opts.show_progress_every_n_steps is required for this to work.")
    textinfo: str = Field(default=None, title="Info text", description="Info text used by WebUI.")
+
192
class InterrogateRequest(BaseModel):
    """Request to caption an image with an interrogation model."""
    image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.")
    model: str = Field(default="clip", title="Model", description="The interrogate model used.")

class InterrogateResponse(BaseModel):
    """Caption produced by an interrogation model."""
    caption: str = Field(default=None, title="Caption", description="The generated caption for the image.")

class TrainResponse(BaseModel):
    """Free-text outcome of a train task."""
    info: str = Field(title="Train info", description="Response string from train embedding or hypernetwork task.")

class CreateResponse(BaseModel):
    """Free-text outcome of a create task."""
    info: str = Field(title="Create info", description="Response string from create embedding or hypernetwork task.")

class PreprocessResponse(BaseModel):
    """Free-text outcome of a preprocessing task."""
    info: str = Field(title="Preprocess info", description="Response string from preprocessing task.")
+
208
# Build a pydantic model mirroring shared.opts: one optional field per option
# that has a non-None default. Options whose default is None are omitted.
fields = {}
for key, metadata in opts.data_labels.items():
    value = opts.data.get(key)  # NOTE(review): unused — candidate for removal
    optType = opts.typemap.get(type(metadata.default), type(metadata.default))

    if metadata.default is None:
        pass
    elif metadata is not None:
        fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
    else:
        # NOTE(review): unreachable — metadata comes from data_labels.items()
        # and can never be None here.
        fields.update({key: (Optional[optType], Field())})

OptionsModel = create_model("Options", **fields)
+
222
# Build a pydantic model mirroring the argparse command-line flags: one field
# per option (except --help), typed from the flag's default when it has one.
flags = {}
_options = vars(parser)['_option_string_actions']
for key in _options:
    if(_options[key].dest != 'help'):
        flag = _options[key]
        _type = str  # fall back to str when the flag has no default to infer from
        if _options[key].default is not None:
            _type = type(_options[key].default)
        flags.update({flag.dest: (_type, Field(default=flag.default, description=flag.help))})

FlagsModel = create_model("Flags", **flags)
+
234
class SamplerItem(BaseModel):
    """One entry of the sampler listing endpoint."""
    name: str = Field(title="Name")
    aliases: List[str] = Field(title="Aliases")
    options: Dict[str, str] = Field(title="Options")

class UpscalerItem(BaseModel):
    """One entry of the upscaler listing endpoint."""
    name: str = Field(title="Name")
    model_name: Optional[str] = Field(title="Model Name")
    model_path: Optional[str] = Field(title="Path")
    model_url: Optional[str] = Field(title="URL")
    scale: Optional[float] = Field(title="Scale")

class LatentUpscalerModeItem(BaseModel):
    """One latent-space upscale mode name."""
    name: str = Field(title="Name")

class SDModelItem(BaseModel):
    """One checkpoint entry of the model listing endpoint."""
    title: str = Field(title="Title")
    model_name: str = Field(title="Model Name")
    hash: Optional[str] = Field(title="Short hash")
    sha256: Optional[str] = Field(title="sha256 hash")
    filename: str = Field(title="Filename")
    config: Optional[str] = Field(title="Config file")

class SDVaeItem(BaseModel):
    """One VAE entry of the VAE listing endpoint."""
    model_name: str = Field(title="Model Name")
    filename: str = Field(title="Filename")

class HypernetworkItem(BaseModel):
    """One hypernetwork entry."""
    name: str = Field(title="Name")
    path: Optional[str] = Field(title="Path")

class FaceRestorerItem(BaseModel):
    """One face-restorer entry."""
    name: str = Field(title="Name")
    cmd_dir: Optional[str] = Field(title="Path")

class RealesrganItem(BaseModel):
    """One RealESRGAN model entry."""
    name: str = Field(title="Name")
    path: Optional[str] = Field(title="Path")
    scale: Optional[int] = Field(title="Scale")

class PromptStyleItem(BaseModel):
    """One saved prompt style."""
    name: str = Field(title="Name")
    prompt: Optional[str] = Field(title="Prompt")
    negative_prompt: Optional[str] = Field(title="Negative Prompt")
+
279
+
280
class EmbeddingItem(BaseModel):
    """Metadata about one textual-inversion embedding."""
    step: Optional[int] = Field(title="Step", description="The number of steps that were used to train this embedding, if available")
    sd_checkpoint: Optional[str] = Field(title="SD Checkpoint", description="The hash of the checkpoint this embedding was trained on, if available")
    sd_checkpoint_name: Optional[str] = Field(title="SD Checkpoint Name", description="The name of the checkpoint this embedding was trained on, if available. Note that this is the name that was used by the trainer; for a stable identifier, use `sd_checkpoint` instead")
    shape: int = Field(title="Shape", description="The length of each individual vector in the embedding")
    vectors: int = Field(title="Vectors", description="The number of vectors in the embedding")

class EmbeddingsResponse(BaseModel):
    """Embeddings split into loaded and skipped sets."""
    loaded: Dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model")
    skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)")

class MemoryResponse(BaseModel):
    """RAM and CUDA memory statistics."""
    ram: dict = Field(title="RAM", description="System memory stats")
    cuda: dict = Field(title="CUDA", description="nVidia CUDA memory stats")
+
295
+
296
class ScriptsList(BaseModel):
    """Titles of available scripts, split by tab."""
    txt2img: list = Field(default=None, title="Txt2img", description="Titles of scripts (txt2img)")
    img2img: list = Field(default=None, title="Img2img", description="Titles of scripts (img2img)")
+
300
+
301
class ScriptArg(BaseModel):
    """Description of a single UI argument of a script."""
    # BUGFIX: maximum/step previously had copy-pasted title="Minimum", and
    # several descriptions contained the typo "argumentin".
    label: str = Field(default=None, title="Label", description="Name of the argument in UI")
    value: Optional[Any] = Field(default=None, title="Value", description="Default value of the argument")
    minimum: Optional[Any] = Field(default=None, title="Minimum", description="Minimum allowed value for the argument in UI")
    maximum: Optional[Any] = Field(default=None, title="Maximum", description="Maximum allowed value for the argument in UI")
    step: Optional[Any] = Field(default=None, title="Step", description="Step for changing value of the argument in UI")
    choices: Optional[List[str]] = Field(default=None, title="Choices", description="Possible values for the argument")
+
309
+
310
class ScriptInfo(BaseModel):
    """Description of one installed script and its arguments."""
    name: str = Field(default=None, title="Name", description="Script name")
    is_alwayson: bool = Field(default=None, title="IsAlwayson", description="Flag specifying whether this script is an alwayson script")
    is_img2img: bool = Field(default=None, title="IsImg2img", description="Flag specifying whether this script is an img2img script")
    args: List[ScriptArg] = Field(title="Arguments", description="List of script's arguments")
modules/cache.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os.path
3
+ import threading
4
+ import time
5
+
6
+ from modules.paths import data_path, script_path
7
+
8
# Path of the single JSON cache file shared by all subsections.
cache_filename = os.path.join(data_path, "cache.json")
# Lazily-loaded in-memory cache; stays None until first access via cache().
cache_data = None
# Guards cache_data initialization and all writes of the cache file.
cache_lock = threading.Lock()

# Debounce deadline for the background writer; None when no write is pending.
dump_cache_after = None
# The single background writer thread, or None when idle.
dump_cache_thread = None
14
+
15
+
16
def dump_cache():
    """
    Marks cache for writing to disk. 5 seconds after no one else flags the cache for writing, it is written.
    """

    global dump_cache_after
    global dump_cache_thread

    def thread_func():
        global dump_cache_after
        global dump_cache_thread

        # wait until the debounce deadline stops being pushed forward
        while dump_cache_after is not None and time.time() < dump_cache_after:
            time.sleep(1)

        with cache_lock:
            with open(cache_filename, "w", encoding="utf8") as file:
                json.dump(cache_data, file, indent=4)

            # clear the pending state so the next dump_cache() starts a new thread
            dump_cache_after = None
            dump_cache_thread = None

    with cache_lock:
        # push the deadline forward; start the single writer thread only if idle
        dump_cache_after = time.time() + 5
        if dump_cache_thread is None:
            dump_cache_thread = threading.Thread(name='cache-writer', target=thread_func)
            dump_cache_thread.start()
43
+
44
+
45
def cache(subsection):
    """
    Retrieves or initializes a cache for a specific subsection.

    Parameters:
        subsection (str): The subsection identifier for the cache.

    Returns:
        dict: The cache data for the specified subsection.
    """

    global cache_data

    # double-checked locking: only the first caller pays for loading the file
    if cache_data is None:
        with cache_lock:
            if cache_data is None:
                if not os.path.isfile(cache_filename):
                    cache_data = {}
                else:
                    try:
                        with open(cache_filename, "r", encoding="utf8") as file:
                            cache_data = json.load(file)
                    except Exception:
                        # unreadable/corrupt cache: move it aside and start fresh
                        os.replace(cache_filename, os.path.join(script_path, "tmp", "cache.json"))
                        print('[ERROR] issue occurred while trying to read cache.json, move current cache to tmp/cache.json and create new cache')
                        cache_data = {}

    # create the subsection on first use so callers can mutate it in place
    s = cache_data.get(subsection, {})
    cache_data[subsection] = s

    return s
76
+
77
+
78
def cached_data_for_file(subsection, title, filename, func):
    """Return cached data for *filename*, regenerating it via *func* when stale.

    The entry stored under (subsection, title) remembers the file's mtime; if
    the file on disk is newer than the remembered mtime the entry is discarded
    and rebuilt with *func*, and the cache is scheduled for writing to disk.

    Parameters:
        subsection (str): The subsection of the cache to use.
        title (str): The title of the data entry in the subsection of the cache.
        filename (str): The path to the file to be checked for modifications.
        func (callable): A function that generates the data if it is not available in the cache.

    Returns:
        dict or None: The cached or generated data, or None if data generation fails.
    """

    section = cache(subsection)
    mtime_on_disk = os.path.getmtime(filename)

    entry = section.get(title)
    # An entry is stale once the file was modified after the entry was stored.
    if entry and mtime_on_disk > entry.get("mtime", 0):
        entry = None

    if not entry or 'value' not in entry:
        generated = func()
        if generated is None:
            # generation failed; report failure without caching anything
            return None

        entry = {'mtime': mtime_on_disk, 'value': generated}
        section[title] = entry

        dump_cache()

    return entry['value']