|
from __future__ import annotations |
|
|
|
import os |
|
import sys |
|
import time |
|
import importlib |
|
import signal |
|
import re |
|
import warnings |
|
import json |
|
from threading import Thread |
|
from typing import Iterable |
|
|
|
from fastapi import FastAPI |
|
from fastapi.middleware.cors import CORSMiddleware |
|
from fastapi.middleware.gzip import GZipMiddleware |
|
from packaging import version |
|
|
|
import logging |
|
|
|
|
|
# Optional logging configuration, driven by the SD_WEBUI_LOG_LEVEL environment
# variable (e.g. "DEBUG", "INFO").  When the variable is unset, logging is left
# unconfigured and only the targeted tweaks below are applied.
log_level = os.environ.get("SD_WEBUI_LOG_LEVEL")
if log_level:
    # Unknown level names fall back to INFO rather than erroring out.
    log_level = getattr(logging, log_level.upper(), None) or logging.INFO
    logging.basicConfig(
        level=log_level,
        format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
    )

# Silence noisy third-party loggers that users cannot act on.
logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR)
# Triton is optional; drop xformers' warning about it being unavailable.
logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
|
|
|
# Start the startup timer as early as possible; each heavyweight import below
# is recorded so slow startups can be diagnosed from the timing summary.
from modules import timer
startup_timer = timer.startup_timer
startup_timer.record("launcher")

import torch
import pytorch_lightning  # imported before the warning filters below take effect
# Suppress library warnings we cannot fix from here.
warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning")
warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision")
startup_timer.record("import torch")

import gradio
startup_timer.record("import gradio")

# modules.paths has import-time side effects that set up repository paths.
from modules import paths, timer, import_hook, errors, devices
startup_timer.record("setup paths")

import ldm.modules.encoders.modules
startup_timer.record("import ldm")

from modules import extra_networks
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, queue_lock
|
|
|
|
|
# Nightly/local torch builds carry suffixes such as "2.1.0.dev20230320+cu118"
# or "2.0.0+git<sha>" that confuse version comparisons elsewhere; preserve the
# full string on __long_version__ and trim __version__ to its numeric prefix.
if ".dev" in torch.__version__ or "+git" in torch.__version__:
    torch.__long_version__ = torch.__version__
    match = re.search(r'[\d.]+[\d]', torch.__version__)
    if match:  # guard: an unparseable version string previously crashed on .group(0)
        torch.__version__ = match.group(0)
|
|
|
# Project-local modules; several have import-time side effects (registering
# options, hijacks, callbacks), so their order matters.
from modules import shared, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states
import modules.codeformer_model as codeformer
import modules.face_restoration
import modules.gfpgan_model as gfpgan
import modules.img2img

import modules.lowvram
import modules.scripts
import modules.sd_hijack
import modules.sd_hijack_optimizations
import modules.sd_models
import modules.sd_vae
import modules.sd_unet
import modules.txt2img
import modules.script_callbacks
import modules.textual_inversion.textual_inversion
import modules.progress

import modules.ui
from modules import modelloader
from modules.shared import cmd_opts
import modules.hypernetworks.hypernetwork

startup_timer.record("other imports")
|
|
|
|
|
# Gradio bind address: an explicit --server-name always wins; otherwise
# --listen binds all interfaces, and None keeps gradio's localhost default.
server_name = cmd_opts.server_name or ("0.0.0.0" if cmd_opts.listen else None)
|
|
|
|
|
def fix_asyncio_event_loop_policy():
    """
    Install an asyncio policy that creates event loops on demand in any thread.

    The default policy only auto-creates loops for the main thread; elsewhere,
    `asyncio.get_event_loop` (and therefore `.IOLoop.current`) fails unless a
    loop was created explicitly.  This matches the behavior of Tornado versions
    prior to 5.0 (or 5.0 on Python 2).
    """
    import asyncio

    # On Windows, prefer the selector-based loop when available.
    wants_selector = sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy")
    base_policy = asyncio.WindowsSelectorEventLoopPolicy if wants_selector else asyncio.DefaultEventLoopPolicy

    class AnyThreadEventLoopPolicy(base_policy):
        """Event loop policy that allows loop creation on any thread.

        Usage::

            asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
        """

        def get_event_loop(self) -> asyncio.AbstractEventLoop:
            try:
                return super().get_event_loop()
            except (RuntimeError, AssertionError):
                # This thread has no loop yet: create one and register it.
                fresh_loop = self.new_event_loop()
                self.set_event_loop(fresh_loop)
                return fresh_loop

    asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
|
|
|
|
|
def check_versions():
    """Print a prominent warning when installed torch / xformers are older than
    the versions this program is tested with.

    Disabled entirely by the --skip-version-check commandline argument.
    """
    if shared.cmd_opts.skip_version_check:
        return

    expected_torch_version = "2.0.0"

    if version.parse(torch.__version__) < version.parse(expected_torch_version):
        errors.print_error_explanation(f"""
You are running torch {torch.__version__}.
The program is tested to work with torch {expected_torch_version}.
To reinstall the desired version, run with commandline flag --reinstall-torch.
Beware that this will cause a lot of large files to be downloaded, as well as
there are reports of issues with training tab on the latest version.

Use --skip-version-check commandline argument to disable this check.
""".strip())

    expected_xformers_version = "0.0.20"
    # xformers is optional; only check its version when it is actually in use.
    if shared.xformers_available:
        import xformers

        if version.parse(xformers.__version__) < version.parse(expected_xformers_version):
            errors.print_error_explanation(f"""
You are running xformers {xformers.__version__}.
The program is tested to work with xformers {expected_xformers_version}.
To reinstall the desired version, run with commandline flag --reinstall-xformers.

Use --skip-version-check commandline argument to disable this check.
""".strip())
|
|
|
|
|
def restore_config_state_file():
    """If opts.restore_config_state_file names a config-state backup, restore
    extension state from it.  One-shot: the option is cleared before restoring
    so a crash mid-restore cannot retrigger it on the next launch."""
    state_path = shared.opts.restore_config_state_file
    if state_path == "":
        return

    # Clear and persist the option first (see docstring).
    shared.opts.restore_config_state_file = ""
    shared.opts.save(shared.config_filename)

    if not os.path.isfile(state_path):
        if state_path:
            print(f"!!! Config state backup not found: {state_path}")
        return

    print(f"*** About to restore extension state from file: {state_path}")
    with open(state_path, "r", encoding="utf-8") as f:
        config_states.restore_extension_config(json.load(f))
    startup_timer.record("restore extension config")
|
|
|
|
|
def validate_tls_options():
    """Sanity-check the --tls-keyfile/--tls-certfile pair.

    On values os.path.exists cannot handle, both options are reset to None so
    the webui falls back to running without TLS.  Missing-but-valid paths only
    produce warnings here.
    """
    keyfile, certfile = cmd_opts.tls_keyfile, cmd_opts.tls_certfile
    if not (keyfile and certfile):
        # TLS is only attempted when both files were supplied.
        return

    try:
        if not os.path.exists(keyfile):
            print("Invalid path to TLS keyfile given")
        if not os.path.exists(certfile):
            print(f"Invalid path to TLS certfile: '{certfile}'")
    except TypeError:
        # os.path.exists raises TypeError for non-path-like values.
        cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None
        print("TLS setup invalid, running webui without TLS")
    else:
        print("Running with TLS")
    startup_timer.record("TLS")
|
|
|
|
|
def get_gradio_auth_creds() -> Iterable[tuple[str, ...]]:
    """
    Convert the gradio_auth and gradio_auth_path commandline arguments into
    an iterable of (username, password) tuples.

    Credentials are comma-separated "user:pass" entries; blank entries are
    skipped.  Entries from --gradio-auth are yielded first, then entries read
    from the file named by --gradio-auth-path (one or more per line).
    """
    def process_credential_line(s) -> tuple[str, ...] | None:
        # "user:pass" -> ("user", "pass"); split only once so passwords may
        # themselves contain ':'.
        s = s.strip()
        if not s:
            return None
        return tuple(s.split(':', 1))

    if cmd_opts.gradio_auth:
        for cred in cmd_opts.gradio_auth.split(','):
            cred = process_credential_line(cred)
            if cred:
                yield cred

    if cmd_opts.gradio_auth_path:
        with open(cmd_opts.gradio_auth_path, 'r', encoding="utf8") as file:
            # Iterate lazily instead of materializing the file via readlines().
            for line in file:
                for cred in line.strip().split(','):
                    cred = process_credential_line(cred)
                    if cred:
                        yield cred
|
|
|
|
|
def configure_sigint_handler():
    """Install a SIGINT handler that terminates the process immediately.

    Skipped when running under coverage (COVERAGE_RUN set), since os._exit
    would bypass coverage's normal shutdown.
    """
    def sigint_handler(sig, frame):
        print(f'Interrupted with signal {sig} in {frame}')
        # Hard exit: skip atexit/cleanup so Ctrl+C stops everything at once.
        os._exit(0)

    if os.environ.get("COVERAGE_RUN"):
        return

    signal.signal(signal.SIGINT, sigint_handler)
|
|
|
|
|
def configure_opts_onchange():
    """Register callbacks that run when the corresponding shared.opts setting
    changes.

    The lambdas are deliberate: they defer the attribute lookup to call time.
    call=False registers without invoking the callback immediately.
    """
    shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()), call=False)
    shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
    shared.opts.onchange("sd_vae_as_default", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
    shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed)
    shared.opts.onchange("gradio_theme", shared.reload_gradio_theme)
    shared.opts.onchange("cross_attention_optimization", wrap_queued_call(lambda: modules.sd_hijack.model_hijack.redo_hijack(shared.sd_model)), call=False)
    startup_timer.record("opts onchange")
|
|
|
|
|
def initialize():
    """One-time startup initialization.

    Sets up the asyncio policy, validates TLS options, installs the SIGINT
    handler, checks dependency versions, prepares the SD / codeformer / gfpgan
    models, then runs the shared initialize_rest() path.  Order matters: the
    environment fixes come before anything that might spawn threads or load
    models.
    """
    fix_asyncio_event_loop_policy()
    validate_tls_options()
    configure_sigint_handler()
    check_versions()
    modelloader.cleanup_models()
    configure_opts_onchange()

    modules.sd_models.setup_model()
    startup_timer.record("setup SD model")

    codeformer.setup_model(cmd_opts.codeformer_models_path)
    startup_timer.record("setup codeformer")

    gfpgan.setup_model(cmd_opts.gfpgan_models_path)
    startup_timer.record("setup gfpgan")

    initialize_rest(reload_script_modules=False)
|
|
|
|
|
def initialize_rest(*, reload_script_modules=False):
    """
    Called both from initialize() and when reloading the webui.

    Lists extensions/models, loads scripts and upscalers, refreshes VAEs and
    textual-inversion templates, registers optimizers/unets, and kicks off
    model loading on a background thread.  reload_script_modules=True
    additionally reloads the modules.ui* python modules (UI restart path).
    """
    sd_samplers.set_samplers()
    extensions.list_extensions()
    startup_timer.record("list extensions")

    restore_config_state_file()

    if cmd_opts.ui_debug_mode:
        # Debug mode: skip model listing entirely, use a stub upscaler list.
        shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
        modules.scripts.load_scripts()
        return

    modules.sd_models.list_models()
    startup_timer.record("list SD models")

    localization.list_localizations(cmd_opts.localizations_dir)

    with startup_timer.subcategory("load scripts"):
        modules.scripts.load_scripts()

    if reload_script_modules:
        # Reload all already-imported modules.ui* modules so UI code changes
        # take effect after a restart.
        for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
            importlib.reload(module)
        startup_timer.record("reload script modules")

    modelloader.load_upscalers()
    startup_timer.record("load upscalers")

    modules.sd_vae.refresh_vae_list()
    startup_timer.record("refresh VAE")
    modules.textual_inversion.textual_inversion.list_textual_inversion_templates()
    startup_timer.record("refresh textual inversion templates")

    modules.script_callbacks.on_list_optimizers(modules.sd_hijack_optimizations.list_optimizers)
    modules.sd_hijack.list_optimizers()
    startup_timer.record("scripts list_optimizers")

    modules.sd_unet.list_unets()
    startup_timer.record("scripts list_unets")

    def load_model():
        """
        Accesses shared.sd_model property to load model.
        After it's available, if it has been loaded before this access by some
        extension, its optimization may be None because the list of optimizers
        has not yet been filled by that time, so we apply optimization again.
        """
        shared.sd_model  # property access alone triggers the (lazy) model load

        if modules.sd_hijack.current_optimizer is None:
            modules.sd_hijack.apply_optimizations()

    # Model loading and the device warm-up both run off the main thread so the
    # UI can come up while they proceed.
    Thread(target=load_model).start()

    Thread(target=devices.first_time_calculation).start()

    shared.reload_hypernetworks()
    startup_timer.record("reload hypernetworks")

    ui_extra_networks.initialize()
    ui_extra_networks.register_default_pages()

    extra_networks.initialize()
    extra_networks.register_default_extra_networks()
    startup_timer.record("initialize extra networks")
|
|
|
|
|
def setup_middleware(app):
    """(Re)install gzip and CORS middleware on *app*.

    Resetting middleware_stack first allows middleware to be added even after
    the app has started; build_middleware_stack() then makes it take effect.
    """
    app.middleware_stack = None
    app.add_middleware(GZipMiddleware, minimum_size=1000)
    configure_cors_middleware(app)
    app.build_middleware_stack()
|
|
|
|
|
def configure_cors_middleware(app):
    """Attach CORSMiddleware to *app*, honoring --cors-allow-origins
    (comma-separated origin list) and --cors-allow-origins-regex."""
    allowed_origins = cmd_opts.cors_allow_origins
    origin_regex = cmd_opts.cors_allow_origins_regex

    cors_options = {
        "allow_methods": ["*"],
        "allow_headers": ["*"],
        "allow_credentials": True,
    }
    if allowed_origins:
        cors_options["allow_origins"] = allowed_origins.split(',')
    if origin_regex:
        cors_options["allow_origin_regex"] = origin_regex

    app.add_middleware(CORSMiddleware, **cors_options)
|
|
|
|
|
def create_api(app):
    """Build the web API on top of *app*, sharing the global queue_lock.

    Imported lazily so API-related modules only load when the API is used.
    """
    from modules.api.api import Api
    return Api(app, queue_lock)
|
|
|
|
|
def api_only():
    """Run the backend HTTP API without the gradio UI (--nowebui mode)."""
    initialize()

    app = FastAPI()
    setup_middleware(app)
    api = create_api(app)

    # No gradio Blocks exist in API-only mode, hence demo=None.
    modules.script_callbacks.app_started_callback(None, app)

    print(f"Startup time: {startup_timer.summary()}.")
    api.launch(
        server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1",
        port=cmd_opts.port if cmd_opts.port else 7861,
        root_path=f"/{cmd_opts.subpath}" if cmd_opts.subpath else ""
    )
|
|
|
|
|
def webui():
    """Run the full gradio UI, looping to support in-process UI restarts.

    Each iteration builds the UI, launches gradio, wires up the extra APIs,
    then blocks polling for a server command: "stop" shuts the server down and
    exits; "restart" tears the UI down and re-runs initialize_rest().
    """
    launch_api = cmd_opts.api
    initialize()

    while 1:
        if shared.opts.clean_temp_dir_at_start:
            ui_tempdir.cleanup_tmpdr()
            startup_timer.record("cleanup temp dir")

        modules.script_callbacks.before_ui_callback()
        startup_timer.record("scripts before_ui_callback")

        shared.demo = modules.ui.create_ui()
        startup_timer.record("create ui")

        if not cmd_opts.no_gradio_queue:
            shared.demo.queue(64)

        gradio_auth_creds = list(get_gradio_auth_creds()) or None

        app, local_url, share_url = shared.demo.launch(
            share=cmd_opts.share,
            server_name=server_name,
            server_port=cmd_opts.port,
            ssl_keyfile=cmd_opts.tls_keyfile,
            ssl_certfile=cmd_opts.tls_certfile,
            ssl_verify=cmd_opts.disable_tls_verify,
            debug=cmd_opts.gradio_debug,
            auth=gradio_auth_creds,
            # SD_WEBUI_RESTARTING suppresses browser auto-open on restarts.
            inbrowser=cmd_opts.autolaunch and os.getenv('SD_WEBUI_RESTARTING') != '1',
            prevent_thread_lock=True,
            allowed_paths=cmd_opts.gradio_allowed_path,
            app_kwargs={
                "docs_url": "/docs",
                "redoc_url": "/redoc",
            },
            root_path=f"/{cmd_opts.subpath}" if cmd_opts.subpath else "",
        )

        # Only auto-open the browser on the first launch of the loop.
        cmd_opts.autolaunch = False

        startup_timer.record("gradio launch")

        # Strip gradio's own CORS middleware so setup_middleware() below can
        # install ours with the configured allow-origins instead.
        app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware']

        setup_middleware(app)

        modules.progress.setup_progress_api(app)
        modules.ui.setup_ui_api(app)

        if launch_api:
            create_api(app)

        ui_extra_networks.add_pages_to_demo(app)

        startup_timer.record("add APIs")

        with startup_timer.subcategory("app_started_callback"):
            modules.script_callbacks.app_started_callback(shared.demo, app)

        timer.startup_record = startup_timer.dump()
        print(f"Startup time: {startup_timer.summary()}.")

        # Block here until something requests a stop or restart.
        try:
            while True:
                server_command = shared.state.wait_for_server_command(timeout=5)
                if server_command:
                    if server_command in ("stop", "restart"):
                        break
                    else:
                        print(f"Unknown server command: {server_command}")
        except KeyboardInterrupt:
            print('Caught KeyboardInterrupt, stopping...')
            server_command = "stop"

        if server_command == "stop":
            print("Stopping server...")
            shared.demo.close()
            break

        # Restart path: close the UI and rebuild everything on the next loop.
        print('Restarting UI...')
        shared.demo.close()
        time.sleep(0.5)  # NOTE(review): presumably lets the old server settle before relaunch -- confirm
        startup_timer.reset()
        modules.script_callbacks.app_reload_callback()
        startup_timer.record("app reload callback")
        modules.script_callbacks.script_unloaded_callback()
        startup_timer.record("scripts unloaded callback")
        initialize_rest(reload_script_modules=True)
|
|
|
|
|
if __name__ == "__main__":
    # --nowebui serves only the HTTP API; otherwise launch the full gradio UI.
    if cmd_opts.nowebui:
        api_only()
    else:
        webui()
|
|