import torch
import numpy as np
import gradio as gr
import spaces
import cv2
import os
from typing import Dict
from PIL import Image
from huggingface_hub import Repository

# Clone the private engine repository (UI + model helpers) before importing from it.
engine_repo = Repository(local_dir="engine",
                         clone_from="felixrosberg/EngageDiffusion",
                         use_auth_token=os.environ['model_fetch'])

from engine.ui_model import fetch_model, run_model
from engine.ui_gradio import fetch_ui

# Load the diffusion pipeline and move it to the GPU.
pipe = fetch_model()
pipe.to('cuda')


@spaces.GPU
def inference(user_state, condition_image, settings, prompt, neg_prompt,
              inference_steps=8, num_images=2,
              guidance_scale=2.0, guidance_rescale=0.0, clip_skip=0,
              enable_freeu=False, height=1024, width=1024,
              condition_scale=0.5, sketch_detail=1.0, sketch_softness=0.5,
              inpaint_strength=0.9, exposure=0.0,
              enable_stylation=False,
              style_1_down=0.0, style_1_mid=0.0, style_1_up=0.0,
              style_2_down=0.0, style_2_mid=0.0, style_2_up=0.0,
              style_3_down=0.0, style_3_mid=0.0, style_3_up=0.0,
              style_4_down=0.0, style_4_mid=0.0, style_4_up=0.0,
              style_5_down=0.0, style_5_mid=0.0, style_5_up=0.0,
              seed=None,
              progress=gr.Progress()):
    # Run the pipeline with the current UI settings and append the generated
    # images to this session's gallery stored in user_state.
    images = run_model(pipe, user_state, condition_image, settings, prompt, neg_prompt,
                       inference_steps, num_images,
                       guidance_scale, guidance_rescale, clip_skip,
                       enable_freeu, height, width,
                       condition_scale, sketch_detail, sketch_softness,
                       inpaint_strength, exposure,
                       enable_stylation,
                       style_1_down, style_1_mid, style_1_up,
                       style_2_down, style_2_mid, style_2_up,
                       style_3_down, style_3_mid, style_3_up,
                       style_4_down, style_4_mid, style_4_up,
                       style_5_down, style_5_mid, style_5_up,
                       seed, progress)
    user_state["IMAGE_GALLERY"] += images
    return user_state["IMAGE_GALLERY"], user_state


# Build the Gradio interface around the inference function and launch the app.
engage_demo = fetch_ui(inference)
engage_demo.launch()