"""Gradio demo that generates images via a remote Stable-Diffusion-style API."""
import gradio as gr
import requests
import json
import PIL.Image
from io import BytesIO
import os
import random


def generate_image(prompt, negative_prompt, scheduler, steps, width, height, restore_faces, seed, cfg):
    """Request a single image from the remote generation API.

    Args:
        prompt: Positive text prompt.
        negative_prompt: Text describing what to avoid in the image.
        scheduler: Sampler/scheduler name (one of the dropdown choices).
        steps: Number of diffusion steps.
        width, height: Output dimensions in pixels.
        restore_faces: Whether to run the CodeFormer face-restoration pass.
        seed: Explicit RNG seed, or None to pick a random one.
        cfg: Classifier-free-guidance scale.

    Returns:
        A PIL.Image.Image decoded from the URL the API returns.

    Raises:
        Exception: If the API responds with a non-200 status code.
    """
    # Endpoint and auth token are supplied through the environment.
    api_url = os.getenv("API_URL")
    token = os.getenv("API_TOKEN")

    headers = {
        "Content-Type": "application/json",
        # BUG FIX: the original sent the literal string "token" instead of the
        # value read from API_TOKEN, so every request was unauthenticated.
        "token": token,
    }

    body = {
        "mode": "url",
        "model": "AOM3A1B_orangemixs.safetensors",
        "tiling": False,
        "batch_size": 1,
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        # BUG FIX: `seed if seed` discarded an explicit seed of 0 (falsy);
        # only randomize when no seed was supplied at all.
        "seed": seed if seed is not None else random.randint(0, 999999999),
        "scheduler": scheduler,
        "n_iter": 1,
        "steps": steps,
        "cfg": cfg,
        "offset_noise": 0.0,
        "width": width,
        "height": height,
        "clip_skip": 1,
        "loras": [{"name": "", "strength": 1.0}],
        "embeddings": [{"name": "", "strength": 1.0}],
        "vae": "vae-ft-mse-840000-ema-pruned.ckpt",
        "restore_faces": restore_faces,
        "fr_model": "CodeFormer",
        "codeformer_weight": 0.5,
        "enable_hr": False,
        "denoising_strength": 0.75,
        "hr_scale": 2,
        "hr_upscale": "None",
        "img2img_ref_img_type": "piece",
        "img2img_resize_mode": 0,
        "img2img_denoising_strength": 0.75,
        "controlnet_enabled": False,
        "controlnet_ref_img_type": "piece",
        "controlnet_guessmode": False,
        "controlnet_module": "canny",
        "controlnet_model": "control_v11p_sd15_softedge",
        "controlnet_weight": 1,
        "controlnet_guidance_start": 0,
        "controlnet_guidance_end": 1,
        "controlnet_ref_img_url": "https://upload.wikimedia.org/wikipedia/commons/d/d1/Image_not_available.png",
        "controlnet_mask": [],
        "controlnet_resize_mode": "Scale to Fit (Inner Fit)",
        "controlnet_lowvram": False,
        "controlnet_processor_res": 512,
        "controlnet_threshold_a": 100,
        "controlnet_threshold_b": 200,
    }

    # `json=` lets requests serialize the body itself; the timeout keeps a
    # stalled generation request from hanging the UI forever.
    response = requests.post(api_url, headers=headers, json=body, timeout=600)

    if response.status_code == 200:
        # The API returns {"results": [<image_url>, ...]}; fetch the first one.
        image_url = response.json()["results"][0]
        image_response = requests.get(image_url, timeout=60)
        return PIL.Image.open(BytesIO(image_response.content))
    else:
        raise Exception("API request failed with status code " + str(response.status_code))


# Define the Gradio interface (old gradio component API using `default=`).
iface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.components.Textbox(label="Prompt"),
        gr.components.Textbox(default="ugly, tiling, poorlydrawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, blurry, bad anatomy, blurred, watermark, grainy, signature, cut off, draft", label="Negative Prompt"),
        gr.components.Dropdown(choices=[
            "Euler a", "Euler", "LMS", "Heun", "DPM2", "DPM2 a", "DPM++ 2S a",
            "DPM++ 2M", "DPM++ SDE", "DPM fast", "DPM adaptive", "LMS Karras",
            "DPM2 Karras", "DPM2 a Karras", "DPM++ 2S a Karras",
            "DPM++ 2M Karras", "DPM++ SDE Karras", "DDIM", "PLMS"
        ], label="Scheduler", default="Euler a"),
        gr.components.Slider(minimum=10, maximum=100, default=30, label="Steps"),
        gr.components.Slider(minimum=512, maximum=1600, default=768, label="Width"),
        gr.components.Slider(minimum=512, maximum=1600, default=768, label="Height"),
        gr.components.Checkbox(label="Restore Faces"),
        gr.components.Number(label="Seed", default=None),
        # BUG FIX: the original had `default==7.0` (a comparison, not a
        # keyword argument), which is a SyntaxError.
        gr.components.Slider(minimum=4, maximum=12, default=7.0, label="CFG"),
    ],
    outputs=gr.components.Image(),
    title="Freedom Demonstration",
    description=""" Testing environment for the Freedom Model. Finetuned model of SD 2.1 768X produced by @artificialguybr.
The weights were released here.
You can find example prompts here.
Demonstration running on the makeai.run API.
Thanks to Redmond.ai for providing GPU Time and sponsoring this model. """,
    allow_flagging='never',
)

# Launch the app
iface.launch()