"""LoRA LoRA the Explorer — a Gradio app that generates images through the
Hugging Face serverless Inference API using a user-selected LoRA model.

Expects a ``loras.json`` file next to this script: a list of dicts with
``image``, ``title``, ``repo`` and ``trigger_word`` keys, and an
``API_TOKEN`` environment variable holding a Hugging Face token.
"""
import io
import json
import os

import gradio as gr
import requests
from PIL import Image

# Load the LoRA catalog shipped alongside this app.
with open('loras.json', 'r') as f:
    loras = json.load(f)


def query(payload, api_url, token):
    """POST *payload* to *api_url* and return the response body as BytesIO.

    Raises requests.HTTPError on a non-2xx response.  Previously an API
    error (rate limit, cold model, bad token) returned a JSON error body
    that PIL then failed to open with a cryptic decode error; failing
    loudly here surfaces the real cause.
    """
    headers = {"Authorization": f"Bearer {token}"}
    response = requests.post(api_url, headers=headers, json=payload)
    response.raise_for_status()
    return io.BytesIO(response.content)


# --- Gradio UI -------------------------------------------------------------
with gr.Blocks(css="custom.css") as demo:
    title = gr.HTML(
        """

LoRA LoRA the Explorer

""",
        elem_id="title",
    )
    # Index (into `loras`) of the gallery item the user clicked; None until
    # a selection is made.
    selected_state = gr.State()
    gallery = gr.Gallery(
        value=[(item["image"], item["title"]) for item in loras],
        label="LoRA Gallery",
        allow_preview=False,
        columns=3,
        elem_id="gallery",
        show_share_button=False,
    )
    prompt = gr.Textbox(
        label="Prompt",
        show_label=False,
        lines=1,
        max_lines=1,
        placeholder="Type a prompt after selecting a LoRA",
        elem_id="prompt",
    )
    advanced_options = gr.Accordion("Advanced options", open=False)
    weight = gr.Slider(0, 10, value=1, step=0.1, label="LoRA weight")
    result = gr.Image(interactive=False, label="Generated Image", elem_id="result-image")

    def update_selection(evt: gr.SelectData):
        """Store the clicked gallery index in `selected_state`.

        Fix: the original code created `selected_state` but never wired a
        `gallery.select` handler, so `run_lora` always received None and
        `loras[None]` raised a TypeError.
        """
        return evt.index

    def run_lora(prompt, weight, selected_state):
        """Generate an image for *prompt* with the selected LoRA.

        Parameters mirror the Gradio inputs: the prompt text, the LoRA
        weight slider value, and the selected gallery index.  Returns a
        PIL.Image for the `result` component.  Raises gr.Error when no
        LoRA has been selected yet.

        NOTE(review): *weight* is collected from the UI but the inference
        payload has no field for it, so it is currently unused — confirm
        whether the target API accepts a LoRA-scale parameter.
        """
        if selected_state is None:
            raise gr.Error("Please select a LoRA from the gallery first.")
        selected_lora = loras[selected_state]
        api_url = f"https://api-inference.huggingface.co/models/{selected_lora['repo']}"
        trigger_word = selected_lora["trigger_word"]
        token = os.getenv("API_TOKEN")
        # The trigger word must appear in the prompt for the LoRA to activate.
        payload = {"inputs": f"{prompt} {trigger_word}"}
        image_bytes = query(payload, api_url, token)
        return Image.open(image_bytes)

    # Wire events: a gallery click records the selection; Enter in the
    # prompt box triggers generation.
    gallery.select(
        fn=update_selection,
        outputs=[selected_state],
    )
    prompt.submit(
        fn=run_lora,
        inputs=[prompt, weight, selected_state],
        outputs=[result],
    )