import gradio as gr
import requests
import io
from PIL import Image
import json
import os


# Load the list of available LoRAs (repo id, trigger word, preview image, title).
with open('loras.json', 'r') as f:
    loras = json.load(f)
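
# For reference, each entry in loras.json is assumed to look roughly like the
# following (inferred from the keys accessed later in this script; the actual
# file contents are not shown here):
#
#   {
#     "title": "Example LoRA",
#     "image": "https://example.com/preview.png",
#     "repo": "some-user/some-lora-repo",
#     "trigger_word": "example style"
#   }

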
def query(payload, api_url, token):
    """POST the payload to the Hugging Face Inference API and return the response body as a BytesIO buffer."""
    headers = {"Authorization": f"Bearer {token}"}
    response = requests.post(api_url, headers=headers, json=payload)
    return io.BytesIO(response.content)
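

# A slightly more defensive variant of query() (a sketch, not part of the
# original script): the Inference API returns an error body instead of image
# bytes when a request fails, e.g. while the model is still loading, so raising
# on HTTP errors makes such failures visible instead of handing broken bytes to PIL.
def query_or_raise(payload, api_url, token):
    headers = {"Authorization": f"Bearer {token}"}
    response = requests.post(api_url, headers=headers, json=payload)
    response.raise_for_status()  # surface 4xx/5xx responses as exceptions
    return io.BytesIO(response.content)

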
def run_lora(prompt):
    # NOTE: this demo always generates with the first LoRA in loras.json;
    # the gallery selection is not wired into this function.
    selected_lora = loras[0]
    api_url = f"https://api-inference.huggingface.co/models/{selected_lora['repo']}"
    trigger_word = selected_lora["trigger_word"]
    token = os.getenv("API_TOKEN")  # Hugging Face API token, read from the environment
    payload = {"inputs": f"{prompt} {trigger_word}"}
    image_bytes = query(payload, api_url, token)
    return Image.open(image_bytes)
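

# Index-aware variant (a sketch, not in the original script): if the UI tracked
# which gallery item was clicked, a function like this could generate with the
# selected LoRA instead of always using loras[0]. See the optional wiring
# sketched after the layout below.
def run_lora_at(prompt, index):
    selected_lora = loras[index]
    api_url = f"https://api-inference.huggingface.co/models/{selected_lora['repo']}"
    payload = {"inputs": f"{prompt} {selected_lora['trigger_word']}"}
    return Image.open(query(payload, api_url, os.getenv("API_TOKEN")))

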
print("Before Gradio Interface")

with gr.Blocks() as app:
    # Components are created with render=False and placed explicitly in the
    # layout below via .render(); a bare reference such as `title` on its own
    # line does not move an already-rendered component.
    title = gr.HTML("<h1>LoRA the Explorer</h1>", render=False)
    gallery = gr.Gallery(
        [(item["image"], item["title"]) for item in loras],
        label="LoRA Gallery",
        allow_preview=False,
        columns=3,
        render=False,
    )
    prompt = gr.Textbox(
        label="Prompt",
        lines=1,
        max_lines=1,
        placeholder="Type a prompt after selecting a LoRA",
        render=False,
    )
    result = gr.Image(interactive=False, label="Generated Image", render=False)

    with gr.Row():
        with gr.Column():
            title.render()
            gallery.render()
        with gr.Column():
            prompt.render()
            gr.Button("Run").click(
                fn=run_lora,
                inputs=[prompt],
                outputs=[result],
            )
            result.render()
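
    # Optional extension (a sketch, not in the original script): remember which
    # gallery item was clicked so the index-aware run_lora_at() above could be
    # used. The existing Run button is left untouched so the original behaviour
    # (always loras[0]) is preserved.
    selected_index = gr.State(0)

    def store_selection(evt: gr.SelectData):
        # evt.index is the position of the clicked gallery item
        return evt.index

    gallery.select(store_selection, inputs=None, outputs=[selected_index])
    # e.g. gr.Button("Run selected").click(run_lora_at, [prompt, selected_index], [result])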
print("After Gradio Interface")

app.launch(debug=True)