"""Gradio app that generates images through the Hugging Face serverless
Inference API using a LoRA model defined in ``loras.json``."""

import io
import json
import os

import gradio as gr
import requests
from PIL import Image

# Load LoRA model configs; each entry is expected to provide at least
# "repo" (HF model id) and "trigger_word" — TODO confirm against loras.json.
with open('loras.json', 'r') as f:
    loras = json.load(f)


def query(payload, api_url, token):
    """POST *payload* to the Inference API and return the response body.

    Parameters
    ----------
    payload : dict
        JSON body for the API, e.g. ``{"inputs": "<prompt>"}``.
    api_url : str
        Full model endpoint URL.
    token : str
        Hugging Face API token used as a Bearer credential.

    Returns
    -------
    io.BytesIO
        The raw image bytes wrapped in a file-like object.

    Raises
    ------
    requests.HTTPError
        On non-2xx responses. Without this check the API's JSON error body
        (e.g. "model is loading") would be handed to ``PIL.Image.open`` and
        surface later as a confusing ``UnidentifiedImageError``.
    """
    headers = {"Authorization": f"Bearer {token}"}
    response = requests.post(api_url, headers=headers, json=payload)
    response.raise_for_status()  # fail fast with the real API error
    return io.BytesIO(response.content)


def run_lora(prompt, weight):
    """Generate an image for *prompt* with the first configured LoRA.

    ``weight`` is accepted for interface compatibility with the UI callback
    but is currently unused — the serverless Inference API payload built
    here has no LoRA-weight field. TODO(review): wire it up or drop it at
    the UI level.
    """
    print("Inside run_lora")
    # NOTE(review): always uses the first entry; parameterize the index/choice
    # if multiple models should be selectable.
    selected_lora = loras[0]
    api_url = f"https://api-inference.huggingface.co/models/{selected_lora['repo']}"
    trigger_word = selected_lora["trigger_word"]
    # API token is read from the managed environment.
    token = os.getenv("API_TOKEN")
    # Trigger word is appended so the LoRA activates on this prompt.
    payload = {"inputs": f"{prompt} {trigger_word}"}
    print("Calling query function...")
    image_bytes = query(payload, api_url, token)
    print("Query function executed successfully.")
    return Image.open(image_bytes)


# Gradio UI
print("Before Gradio Interface")
# NOTE(review): SOURCE is truncated mid-statement at this point:
#   title = gr.HTML("
# The remainder of the UI definition is outside this chunk and has not been
# reconstructed here.