Spaces:
Running
on
Zero
Running
on
Zero
artificialguybr
committed on
Commit
•
063fc12
1
Parent(s):
ccde57b
Update app.py
Browse files
app.py
CHANGED
@@ -14,10 +14,14 @@ logging.basicConfig(level=logging.DEBUG)
|
|
14 |
with open('loras.json', 'r') as f:
|
15 |
loras = json.load(f)
|
16 |
|
17 |
-
# Define the function to run when the button is clicked
|
18 |
def update_selection(selected_state: gr.SelectData):
|
19 |
logging.debug(f"Inside update_selection, selected_state: {selected_state}")
|
20 |
-
|
|
|
|
|
|
|
|
|
|
|
21 |
logging.debug(f"Updated selected_state: {selected_state}")
|
22 |
return (
|
23 |
updated_text,
|
@@ -33,21 +37,12 @@ def run_lora(prompt, selected_state, progress=gr.Progress(track_tqdm=True)):
|
|
33 |
if not selected_state:
|
34 |
logging.error("selected_state is None or empty.")
|
35 |
raise gr.Error("You must select a LoRA")
|
36 |
-
|
37 |
selected_lora_index = selected_state['index']
|
38 |
selected_lora = loras[selected_lora_index]
|
39 |
api_url = f"https://api-inference.huggingface.co/models/{selected_lora['repo']}"
|
40 |
trigger_word = selected_lora["trigger_word"]
|
41 |
token = os.getenv("API_TOKEN")
|
42 |
payload = {"inputs": f"{prompt} {trigger_word}"}
|
43 |
-
|
44 |
-
# API call
|
45 |
-
headers = {"Authorization": f"Bearer {token}"}
|
46 |
-
response = requests.post(api_url, headers=headers, json=payload)
|
47 |
-
if response.status_code == 200:
|
48 |
-
return Image.open(io.BytesIO(response.content))
|
49 |
-
else:
|
50 |
-
return "API Error"
|
51 |
|
52 |
# Gradio UI
|
53 |
with gr.Blocks(css="custom.css") as app:
|
|
|
14 |
with open('loras.json', 'r') as f:
|
15 |
loras = json.load(f)
|
16 |
|
|
|
17 |
def update_selection(selected_state: gr.SelectData):
|
18 |
logging.debug(f"Inside update_selection, selected_state: {selected_state}")
|
19 |
+
selected_lora_index = selected_state['index']
|
20 |
+
updated_text = loras[selected_lora_index]['title']
|
21 |
+
instance_prompt = "Your custom instance prompt here"
|
22 |
+
new_placeholder = "Your new placeholder here"
|
23 |
+
use_with_diffusers = "Your use_with_diffusers here"
|
24 |
+
use_with_uis = "Your use_with_uis here"
|
25 |
logging.debug(f"Updated selected_state: {selected_state}")
|
26 |
return (
|
27 |
updated_text,
|
|
|
37 |
if not selected_state:
|
38 |
logging.error("selected_state is None or empty.")
|
39 |
raise gr.Error("You must select a LoRA")
|
|
|
40 |
selected_lora_index = selected_state['index']
|
41 |
selected_lora = loras[selected_lora_index]
|
42 |
api_url = f"https://api-inference.huggingface.co/models/{selected_lora['repo']}"
|
43 |
trigger_word = selected_lora["trigger_word"]
|
44 |
token = os.getenv("API_TOKEN")
|
45 |
payload = {"inputs": f"{prompt} {trigger_word}"}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
46 |
|
47 |
# Gradio UI
|
48 |
with gr.Blocks(css="custom.css") as app:
|