# testgen / app.py
import io
import os

import gradio as gr
import requests
from PIL import Image

# Hugging Face API token, read from the environment (e.g. a Space secret)
API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
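# Note: if HUGGINGFACE_API_TOKEN is unset, API_TOKEN is None and the header
# below becomes "Bearer None", which the Inference API will normally reject.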
# Query the Hugging Face Inference API and return an (image, error) pair.
def query_huggingface_api(api_url, prompt):
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    data = {"inputs": prompt}
    response = requests.post(api_url, headers=headers, json=data)
    if response.status_code == 200:
        # Text-to-image endpoints return the image as raw bytes; decode them
        # into a PIL image so gr.Image can display the result.
        return Image.open(io.BytesIO(response.content)), None
    return None, f"Error {response.status_code}: {response.text}"
# Gradio callback: build the full Inference API URL from the model ID and
# forward the prompt.
def generate_image(model_id, prompt):
    return query_huggingface_api(
        f"https://api-inference.huggingface.co/models/{model_id}", prompt
    )
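# Example call (a sketch using this Space's default model ID, which may not be
# a publicly available model; requires a valid token in the environment):
#   image, error = generate_image("user/sdwarm", "a watercolor fox in the snow")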
# Create Gradio Blocks Interface
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Text to Image Generator
        Enter a text prompt, and the custom model will generate an image.
        """
    )
    with gr.Row():
        with gr.Column():
            text_input = gr.Textbox(
                label="Enter your prompt",
                placeholder="Type something here..."
            )
            model_input = gr.Textbox(
                label="Model ID",
                placeholder="Enter a Hugging Face model ID...",
                value="user/sdwarm"
            )
            generate_btn = gr.Button("Generate Image")
        with gr.Column():
            image_output = gr.Image(label="Generated Image")
            error_output = gr.Textbox(label="Error", interactive=False)

    # Wire the button to the callback: model ID and prompt in, image and error out.
    generate_btn.click(
        fn=generate_image,
        inputs=[model_input, text_input],
        outputs=[image_output, error_output]
    )
# Launch the Gradio Blocks WebUI
demo.launch()
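# To run locally (a minimal sketch; replace the placeholder with your own token):
#   export HUGGINGFACE_API_TOKEN=hf_your_token_here
#   python app.py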