import os
from io import BytesIO

import gradio as gr
import requests
from PIL import Image

# Function to translate Arabic text to English using the Hugging Face Inference API
# (target_language is kept in the signature, but the model is fixed to Arabic -> English)
def translate_text(text, target_language='en'):
    API_URL = "https://api-inference.huggingface.co/models/Helsinki-NLP/opus-mt-ar-en"
    headers = {"Authorization": f"Bearer {os.getenv('API_TOKEN')}"}
    response = requests.post(API_URL, headers=headers, json={"inputs": text})
    if response.status_code == 200:
        return response.json()[0]['translation_text']
    else:
        print("Failed to translate text:", response.text)
        return text  # Return the original text if translation fails
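
# Note: the hosted Inference API may answer with a 503 while a model is still loading;
# a short retry loop around these requests could be added if that becomes an issue (not implemented here).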
# Function to post data to an API and return response
def query(payload, API_URL, headers):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.content
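
# Note: query() returns raw response bytes; on failure the API typically sends a JSON
# error body instead of image data, which the try/except in generate_image() below handles.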

# Function to generate images based on prompts using the Hugging Face API
def generate_image(prompt, model_choice, translate=False):
    if translate:
        prompt = translate_text(prompt, target_language='en')  # Translate the Arabic prompt to English first
    model_urls = {
        "Stable Diffusion v1.5": "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5",
        "dalle-3-xl-v2": "https://api-inference.huggingface.co/models/ehristoforu/dalle-3-xl-v2",
        "midjourney-v6": "https://api-inference.huggingface.co/models/Kvikontent/midjourney-v6",
        "openjourney-v4": "https://api-inference.huggingface.co/models/prompthero/openjourney-v4",
        "LCM_Dreamshaper_v7": "https://api-inference.huggingface.co/models/SimianLuo/LCM_Dreamshaper_v7",
    }
    API_URL = model_urls[model_choice]
    headers = {"Authorization": f"Bearer {os.getenv('API_TOKEN')}"}
    payload = {"inputs": prompt}
    data = query(payload, API_URL, headers)
    try:
        # Load the image from the returned byte data
        image = Image.open(BytesIO(data))
        # Resize the image
        image = image.resize((400, 400))
        return image
    except Exception as e:
        print("Error processing the image:", e)
        return None  # Return None so Gradio shows an empty output instead of crashing

# The Hugging Face access token is read from the API_TOKEN environment variable
# (set it as a Space secret, or export it before running locally)
API_TOKEN = os.getenv("API_TOKEN")

# Styling with custom CSS
css = """
body {background-color: #f0f2f5;}
.gradio-app {background-color: #ffffff; border-radius: 12px; box-shadow: 0 0 12px rgba(0,0,0,0.1);}
button {color: white; background-color: #106BA3; border: none; border-radius: 5px;}
"""
# Define the interface (Arabic UI text)
title = "نموذج توليد الصور"  # "Image generation model"
description = "اكتب وصف للصورة التي تود من النظام التوليدي انشاءها"  # "Write a description of the image you want the generative system to create"
iface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter the description of the image here..."),
        gr.Dropdown(
            choices=["Stable Diffusion v1.5", "dalle-3-xl-v2", "midjourney-v6", "openjourney-v4", "LCM_Dreamshaper_v7"],
            label="Choose Model",
            value="Stable Diffusion v1.5",
        ),
        gr.Checkbox(label="Translate The Text Before Generating Image", value=False),
    ],
    outputs=gr.Image(),
    title=title,
    description=description,
    theme="default",
    css=css,
)
# Launch the interface
iface.launch()
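
# To run locally: set API_TOKEN to a Hugging Face access token (e.g. `export API_TOKEN=hf_...`) and run `python app.py`.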