import io
import os
import random

import gradio as gr
import requests
from PIL import Image

# Hugging Face Inference API endpoint for each model name shown in the UI.
MODEL_URLS = {
    "SDXL-1.0": "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0",
    "SD-1.5": "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5",
    "OpenJourney-V4": "https://api-inference.huggingface.co/models/prompthero/openjourney",
    "Anything-V4": "https://api-inference.huggingface.co/models/xyn-ai/anything-v4.0",
    "Disney-Pixar-Cartoon": "https://api-inference.huggingface.co/models/stablediffusionapi/disney-pixar-cartoon",
    "Pixel-Art-XL": "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl",
    "Dalle-3-XL": "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl",
    "Midjourney-V4-XL": "https://api-inference.huggingface.co/models/openskyml/midjourney-v4-xl",
}

# List of available models (dropdown choices); insertion order matches MODEL_URLS.
list_models = list(MODEL_URLS)

# Per-style (prompt suffix, negative-prompt suffix) appended to the user's text.
STYLE_MODIFIERS = {
    "None style": (", 8k", ""),
    "Cinematic": (
        ", realistic, detailed, textured, skin, hair, eyes, by Alex Huguet, Mike Hill, Ian Spriggs, JaeCheol Park, Marek Denko",
        ", abstract, cartoon, stylized",
    ),
    "Digital Art": (
        ", faded , vintage , nostalgic , by Jose Villa , Elizabeth Messina , Ryan Brenizer , Jonas Peterson , Jasmine Star",
        ", sharp , modern , bright",
    ),
    "Portrait": (
        ", soft light, sharp, exposure blend, medium shot, bokeh, (hdr:1.4), high contrast, (cinematic, teal and orange:0.85), (muted colors, dim colors, soothing tones:1.3), low saturation, (hyperdetailed:1.2), (noir:0.4), (natural skin texture, hyperrealism, soft light, sharp:1.2)",
        "",
    ),
}


def generate_txt2img(current_model, prompt, is_negative="", image_style="None style",
                     steps=50, cfg_scale=7, seed=None):
    """Generate an image from text via the Hugging Face Inference API.

    Args:
        current_model: Key into MODEL_URLS selecting the diffusion model.
        prompt: User prompt; a style-specific suffix is appended.
        is_negative: Negative prompt text (was ``False`` by default, which
            crashed on string concatenation for some styles; now ``""``).
        image_style: One of the STYLE_MODIFIERS keys; unknown styles fall
            back to "None style".
        steps: Number of diffusion steps forwarded in the payload.
        cfg_scale: Classifier-free guidance scale forwarded in the payload.
        seed: RNG seed; a random one is drawn when None.

    Returns:
        PIL.Image.Image: The decoded generated image.

    Raises:
        ValueError: If current_model is not a known model name.
        requests.HTTPError: If the API responds with an error status.
    """
    api_url = MODEL_URLS.get(current_model)
    if api_url is None:
        raise ValueError(f"Unknown model: {current_model!r}")

    # Token is read from the environment; header is sent even when unset,
    # matching the original behavior (the API then applies anonymous limits).
    api_token = os.environ.get("HF_READ_TOKEN")
    headers = {"Authorization": f"Bearer {api_token}"}

    prompt_suffix, negative_suffix = STYLE_MODIFIERS.get(
        image_style, STYLE_MODIFIERS["None style"]
    )
    payload = {
        "inputs": prompt + prompt_suffix,
        # Normalize falsy values (None/False/"") so concatenation is safe.
        "is_negative": (is_negative or "") + negative_suffix,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed if seed is not None else random.randint(-1, 2147483647),
    }

    response = requests.post(api_url, headers=headers, json=payload, timeout=120)
    # Fail loudly on API errors instead of handing error JSON to Image.open.
    response.raise_for_status()
    return Image.open(io.BytesIO(response.content))


# Custom CSS (the original contained an exact duplicate of the container and
# button rules; the duplicate was removed — identical rules, no visual change).
css = """
/* Custom CSS */
.gradio-container {
    font-family: 'IBM Plex Sans', sans-serif;
    max-width: 900px;
    margin: auto;
    padding: 2rem;
    border-radius: 15px;
    box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.2);
    text-align: center; /* Center the content horizontally */
}

/* Button Styles */
.gr-button {
    color: white;
    background-color: #007bff; /* Use a primary color for the background */
    border: none;
    padding: 10px 20px;
    border-radius: 8px;
    cursor: pointer;
    transition: background-color 0.3s, color 0.3s;
}
.gr-button:hover {
    background-color: #0056b3; /* Darken the background color on hover */
}

/* Textbox Styles */
.gr-textbox {
    border-radius: 8px;
    border: 1px solid #ccc;
    padding: 10px;
    transition: border-color 0.3s;
}
.gr-textbox:focus {
    border-color: #007bff;
    outline: none;
}

/* Gallery Styles */
#gallery {
    display: flex;
    justify-content: center;
    align-items: center;
    margin-top: 2rem;
}

/* Automatically adjust photo size */
#gallery img {
    max-width: 100%;
    height: auto;
    border-radius: 12px;
    box-shadow: 0px 2px 4px rgba(0, 0, 0, 0.2);
}
"""

# Creating Gradio interface
with gr.Blocks(css=css) as demo:
    with gr.Row():
        with gr.Column():
            # NOTE(review): the original heading markup was garbled in the
            # source; reconstructed as a centered title — confirm wording.
            gr.Markdown("<center><h1>AI Diffusion</h1></center>")
            current_model = gr.Dropdown(label="Select Model",
                                        choices=list_models,
                                        value=list_models[1])
            text_prompt = gr.Textbox(label="Enter Prompt",
                                     placeholder="Example: a cute dog",
                                     lines=2)
            generate_button = gr.Button("Generate Image", variant='primary')
        with gr.Column():
            # NOTE(review): reconstructed heading — see note above.
            gr.Markdown("<center><h1>Advanced Settings</h1></center>")
            with gr.Accordion("Advanced Customizations", open=False):
                negative_prompt = gr.Textbox(label="Negative Prompt (Optional)",
                                             placeholder="Example: blurry, unfocused",
                                             lines=2)
                image_style = gr.Dropdown(label="Select Style",
                                          choices=["None style", "Cinematic",
                                                   "Digital Art", "Portrait"],
                                          value="None style")
                # Add more options if needed
    with gr.Row():
        image_output = gr.Image(type="pil", label="Output Image")

    generate_button.click(generate_txt2img,
                          inputs=[current_model, text_prompt,
                                  negative_prompt, image_style],
                          outputs=image_output)

# Launch the app only when executed as a script, not on import.
if __name__ == "__main__":
    demo.launch()