import gradio as gr
import os
import sys
from pathlib import Path
from PIL import Image
import re
import numpy as np

# Create output directories if they don't exist
os.makedirs('saved_prompts', exist_ok=True)
os.makedirs('saved_images', exist_ok=True)

# Generate a filesystem-safe filename from arbitrary prompt text
def generate_safe_filename(text):
    return re.sub('[^a-zA-Z0-9]', '_', text)
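
# Illustrative example (not from the original source):
#   generate_safe_filename("a cat, 4k!")  ->  "a_cat__4k_"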

def load_models_from_file(filename):
    with open(filename, 'r') as f:
        # Skip blank lines so trailing newlines do not produce empty model names
        return [line.strip() for line in f if line.strip()]
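
# models.txt is expected to contain one Hugging Face model repo id per line,
# for example (illustrative only; the real file is not shown here):
#   stabilityai/stable-diffusion-2-1
#   runwayml/stable-diffusion-v1-5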

# Load the model list at import time so the UI below can reference it
# removed to removed.txt
models = load_models_from_file('models.txt')
print(models)
current_model = models[0]

#text_gen1 = gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
#text_gen1 = gr.Interface.load("awacke1/MagicPrompt-Stable-Diffusion", src="spaces")
text_gen1 = gr.Interface.load("awacke1/MagicPrompt-Stable-Diffusion", live=True, src="spaces")

# The "models/" prefix already tells Interface.load to pull from the Hugging Face Hub,
# so no separate src argument is needed here.
models2 = [gr.Interface.load(f"models/{model}", live=True, preprocess=False) for model in models]

def text_it1(inputs, text_gen1=text_gen1):
    go_t1 = text_gen1(inputs)
    return go_t1

def set_model(current_model):
    # The dropdown uses type="index", so current_model arrives as an integer index
    current_model = models[current_model]
    return gr.update(label=f"{current_model}")

# List saved prompts and their matching images as an HTML gallery
def list_saved_prompts_and_images():
    saved_prompts = os.listdir('saved_prompts')
    saved_images = os.listdir('saved_images')
    html_str = "<h2>Saved Prompts and Images:</h2><ul>"
    for prompt_file in saved_prompts:
        image_file = f"{prompt_file[:-4]}.png"
        if image_file in saved_images:
            html_str += f'<li>Prompt: {prompt_file[:-4]} | <a href="saved_images/{image_file}" download>Download Image</a></li>'
    html_str += "</ul>"
    return html_str
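
# Illustrative output for a single saved prompt named "a_red_fox" (example only):
#   <h2>Saved Prompts and Images:</h2><ul><li>Prompt: a_red_fox | <a href="saved_images/a_red_fox.png" download>Download Image</a></li></ul>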

# Generate an image for the prompt, then save both the prompt and the image
def send_it1(inputs, model_choice):
    proc1 = models2[model_choice]
    output1 = proc1(inputs)
    # inputs is the prompt string from the textbox (not a list)
    safe_filename = generate_safe_filename(inputs)
    image_path = f"saved_images/{safe_filename}.png"
    prompt_path = f"saved_prompts/{safe_filename}.txt"
    with open(prompt_path, 'w') as f:
        f.write(inputs)
    # The loaded model may hand back a numpy array, a PIL image, or a file path
    if isinstance(output1, np.ndarray):
        Image.fromarray(np.uint8(output1)).save(image_path)
    elif isinstance(output1, Image.Image):
        output1.save(image_path)
    elif isinstance(output1, str) and os.path.isfile(output1):
        # Models loaded via Interface.load often return a temp-file path
        Image.open(output1).save(image_path)
    else:
        print(f"Warning: Unexpected type {type(output1)} for output1.")
    # Return the image plus the refreshed gallery so both UI components update
    return output1, list_saved_prompts_and_images()

css = ""
with gr.Blocks(css=css) as myface:
    gr.HTML("""<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="utf-8" />
    <meta name="twitter:card" content="player"/>
    <meta name="twitter:site" content=""/>
    <meta name="twitter:player" content="https://omnibus-maximum-multiplier-places.hf.space"/>
    <meta name="twitter:player:stream" content="https://omnibus-maximum-multiplier-places.hf.space"/>
    <meta name="twitter:player:width" content="100%"/>
    <meta name="twitter:player:height" content="600"/>
    <meta property="og:title" content="Embedded Live Viewer"/>
    <meta property="og:description" content="Tweet Genie - A Huggingface Space"/>
    <meta property="og:image" content="https://cdn.glitch.global/80dbe92e-ce75-44af-84d5-74a2e21e9e55/omnicard.png?v=1676772531627"/>
    <!--<meta http-equiv="refresh" content="0; url=https://huggingface.co/spaces/corbt/tweet-genie">-->
</head>
</html>
""")
    with gr.Row():
        with gr.Column(scale=100):
            saved_output = gr.HTML(label="Saved Prompts and Images")
    with gr.Row():
        with gr.Tab("Title"):
            gr.HTML("""<title>Prompt to Generate Image</title><div style="text-align: center; max-width: 1500px; margin: 0 auto;">
            <h1>Enter a Prompt in Textbox then click Generate Image</h1></div>""")
| with gr.Tab("Tools"): | |
| with gr.Tab("View"): | |
| with gr.Row(): | |
| with gr.Column(style="width=50%, height=70%"): | |
| gr.Pil(label="Crop") | |
| with gr.Column(style="width=50%, height=70%"): | |
| gr.Pil(label="Crop") | |
| with gr.Tab("Draw"): | |
| with gr.Column(style="width=50%, height=70%"): | |
| gr.Pil(label="Crop") | |
| with gr.Column(style="width=50%, height=70%"): | |
| gr.Pil(label="Draw") | |
| gr.ImagePaint(label="Draw") | |
| with gr.Tab("Text"): | |
| with gr.Row(): | |
| with gr.Column(scale=50): | |
| gr.Textbox(label="", lines=8, interactive=True) | |
| with gr.Column(scale=50): | |
| gr.Textbox(label="", lines=8, interactive=True) | |
| with gr.Tab("Color Picker"): | |
| with gr.Row(): | |
| with gr.Column(scale=50): | |
| gr.ColorPicker(label="Color", interactive=True) | |
| with gr.Column(scale=50): | |
| gr.ImagePaint(label="Draw", interactive=True) | |
    with gr.Row():
        with gr.Column(scale=100):
            magic1 = gr.Textbox(lines=4)
            run = gr.Button("Generate Image")
    with gr.Row():
        with gr.Column(scale=100):
            model_name1 = gr.Dropdown(label="Select Model", choices=models, type="index", value=current_model, interactive=True)
    with gr.Row():
        with gr.Column(style="width=800px"):
            output1 = gr.Image(label=f"{current_model}")
            # output1 is a Gradio component at build time; the generated image
            # itself is saved to disk inside send_it1 when the button is clicked.
    with gr.Row():
        with gr.Column(scale=50):
            input_text = gr.Textbox(label="Prompt Idea", lines=2)
            use_short = gr.Button("Use Short Prompt")
            see_prompts = gr.Button("Extend Idea")
    with gr.Row():
        with gr.Column(scale=100):
            # Pre-populated with the current gallery; refreshed after each generation
            saved_output = gr.HTML(value=list_saved_prompts_and_images(), label="Saved Prompts and Images")
    def short_prompt(inputs):
        return inputs

    use_short.click(short_prompt, inputs=[input_text], outputs=magic1)
    see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)
    # Link functions to Gradio components
    model_name1.change(set_model, inputs=model_name1, outputs=[output1])
    run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1, saved_output])

myface.queue(concurrency_count=200)
myface.launch(inline=True, show_api=False, max_threads=400)
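
# Note: this script targets the Gradio 3.x API (gr.Interface.load, gr.Pil,
# gr.ImagePaint, queue(concurrency_count=...)). On Gradio 4.x these would need
# replacements such as gr.load() and queue(default_concurrency_limit=...).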