import io
import json
import os
import random
from pathlib import Path

import gradio as gr
import requests
from PIL import Image

API_TOKEN = os.environ.get("HF_READ_TOKEN")

base_dir = "."
dropdown_options_file = Path(base_dir, "json/dropdown_options.json")
category_data_file = Path(base_dir, "json/category_data.json")
style_data_file = Path(base_dir, "json/style_data.json")
prefix_data_file = Path(base_dir, "json/prefix_data.json")
lightning_data_file = Path(base_dir, "json/lightning_data.json")
lens_data_file = Path(base_dir, "json/lens_data.json")


class Model:
    '''Small struct to hold data for the text generator.'''
    def __init__(self, name) -> None:
        self.name = name


def populate_dropdown_options():
    # Load the dropdown choices for category, style, lightning and lens
    with open(dropdown_options_file, 'r') as f:
        data = json.load(f)
    category_choices = data["category"]
    style_choices = data["style"]
    lightning_choices = data["lightning"]
    lens_choices = data["lens"]
    return tuple(category_choices), tuple(style_choices), tuple(lightning_choices), tuple(lens_choices)


def add_to_prompt(*args):
    prompt, use_default_negative_prompt, base_prompt, negative_base_prompt = args
    default_negative_prompt = (
        "(worst quality:1.2), (low quality:1.2), (lowres:1.1), (monochrome:1.1), (greyscale), "
        "multiple views, comic, sketch, (((bad anatomy))), (((deformed))), (((disfigured))), "
        "watermark, multiple_views, mutation hands, mutation fingers, extra fingers, "
        "missing fingers, watermark"
    )
    if use_default_negative_prompt:
        return "{} {}".format(base_prompt, prompt), default_negative_prompt
    else:
        return "{} {}".format(base_prompt, prompt), ""


def get_random_prompt(data):
    # Pick a random key, then a random sub-list, then 3 random strings from it
    random_key = random.choice(list(data.keys()))
    random_array = random.choice(data[random_key])
    random_strings = random.sample(random_array, 3)
    return random_strings


def get_correct_prompt(data, selected_dropdown):
    # Pick 3 random strings from the entry matching the selected dropdown value
    correct_array = data[selected_dropdown]
    random_array = random.choice(correct_array)
    random_strings = random.sample(random_array, 3)
    random_strings.insert(0, selected_dropdown)
    return random_strings
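
# A minimal sketch of the JSON shapes the helpers above and generate_prompt_output
# below appear to assume, inferred from how the data is indexed; the names and most
# values here are illustrative placeholders, not the repo's actual files.
_example_dropdown_options = {
    "category": ["None", "Random", "Anime"],
    "style": ["None", "Random", "Photograph"],
    "lightning": ["None", "Random", "Beautifully lit"],
    "lens": ["None", "Random", "800mm lens"],
}
_example_category_data = {
    # each non-"none"/"random" dropdown value, lowercased, maps to lists of >= 3 strings
    "anime": [["clean lineart", "vibrant colors", "cel shading"]],
}
_example_prefix_data = [  # flat list of >= 6 strings, sampled and wrapped in ((...))
    "masterpiece", "best quality", "highly detailed", "sharp focus", "8k", "intricate",
]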
def generate_prompt_output(*args):
    # Destructure args
    category, style, lightning, lens, negative_prompt = args

    # Convert variables to lowercase
    category = category.lower()
    style = style.lower()
    lightning = lightning.lower()
    lens = lens.lower()

    # Open prefix_data.json and grab random prefix terms, weighted with ((...))
    with open(prefix_data_file, 'r') as f:
        prefix_data = json.load(f)
    prefix_prompt = random.sample(prefix_data, 6)
    modified_prefix_prompt = [f"(({item}))" for item in prefix_prompt]

    # Open category_data.json and grab the correct text
    with open(category_data_file, 'r') as f2:
        category_data = json.load(f2)
    if category == "none":
        category_prompt = ""
    elif category == "random":
        category_prompt = get_random_prompt(category_data)
    else:
        category_prompt = get_correct_prompt(category_data, category)

    # Open style_data.json and grab the correct text
    with open(style_data_file, 'r') as f3:
        style_data = json.load(f3)
    if style == "none":
        style_prompt = ""
    elif style == "random":
        style_prompt = get_random_prompt(style_data)
    else:
        style_prompt = get_correct_prompt(style_data, style)

    # Open lightning_data.json and grab the correct text
    with open(lightning_data_file, 'r') as f4:
        lightning_data = json.load(f4)
    if lightning == "none":
        lightning_prompt = ""
    elif lightning == "random":
        lightning_prompt = get_random_prompt(lightning_data)
    else:
        lightning_prompt = get_correct_prompt(lightning_data, lightning)

    # Open lens_data.json and grab the correct text
    with open(lens_data_file, 'r') as f5:
        lens_data = json.load(f5)
    if lens == "none":
        lens_prompt = ""
    elif lens == "random":
        lens_prompt = get_random_prompt(lens_data)
    else:
        lens_prompt = get_correct_prompt(lens_data, lens)

    prompt_output = modified_prefix_prompt, category_prompt, style_prompt, lightning_prompt, lens_prompt
    prompt_strings = []
    for sublist in prompt_output:
        # Join the sublist elements into a single string
        prompt_string = ", ".join(str(item) for item in sublist)
        if prompt_string:  # Keep only non-empty parts
            prompt_strings.append(prompt_string)

    # Join the non-empty prompt strings into the final prompt
    final_output = ", ".join(prompt_strings)
    return final_output


list_models = [
    "SDXL-1.0",
    "SD-1.5",
    "OpenJourney-V4",
    "Anything-V4",
    "Disney-Pixar-Cartoon",
    "Pixel-Art-XL",
    "Dalle-3-XL",
    "Midjourney-V4-XL",
]

# Fallback prompt used when the prompt input is not a string (value assumed)
DEFAULT_PROMPT = "Forest house"


def generate_txt2img(current_model, prompt, is_negative=False, image_style="None style",
                     steps=50, cfg_scale=7, seed=None, API_TOKEN=API_TOKEN):
    print("call {} {} one time".format(current_model, prompt))
    '''
    import shutil
    im_save_dir = "local_img_dir"
    if not os.path.exists(im_save_dir):
        #shutil.rmtree(im_save_dir)
        os.mkdir(im_save_dir)
    '''
    if current_model == "SD-1.5":
        API_URL = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"
    elif current_model == "SDXL-1.0":
        API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
    elif current_model == "OpenJourney-V4":
        API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney"
    elif current_model == "Anything-V4":
        API_URL = "https://api-inference.huggingface.co/models/xyn-ai/anything-v4.0"
    elif current_model == "Disney-Pixar-Cartoon":
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/disney-pixar-cartoon"
    elif current_model == "Pixel-Art-XL":
        API_URL = "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl"
    elif current_model == "Dalle-3-XL":
        API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl"
    elif current_model == "Midjourney-V4-XL":
        API_URL = "https://api-inference.huggingface.co/models/openskyml/midjourney-v4-xl"

    #API_TOKEN = os.environ.get("HF_READ_TOKEN")
    headers = {"Authorization": f"Bearer {API_TOKEN}"}

    if not isinstance(prompt, str):
        prompt = DEFAULT_PROMPT

    if image_style == "None style":
        payload = {
            "inputs": prompt + ", 8k",
            "is_negative": is_negative,
            "steps": steps,
            "cfg_scale": cfg_scale,
            "seed": seed if seed is not None else random.randint(-1, 2147483647)
        }
    elif image_style == "Cinematic":
        payload = {
            "inputs": prompt + ", realistic, detailed, textured, skin, hair, eyes, by Alex Huguet, Mike Hill, Ian Spriggs, JaeCheol Park, Marek Denko",
            "is_negative": is_negative + ", abstract, cartoon, stylized",
            "steps": steps,
            "cfg_scale": cfg_scale,
            "seed": seed if seed is not None else random.randint(-1, 2147483647)
        }
    elif image_style == "Digital Art":
        payload = {
            "inputs": prompt + ", faded , vintage , nostalgic , by Jose Villa , Elizabeth Messina , Ryan Brenizer , Jonas Peterson , Jasmine Star",
            "is_negative": is_negative + ", sharp , modern , bright",
            "steps": steps,
            "cfg_scale": cfg_scale,
            "seed": seed if seed is not None else random.randint(-1, 2147483647)
        }
    elif image_style == "Portrait":
        payload = {
            "inputs": prompt + ", soft light, sharp, exposure blend, medium shot, bokeh, (hdr:1.4), high contrast, (cinematic, teal and orange:0.85), (muted colors, dim colors, soothing tones:1.3), low saturation, (hyperdetailed:1.2), (noir:0.4), (natural skin texture, hyperrealism, soft light, sharp:1.2)",
            "is_negative": is_negative,
            "steps": steps,
            "cfg_scale": cfg_scale,
            "seed": seed if seed is not None else random.randint(-1, 2147483647)
        }

    image_bytes = requests.post(API_URL, headers=headers, json=payload).content
    image = Image.open(io.BytesIO(image_bytes))
    '''
    from uuid import uuid1
    path = os.path.join(im_save_dir, "{}.png".format(uuid1()))
    image.save(path)
    return path
    '''
    return image
    #yield image
    #return [image]
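
# A minimal usage sketch of generate_txt2img (kept as a comment so the module has no
# side effects on import); it assumes HF_READ_TOKEN is set and the serverless
# Inference API is reachable:
#
#   img = generate_txt2img("SD-1.5", "Forest house", is_negative="low quality, blur")
#   img.save("forest_house.png")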
def on_ui_tabs():
    '''
    # UI structure
    txt2img_prompt = modules.ui.txt2img_paste_fields[0][0]
    img2img_prompt = modules.ui.img2img_paste_fields[0][0]
    txt2img_negative_prompt = modules.ui.txt2img_paste_fields[1][0]
    img2img_negative_prompt = modules.ui.img2img_paste_fields[1][0]
    '''
    with gr.Blocks(css='''
            .header img { float: middle; width: 33px; height: 33px; }
            .header h1 { top: 18px; left: 10px; }
            ''') as prompt_generator:
        gr.HTML('''
            <div class="header">
                <h1>🧑‍🎨 Next Diffusion Prompt On Stable Diffusion</h1>
            </div>
            ''')
        with gr.Tab("Prompt Generator"):
            with gr.Row():  # Use Row to arrange two columns side by side
                with gr.Column():  # Left column for prompts, model choice and output
                    category_choices, style_choices, lightning_choices, lens_choices = populate_dropdown_options()
                    with gr.Row():
                        gr.HTML('''Input 👇''')
                    with gr.Row():
                        with gr.Row():
                            txt2img_prompt = gr.Textbox(label="txt2img_prompt", interactive=True)
                            txt2img_negative_prompt = gr.Textbox(label="txt2img_negative_prompt", interactive=True)
                        '''
                        with gr.Row():
                            img2img_prompt = gr.Textbox(label="img2img_prompt", interactive=True)
                            img2img_negative_prompt = gr.Textbox(label="img2img_negative_prompt", interactive=True)
                        '''
                    with gr.Row():
                        current_model = gr.Dropdown(label="Current Model", choices=list_models, value=list_models[1])
                        text_button = gr.Button("Generate image by Stable Diffusion")
                    with gr.Row():
                        image_output = gr.Image(label="Output Image", type="filepath", elem_id="gallery",
                                                height=512, show_share_button=True)
                        #image_gallery = gr.Gallery(height=512, label="Output Gallery")
                        #image_file = gr.File(label="Output Image File")
                with gr.Column():  # Right column for result_textbox and generate_button
                    with gr.Row():
                        gr.HTML('''Prompt Extender by Rule 👋 (aids Input 👈)''')
                    with gr.Row().style(equal_height=True):  # Place dropdowns side by side
                        category_dropdown = gr.Dropdown(choices=category_choices, value=category_choices[1],
                                                        label="Category", show_label=True)
                        style_dropdown = gr.Dropdown(choices=style_choices, value=style_choices[1],
                                                     label="Style", show_label=True)
                    with gr.Row():
                        lightning_dropdown = gr.Dropdown(choices=lightning_choices, value=lightning_choices[1],
                                                         label="Lightning", show_label=True)
                        lens_dropdown = gr.Dropdown(choices=lens_choices, value=lens_choices[1],
                                                    label="Lens", show_label=True)
                    result_textbox = gr.Textbox(label="Generated Prompt", lines=3)
                    use_default_negative_prompt = gr.Checkbox(label="Include Negative Prompt", value=True,
                                                              interactive=True, elem_id="negative_prompt_checkbox")
                    setattr(use_default_negative_prompt, "do_not_save_to_config", True)
                    with gr.Row():
                        generate_button = gr.Button(value="Generate", elem_id="generate_button")
                        clear_button = gr.Button(value="Clear")
                    with gr.Row():
                        txt2img = gr.Button("Send to txt2img")
                        #img2img = gr.Button("Send to img2img")
                    with gr.Row():
                        gr.HTML("")
                    with gr.Row():
                        gr.HTML('''Links''')
                    with gr.Row():
                        gr.HTML('''Stable Diffusion Tutorials⚡''')

        '''
        with gr.Accordion("Advanced settings", open=True):
            negative_prompt = gr.Textbox(label="Negative Prompt", value="text, blurry, fuzziness",
                                         lines=1, elem_id="negative-prompt-text-input")
            image_style = gr.Dropdown(label="Style",
                                      choices=["None style", "Cinematic", "Digital Art", "Portrait"],
                                      value="None style", allow_custom_value=False)
            with gr.Row():
        '''

        # Send the generated prompt (and optional default negative prompt) to the txt2img inputs
        txt2img.click(add_to_prompt,
                      inputs=[result_textbox, use_default_negative_prompt, txt2img_prompt, txt2img_negative_prompt],
                      outputs=[txt2img_prompt, txt2img_negative_prompt])
        #img2img.click(add_to_prompt, inputs=[result_textbox, use_default_negative_prompt, img2img_prompt, img2img_negative_prompt], outputs=[img2img_prompt, img2img_negative_prompt])

        # Clear the text boxes and reset every dropdown to "Random"
        clear_button.click(lambda: [""] * 3 + ["Random", "Random", "Random", "Random"], None,
                           [result_textbox, txt2img_prompt, txt2img_negative_prompt,
                            category_dropdown, style_dropdown, lightning_dropdown, lens_dropdown])

        # Call the Inference API with the current model and prompts
        text_button.click(generate_txt2img,
                          inputs=[current_model, txt2img_prompt, txt2img_negative_prompt],
                          outputs=image_output)

        # Register the callback for the Generate button
        generate_button.click(fn=generate_prompt_output,
                              inputs=[category_dropdown, style_dropdown, lightning_dropdown,
                                      lens_dropdown, use_default_negative_prompt],
                              outputs=[result_textbox])

        gr.Examples(
            [
                #["A lovely cat", "low quality, blur", "OpenJourney-V4", "Anime", "Drawing", "Bloom light", "F/14"],
                ["Forest house", "low quality, blur", "SD-1.5", "None", "Photograph", "Beautifully lit", "800mm lens"],
                ["A girl in pink", "low quality, blur", "SD-1.5", "Anime", "3D style", "None", "Random"],
            ],
            inputs=[txt2img_prompt, txt2img_negative_prompt, current_model,
                    category_dropdown, style_dropdown, lightning_dropdown, lens_dropdown],
        )

    return prompt_generator


demo = on_ui_tabs()
demo.launch(show_api=False)