# Source: Hugging Face Space by wgetdd — "Initial Commit" (c324d8c), 9.8 kB.
# (File-viewer residue "raw / history / blame" commented out so the module
# parses as valid Python.)
import gradio as gr
from torchvision import transforms
import torch
from main_inference import generate_mixed_image, generate_image, progress_video
import matplotlib.colors as mcolors
def run_generate_mixed_image(prompt1, prompt2):
    """Blend two text prompts into a single generated image.

    Thin Gradio callback around ``main_inference.generate_mixed_image``;
    both arguments come from the two text boxes on the "Generate Mixed
    Image" tab, and the return value feeds a ``gr.Image(type='pil')``.
    """
    return generate_mixed_image(prompt1, prompt2)
def run_generate_image(prompt1, noise_checkbox):
    """Generate an image for *prompt1*.

    Thin Gradio callback around ``main_inference.generate_image``.  When
    *noise_checkbox* is True the noised version of the image is requested
    instead of the final denoised result.
    """
    return generate_image(prompt1, noised_image=noise_checkbox)
def run_generate_image_with_color_doninance(prompt1, color, color_loss_scale, noised_image_checkbox_1):
    """Generate an image whose colors are guided toward *color*.

    Args:
        prompt1: text prompt for the diffusion model.
        color: hex color string from the ``gr.ColorPicker`` (e.g. "#ff0000").
        color_loss_scale: strength of the color-loss guidance (0-255 slider).
        noised_image_checkbox_1: when True, show the noised version of the image.

    Returns:
        The image produced by ``generate_image`` (displayed via
        ``gr.Image(type='pil')``).
    """
    # NOTE(review): the original body converted `color` to 0-255 RGB values
    # with mcolors.hex2color but never used the result, so that dead code was
    # removed.  generate_image still receives the raw hex string, exactly as
    # before -- confirm that is the format it expects.
    return generate_image(prompt1, True, color, color_loss_scale, noised_image_checkbox_1)
def run_process_video(prompt):
    """Build the generation-progress video for *prompt*.

    Thin Gradio callback around ``main_inference.progress_video``; the
    return value is shown in a ``gr.Video`` component.
    """
    return progress_video(prompt)
# --- UI copy ------------------------------------------------------------------
# Markdown shown under each tab's buttons.  (Grammar fixed from the original.)
description_text_to_image = """ ### Text to Image Generation
1. Write a Text Prompt.
2. Output will be an image based on the text prompt provided.
3. Check the box if you want to see the noised version of the image.
"""
description_generate_mixed_image = """ ### Mix Image Generation
1. Write Two Text Prompts.
2. Output will be an image which is a mix of both of the prompts provided.
3. Check the box if you want to see the noised version of the image.
"""
description_generate_image_with_color_dominance = """ ### Generate Images with Color Dominance
1. Write a Text Prompt.
2. Select a color.
3. Choose a Color Loss value.
4. Get the generated Image.
5. Check the box if you want to see the noised version of the image.
"""
description_progress_video = """ ### Get the Full Generation Process Video
1. Write a Text Prompt.
2. Output will be a video containing frames of the generated image at various inference steps.
"""
# Page title (rendered as Markdown/HTML at the top of the Blocks layout).
title = "<center><strong><font size='8'>The Stable Diffusion</font></strong></center>"
# --- Component definitions ----------------------------------------------------
# Widgets are created up front and .render()-ed inside the Blocks layout below.
image_input1 = gr.Image(type='pil')        # output: text-to-image tab
image_input2 = gr.Image(type='filepath')   # NOTE(review): never rendered in the layout
image_input3 = gr.Image(type='pil')        # output: color-dominance tab
image_input4 = gr.Image(type='pil')        # output: mixed-image tab
text_input = gr.Text(label="Enter Text Prompt")    # text-to-image prompt
text_input2 = gr.Text(label="Enter Text Prompt")   # mixed-image prompt 1
text_input3 = gr.Text(label="Enter Text Prompt")   # mixed-image prompt 2
text_input4 = gr.Text(label="Enter Text Prompt")   # color-dominance prompt
text_input5 = gr.Text(label="Enter Text Prompt")   # progress-video prompt
video_output = gr.Video()
# gr.ColorPicker has no `description` kwarg (the original raised TypeError);
# the helper text belongs in `info=`.
color = gr.ColorPicker(label="Select a Color", info="Choose a color from the color picker:")
# Modern component API (gr.Checkbox / gr.Slider with value=) replaces the
# deprecated gr.inputs.* classes, consistent with gr.Text/gr.Image above.
noised_image_checkbox = gr.Checkbox(value=False, label="Show Noised Image")
noised_image_checkbox_1 = gr.Checkbox(value=False, label="Show Noised Image")
noised_image_checkbox_2 = gr.Checkbox(value=False, label="Show Noised Image")
color_loss_scale = gr.Slider(minimum=0, maximum=255, value=40, step=1, label="Color Loss")
css = "h1 { text-align: center } .about { text-align: justify; padding-left: 10%; padding-right: 10%; }"
with gr.Blocks(css=css, title='Play with Stable Diffusion') as demo:
    with gr.Row():
        with gr.Column(scale=1):
            # Title
            gr.Markdown(title)

    with gr.Tab("Generate Image"):
        # Prompt + checkbox on the left, generated image on the right.
        with gr.Row(variant="panel"):
            with gr.Column(scale=1):
                text_input.render()
                noised_image_checkbox.render()
            with gr.Column(scale=1):
                image_input1.render()
        # Submit & Clear
        with gr.Row():
            with gr.Column():
                run_generate_image_button = gr.Button("generate_image", variant='primary')
                clear_btn_text_to_image = gr.Button("Clear", variant="secondary")
                gr.Markdown(description_text_to_image)
        run_generate_image_button.click(run_generate_image,
                                        inputs=[text_input, noised_image_checkbox],
                                        outputs=image_input1)

    with gr.Tab("Generate Image with Color Dominance"):
        with gr.Row(variant="panel"):
            with gr.Column(scale=1):
                text_input4.render()
                color_loss_scale.render()
                noised_image_checkbox_1.render()
                color.render()
            with gr.Column(scale=1):
                image_input3.render()
        # Submit & Clear.  This button previously reused the variable name
        # clear_btn_text_to_image, which silently unwired the first tab's
        # Clear button; it now has its own name and its own binding below.
        with gr.Row():
            with gr.Column():
                run_generate_image_with_color_doninance_button = gr.Button("generate_image_with_color_doninance", variant='primary')
                clear_btn_color_dominance = gr.Button("Clear", variant="secondary")
                gr.Markdown(description_generate_image_with_color_dominance)
        run_generate_image_with_color_doninance_button.click(run_generate_image_with_color_doninance,
                                                             inputs=[text_input4, color, color_loss_scale, noised_image_checkbox_1],
                                                             outputs=image_input3)

    with gr.Tab("Generate Mixed Image"):
        with gr.Row(variant="panel"):
            with gr.Column(scale=1):
                text_input2.render()
                text_input3.render()
                noised_image_checkbox_2.render()
            with gr.Column(scale=1):
                image_input4.render()
        # Submit & Clear
        with gr.Row():
            with gr.Column():
                run_generate_mixed_image_button = gr.Button("generate_mixed_image", variant='primary')
                clear_btn_image_to_image = gr.Button("Clear", variant="secondary")
                gr.Markdown(description_generate_mixed_image)
        run_generate_mixed_image_button.click(run_generate_mixed_image,
                                              inputs=[text_input2, text_input3, noised_image_checkbox_2],
                                              outputs=image_input4)

    with gr.Tab("progress_video"):
        with gr.Row(variant="panel"):
            with gr.Column(scale=1):
                text_input5.render()
            with gr.Column(scale=1):
                video_output.render()
        # Submit & Clear
        with gr.Row():
            with gr.Column():
                run_progress_video_button = gr.Button("progress_video", variant='primary')
                clear_btn_progress_video = gr.Button("Clear", variant="secondary")
                gr.Markdown(description_progress_video)
        run_progress_video_button.click(run_process_video,
                                        inputs=[text_input5],
                                        outputs=video_output)

    ###########################################################################
    # Clear helpers: returning None for each output component resets it.
    def clear():
        return None, None

    def clear_text():
        return None, None, None

    # One Clear button per tab, each wired to the widgets that tab actually
    # renders.  (The original cleared image_input1 twice on the first tab and
    # pointed the other buttons at the never-rendered image_input2.)
    clear_btn_text_to_image.click(clear, outputs=[text_input, image_input1])
    clear_btn_color_dominance.click(clear, outputs=[text_input4, image_input3])
    clear_btn_image_to_image.click(clear_text, outputs=[text_input2, text_input3, image_input4])
    clear_btn_progress_video.click(clear, outputs=[text_input5, video_output])

demo.queue()
demo.launch(debug=True)