import gradio as gr
import requests
import io
import os
import logging
from PIL import Image
from image_processing import downscale_image, limit_colors, convert_to_grayscale, convert_to_black_and_white, resize_image, DITHER_METHODS, QUANTIZATION_METHODS
import json
import time
# Logging configuration
logging.basicConfig(level=logging.DEBUG)
class SomeClass:
    def __init__(self):
        self.images = []
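# Load the LoRA catalog (title, repo, trigger word, and preview image) used by the gallery and the API calls.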
with open('loras.json', 'r') as f:
    loras = json.load(f)
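# Gallery selection handler: updates the prompt placeholder, the "Selected" markdown link, and the stored selection state.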
def update_selection(selected_state: gr.SelectData):
    logging.debug(f"Inside update_selection, selected_state: {selected_state}")
    logging.debug(f"Content of selected_state: {vars(selected_state)}")  # Log the content
    selected_lora_index = selected_state.index
    selected_lora = loras[selected_lora_index]
    new_placeholder = f"Type a prompt for {selected_lora['title']}"
    lora_repo = selected_lora["repo"]
    updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨"
    return (
        gr.update(placeholder=new_placeholder),
        updated_text,
        selected_state
    )
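# Generation handler: calls the Hugging Face Inference API for the selected LoRA, waiting while the model loads (HTTP 503) and retrying up to five times on HTTP 500 before giving up.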
def run_lora(prompt, selected_state, progress=gr.Progress(track_tqdm=True)):
    logging.debug(f"Inside run_lora, selected_state: {selected_state}")
    logging.debug(f"Content of selected_state in run_lora: {vars(selected_state)}")
    if not selected_state:
        logging.error("selected_state is None or empty. Make sure a LoRA is selected.")
        raise gr.Error("You must select a LoRA before proceeding.")
    token = os.getenv("API_TOKEN")
    if not token:
        logging.error("API_TOKEN is not set.")
        raise gr.Error("API_TOKEN is not set.")
    selected_lora_index = selected_state.index
    selected_lora = loras[selected_lora_index]
    api_url = f"https://api-inference.huggingface.co/models/{selected_lora['repo']}"
    trigger_word = selected_lora["trigger_word"]
    payload = {
        "inputs": f"{prompt} {trigger_word}",
        "parameters": {"negative_prompt": "bad art, ugly, watermark, deformed"},
    }
    headers = {"Authorization": f"Bearer {token}"}
    logging.debug(f"API Request: {api_url}")
    logging.debug(f"API Payload: {payload}")
    error_count = 0
    while True:
        response = requests.post(api_url, json=payload, headers=headers)
        if response.status_code == 200:
            return Image.open(io.BytesIO(response.content))
        elif response.status_code == 503:
            # Model is still loading on the Inference API; wait and retry.
            time.sleep(1)
        elif response.status_code == 500 and error_count < 5:
            logging.error(response.content)
            time.sleep(1)
            error_count += 1
        else:
            logging.error(f"Unexpected API Error: {response.status_code}")
            raise gr.Error(f"Unexpected API Error: {response.status_code}")
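# Optional pixel-art post-processing: downscale, color limiting, grayscale, black-and-white thresholding, custom palette, and an optional rescale back to the original size.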
def postprocess(
    image,
    enabled,
    downscale,
    need_rescale,
    enable_color_limit,
    number_of_colors,
    quantization_method,
    dither_method,
    use_k_means,
    is_grayscale,
    number_of_shades,
    quantization_method_grayscale,
    dither_method_grayscale,
    use_k_means_grayscale,
    is_black_and_white,
    is_inversed_black_and_white,
    black_and_white_threshold,
    use_color_palette,
    palette_image,
    palette_colors,
    dither_method_palette
):
    logging.debug(f"Available keys in QUANTIZATION_METHODS: {QUANTIZATION_METHODS.keys()}")
    logging.debug(f"Selected quantization_method: {quantization_method}")
    if not enabled:
        return image
    processed_image = image.copy()
    if downscale > 1:
        processed_image = downscale_image(processed_image, downscale)
    if enable_color_limit:
        processed_image = limit_colors(
            image=processed_image,
            limit=number_of_colors,
            quantize=QUANTIZATION_METHODS[quantization_method.capitalize()],
            dither=DITHER_METHODS[dither_method],
            use_k_means=use_k_means
        )
    if is_grayscale:
        processed_image = convert_to_grayscale(processed_image)
        processed_image = limit_colors(
            image=processed_image,
            limit=number_of_shades,
            quantize=QUANTIZATION_METHODS[quantization_method_grayscale.capitalize()],
            dither=DITHER_METHODS[dither_method_grayscale],
            use_k_means=use_k_means_grayscale
        )
    if is_black_and_white:
        processed_image = convert_to_black_and_white(processed_image, black_and_white_threshold, is_inversed_black_and_white)
    if use_color_palette:
        processed_image = limit_colors(
            image=processed_image,
            palette=palette_image,
            palette_colors=palette_colors,
            dither=DITHER_METHODS[dither_method_palette]
        )
    if need_rescale:
        processed_image = resize_image(processed_image, image.size)
    return processed_image
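# Full pipeline for the "Run" button: generate with run_lora, then post-process; the original image is returned when post-processing is disabled.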
def run_and_postprocess(prompt, selected_state, enabled, downscale, need_rescale,
                        enable_color_limit, palette_size_color, quantization_methods_color,
                        dither_methods_color, k_means_color, enable_grayscale, palette_size_gray,
                        quantization_methods_gray, dither_methods_gray, k_means_gray,
                        enable_black_and_white, inverse_black_and_white, threshold_black_and_white,
                        enable_custom_palette, palette_image, palette_size_custom, dither_methods_custom):
    # Debug: Starting the function
    logging.debug("Starting run_and_postprocess function.")
    # Run the original image generation
    original_image = run_lora(prompt, selected_state)
    # Debug: Confirming that the original image was generated
    logging.debug("Original image generated.")
    # Post-process the image based on user input
    processed_image = postprocess(
        original_image,
        enabled,
        downscale,
        need_rescale,
        enable_color_limit,
        palette_size_color,
        quantization_methods_color,
        dither_methods_color,
        k_means_color,
        enable_grayscale,
        palette_size_gray,
        quantization_methods_gray,
        dither_methods_gray,
        k_means_gray,
        enable_black_and_white,
        inverse_black_and_white,
        threshold_black_and_white,
        enable_custom_palette,
        palette_image,
        palette_size_custom,
        dither_methods_custom
    )
    # Debug: Confirming that post-processing was applied
    if enabled:
        logging.debug("Post-processing applied.")
    else:
        logging.debug("Post-processing not applied.")
    return processed_image if enabled else original_image
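# Gradio UI: LoRA gallery, prompt box, result image, and the pixel-art post-processing controls.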
with gr.Blocks() as app:
    title = gr.Markdown("# PIXEL ART GENERATOR")
    description = gr.Markdown("### This tool was developed by [@artificialguybr](https://twitter.com/artificialguybr). Generate Pixel Art using LoRAs from [@artificialguybr](https://twitter.com/artificialguybr) and [@nerijs](https://twitter.com/nerijs).")
    selected_state = gr.State()
    with gr.Row():
        gallery = gr.Gallery([(item["image"], item["title"]) for item in loras], label="LoRA Gallery", allow_preview=False, columns=1)
        with gr.Column():
            prompt_title = gr.Markdown("### Click on a LoRA in the gallery to create with it")
            selected_info = gr.Markdown("")
            with gr.Row():
                prompt = gr.Textbox(label="Prompt", show_label=False, lines=1, max_lines=1, placeholder="Type a prompt after selecting a LoRA")
                button = gr.Button("Run")
            result = gr.Image(interactive=False, label="Generated Image")
    # Pixel-art post-processing controls
    with gr.Accordion(label="Pixel art", open=True):
        with gr.Row():
            enabled = gr.Checkbox(label="Enable", value=False)
            downscale = gr.Slider(label="Downscale", minimum=1, maximum=32, step=2, value=8)
            need_rescale = gr.Checkbox(label="Rescale to original size", value=True)
        with gr.Tabs():
            with gr.TabItem("Color"):
                enable_color_limit = gr.Checkbox(label="Enable", value=False)
                palette_size_color = gr.Slider(label="Palette Size", minimum=1, maximum=256, step=1, value=16)
                quantization_methods_color = gr.Radio(choices=["Median Cut", "Maximum Coverage", "Fast Octree"], label="Colors Quantization Method", value="Median Cut")
                dither_methods_color = gr.Radio(choices=["None", "Floyd-Steinberg"], label="Colors Dither Method", value="None")
                k_means_color = gr.Checkbox(label="Enable k-means for color quantization", value=True)
            with gr.TabItem("Grayscale"):
                enable_grayscale = gr.Checkbox(label="Enable", value=False)
                palette_size_gray = gr.Slider(label="Palette Size", minimum=1, maximum=256, step=1, value=16)
                quantization_methods_gray = gr.Radio(choices=["Median Cut", "Maximum Coverage", "Fast Octree"], label="Colors Quantization Method", value="Median Cut")
                dither_methods_gray = gr.Radio(choices=["None", "Floyd-Steinberg"], label="Colors Dither Method", value="None")
                k_means_gray = gr.Checkbox(label="Enable k-means for color quantization", value=True)
            with gr.TabItem("Black and white"):
                enable_black_and_white = gr.Checkbox(label="Enable", value=False)
                inverse_black_and_white = gr.Checkbox(label="Inverse", value=False)
                threshold_black_and_white = gr.Slider(label="Threshold", minimum=1, maximum=256, step=1, value=128)
            with gr.TabItem("Custom color palette"):
                enable_custom_palette = gr.Checkbox(label="Enable", value=False)
                palette_image = gr.Image(label="Color palette image", type="pil")
                palette_size_custom = gr.Slider(label="Palette Size", minimum=1, maximum=256, step=1, value=16)
                dither_methods_custom = gr.Radio(choices=["None", "Floyd-Steinberg"], label="Colors Dither Method", value="None")
    # Wire the UI events to the handlers
    gallery.select(update_selection, outputs=[prompt, selected_info, selected_state])
    prompt.submit(fn=run_lora, inputs=[prompt, selected_state], outputs=[result])
    button.click(
        fn=run_and_postprocess,
        inputs=[
            prompt,
            selected_state,
            enabled,
            downscale,
            need_rescale,
            enable_color_limit,
            palette_size_color,
            quantization_methods_color,
            dither_methods_color,
            k_means_color,
            enable_grayscale,
            palette_size_gray,
            quantization_methods_gray,
            dither_methods_gray,
            k_means_gray,
            enable_black_and_white,
            inverse_black_and_white,
            threshold_black_and_white,
            enable_custom_palette,
            palette_image,
            palette_size_custom,
            dither_methods_custom
        ],
        outputs=[result]
    )
app.queue(max_size=20, concurrency_count=5)
app.launch()