# prodia2 / app.py (duplicated from pikto/prodia, revision e774b98)
import numpy as np
import gradio as gr
import ast
import requests
import logging
from rembg import new_session
from cutter import remove, make_label
from utils import *
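# Gradio Space endpoints used as a plain HTTP API to populate the "AI Library"
# image grid (initial page plus "load more" pagination).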
API_URL_INITIAL = "https://ysharma-playground-ai-exploration.hf.space/run/initial_dataframe"
API_URL_NEXT10 = "https://ysharma-playground-ai-exploration.hf.space/run/next_10_rows"
from theme_dropdown import create_theme_dropdown # noqa: F401
dropdown, js = create_theme_dropdown()
models = [
{"name": "Stable Diffusion 2", "url": "stabilityai/stable-diffusion-2-1"},
{"name": "stability AI", "url": "stabilityai/stable-diffusion-2-1-base"},
{"name": "Compressed-S-D", "url": "nota-ai/bk-sdm-small"},
{"name": "Future Diffusion", "url": "nitrosocke/Future-Diffusion"},
{"name": "JWST Deep Space Diffusion", "url": "dallinmackay/JWST-Deep-Space-diffusion"},
{"name": "Robo Diffusion 3 Base", "url": "nousr/robo-diffusion-2-base"},
{"name": "Robo Diffusion", "url": "nousr/robo-diffusion"},
{"name": "Tron Legacy Diffusion", "url": "dallinmackay/Tron-Legacy-diffusion"},
]
#### REM-BG
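# UI display name -> model name passed to rembg.new_session()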
remove_bg_models = {
"TracerUniversalB7": "TracerUniversalB7",
"U2NET": "u2net",
"U2NET Human Seg": "u2net_human_seg",
"U2NET Cloth Seg": "u2net_cloth_seg"
}
model_choices = list(remove_bg_models.keys())
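# Remove the background with the selected segmentation model. On failure the
# error text is returned via cutter.make_label (assumed to render it as an
# image placeholder) so the UI does not crash.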
def predict(image, model_name, smoot, matting, bg_color):
    session = new_session(remove_bg_models[model_name])
try:
return remove(session, image, smoot, matting, bg_color)
except ValueError as err:
logging.error(err)
return make_label(str(err)), None
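# The callbacks below keep two pieces of UI state in sync:
#   - matting_state: a (foreground threshold, background threshold, erode size)
#     tuple stored in gr.State; each slider replaces only its own field.
#   - visibility toggles for the mask preview, the matting sliders and the
#     background-colour pickers.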
def change_show_mask(chk_state):
return gr.Image.update(visible=chk_state)
def change_include_matting(chk_state):
return gr.Box.update(visible=chk_state), (0, 0, 0), 0, 0, 0
def change_foreground_threshold(fg_value, value):
fg, bg, erode = value
return fg_value, bg, erode
def change_background_threshold(bg_value, value):
fg, bg, erode = value
return fg, bg_value, erode
def change_erode_size(erode_value, value):
fg, bg, erode = value
return fg, bg, erode_value
def set_dominant_color(chk_state):
return chk_state, gr.ColorPicker.update(value=False, visible=not chk_state)
def change_picker_color(picker, dominant):
if not dominant:
return picker
return dominant
def change_background_mode(chk_state):
return gr.ColorPicker.update(value=False, visible=chk_state), \
gr.Checkbox.update(value=False, visible=chk_state)
###########
text_gen = gr.Interface.load("spaces/daspartho/prompt-extend")
current_model = models[0]
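# Pre-load every diffusion model as a Gradio Interface so send_it() can
# dispatch to it by the index coming from the model dropdown.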
models2 = []
for model in models:
model_url = f"models/{model['url']}"
loaded_model = gr.Interface.load(model_url, live=True, preprocess=True)
models2.append(loaded_model)
def text_it(inputs, text_gen=text_gen):
return text_gen(inputs)
def flip_text(x):
return x[::-1]
def send_it(inputs, model_choice):
proc = models2[model_choice]
return proc(inputs)
def flip_image(x):
return np.fliplr(x)
def set_model(current_model_index):
    global current_model
    current_model = models[current_model_index]
    # Clear the previous results so stale images are not shown for the new model
    return gr.update(value=None), gr.update(value=None)
#define inference function
#First: Get initial images for the grid display
def get_initial_images():
response = requests.post(API_URL_INITIAL, json={
"data": []
}).json()
#data = response["data"][0]['data'][0][0][:-1]
response_dict = response['data'][0]
return response_dict #, [resp[0][:-1] for resp in response["data"][0]["data"]]
#Second: Process response dictionary to get images as hyperlinked image tags
def process_response(response_dict):
return [resp[0][:-1] for resp in response_dict["data"]]
response_dict = get_initial_images()
initial = process_response(response_dict)
initial_imgs = '<div style="display: grid; grid-template-columns: repeat(3, 1fr); grid-template-rows: repeat(3, 1fr); grid-gap: 0; background-color: #fff; padding: 20px; box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);">\n' + "\n".join(initial[:-1]) + "\n</div>"
#Third: Load more images for the grid
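# The endpoints follow the usual Gradio /run/<fn> contract: POST {"data": [...]}
# and read the result from response["data"]. Pagination starts at row 19 and
# advances by 10 rows per click; the dataframe dict is round-tripped through a
# hidden Textbox, so it may come back as a string and is parsed with
# ast.literal_eval below.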
def get_next10_images(response_dict, row_count):
row_count = int(row_count)
#print("(1)",type(response_dict))
#Convert the string to a dictionary
    if not isinstance(response_dict, dict):
response_dict = ast.literal_eval(response_dict)
response = requests.post(API_URL_NEXT10, json={
"data": [response_dict, row_count ] #len(initial)-1
}).json()
row_count+=10
response_dict = response['data'][0]
#print("(2)",type(response))
#print("(3)",type(response['data'][0]))
next_set = [resp[0][:-1] for resp in response_dict["data"]]
    next_set_images = '<div style="display: grid; grid-template-columns: repeat(3, 1fr); grid-template-rows: repeat(3, 1fr); grid-gap: 0; background-color: #fff; padding: 20px; box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); ">\n' + "\n".join(next_set[:-1]) + "\n</div>"
return response_dict, row_count, next_set_images #response['data'][0]
with gr.Blocks(theme='pikto/theme@>=0.0.1,<0.0.3') as pan:
gr.Markdown("AI CONTENT TOOLS.")
with gr.Tab("T-to-I"):
##model = ("stabilityai/stable-diffusion-2-1")
model_name1 = gr.Dropdown(
label="Choose Model",
choices=[m["name"] for m in models],
type="index",
value=current_model["name"],
interactive=True,
)
input_text = gr.Textbox(label="Prompt idea",)
## run = gr.Button("Generate Images")
with gr.Row():
see_prompts = gr.Button("Generate Prompts")
run = gr.Button("Generate Images", variant="primary")
with gr.Row():
magic1 = gr.Textbox(label="Generated Prompt", lines=2)
output1 = gr.Image(label="")
with gr.Row():
magic2 = gr.Textbox(label="Generated Prompt", lines=2)
output2 = gr.Image(label="")
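        # Wiring: "Generate Prompts" expands the prompt idea into both prompt
        # boxes, "Generate Images" sends each prompt to the selected model, and
        # changing the model clears the previous results.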
run.click(send_it, inputs=[magic1, model_name1], outputs=[output1])
run.click(send_it, inputs=[magic2, model_name1], outputs=[output2])
see_prompts.click(text_it, inputs=[input_text], outputs=[magic1])
see_prompts.click(text_it, inputs=[input_text], outputs=[magic2])
model_name1.change(set_model, inputs=model_name1, outputs=[output1, output2,])
with gr.Tab("AI Library"):
#Using Gradio Demos as API - This is Hot!
#get_next10_images(response_dict=response_dict, row_count=9)
#position: fixed; top: 0; left: 0; width: 100%; background-color: #fff; padding: 20px; box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);
#Defining the Blocks layout
# with gr.Blocks(css = """#img_search img {width: 100%; height: 100%; object-fit: cover;}""") as demo:
gr.HTML(value="top of page", elem_id="top",visible=False)
gr.HTML("""<div style="text-align: center; max-width: 700px; margin: 0 auto;">
<div
style="
display: inline-flex;
align-items: center;
gap: 0.8rem;
font-size: 1.75rem;
"
>
<h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
Using Gradio API - 2 </h1><br></div>
<div><h4 style="font-weight: 500; margin-bottom: 7px; margin-top: 5px;">
              Stream <a href="https://github.com/playgroundai/liked_images" target="_blank">PlaygroundAI Images</a> in a beautiful grid</h4><br>
</div>""")
with gr.Tab("AI Library"):
#with gr.Tab(): #(elem_id = "col-container"):
#gr.Column(): #(elem_id = "col-container"):
b1 = gr.Button("Load More Images").style(full_width=False)
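            # Hidden components carry the paging state between clicks: the
            # dataframe dict (as text) and the current row offset.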
df = gr.Textbox(visible=False,elem_id='dataframe', value=response_dict)
row_count = gr.Number(visible=False, value=19 )
img_search = gr.HTML(label = 'Images from PlaygroundAI dataset', elem_id="img_search",
value=initial_imgs ) #initial[:-1] )
b1.click(get_next10_images, [df, row_count], [df, row_count, img_search], api_name = "load_playgroundai_images" )
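            # Because the handler is exposed with api_name="load_playgroundai_images",
            # it could also be called over HTTP, e.g. (sketch, assuming the Space is
            # reachable at <space-url>):
            #   requests.post("<space-url>/run/load_playgroundai_images",
            #                 json={"data": [str(response_dict), 19]})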
########################## REM-BG
with gr.Tab("Rem_BG"):
color_state = gr.State(value=False)
matting_state = gr.State(value=(0, 0, 0))
gr.HTML("<center><h1>Remove Background Tool</h1></center>")
with gr.Row(equal_height=False):
with gr.Column():
input_img = gr.Image(type="pil", label="Input image")
                drp_models = gr.Dropdown(choices=model_choices, label="Segmentation model", value="TracerUniversalB7")
with gr.Row():
chk_include_matting = gr.Checkbox(label="Matting", value=False)
                    chk_smoot_mask = gr.Checkbox(label="Smooth Mask", value=False)
chk_show_mask = gr.Checkbox(label="Show Mask", value=False)
with gr.Box(visible=False) as slider_matting:
slr_fg_threshold = gr.Slider(0, 300, value=270, step=1, label="Alpha matting foreground threshold")
slr_bg_threshold = gr.Slider(0, 50, value=20, step=1, label="Alpha matting background threshold")
slr_erode_size = gr.Slider(0, 20, value=11, step=1, label="Alpha matting erode size")
with gr.Box():
with gr.Row():
chk_change_color = gr.Checkbox(label="Change background color", value=False)
pkr_color = gr.ColorPicker(label="Pick a new color", visible=False)
chk_dominant = gr.Checkbox(label="Use dominant color", value=False, visible=False)
                #######################
run_btn = gr.Button(value="Remove background", variant="primary")
with gr.Column():
output_img = gr.Image(type="pil", label="Image Result")
mask_img = gr.Image(type="pil", label="Image Mask", visible=False)
gr.ClearButton(components=[input_img, output_img, mask_img])
chk_include_matting.change(change_include_matting, inputs=[chk_include_matting],
outputs=[slider_matting, matting_state,
slr_fg_threshold, slr_bg_threshold, slr_erode_size])
slr_bg_threshold.change(change_background_threshold, inputs=[slr_bg_threshold, matting_state],
outputs=[matting_state])
slr_fg_threshold.change(change_foreground_threshold, inputs=[slr_fg_threshold, matting_state],
outputs=[matting_state])
slr_erode_size.change(change_erode_size, inputs=[slr_erode_size, matting_state],
outputs=[matting_state])
chk_show_mask.change(change_show_mask, inputs=[chk_show_mask], outputs=[mask_img])
chk_change_color.change(change_background_mode, inputs=[chk_change_color],
outputs=[pkr_color, chk_dominant])
pkr_color.change(change_picker_color, inputs=[pkr_color, chk_dominant], outputs=[color_state])
chk_dominant.change(set_dominant_color, inputs=[chk_dominant], outputs=[color_state, pkr_color])
run_btn.click(predict, inputs=[input_img, drp_models, chk_smoot_mask, matting_state, color_state],
outputs=[output_img, mask_img])
# text_input = gr.Textbox() ## Diffuser
# image_output = gr.Image()
# image_button = gr.Button("Flip")
# text_button.click(flip_text, inputs=text_input, outputs=text_output)
# image_button.click(flip_image, inputs=image_input, outputs=image_output)
pan.queue(concurrency_count=200)
pan.launch(inline=True, show_api=True, max_threads=400)