import os
import gc
import gradio as gr
import numpy as np
import torch
import json
import spaces
import config
from dotenv import load_dotenv
load_dotenv()
import logging
from PIL import Image, PngImagePlugin
from datetime import datetime
import replicate
from config import *
from utils import *
"""
Changes to base animagine-xl-3.1 log
- Cut the wildcard
- add in lora pipeline
- use let get env variable
- add in lora strenght variable
"""
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
DESCRIPTION = "Animagine XL 3.1 X Galverse MAMA Replicate Repo"
IS_COLAB = False
#assert os.environ["REPLICATE_API_TOKEN"], "REPLICATE_API_TOKEN not set "
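# The replicate client reads REPLICATE_API_TOKEN from the environment; warn at
# startup if it is missing instead of failing on the first generation request.
if not os.getenv("REPLICATE_API_TOKEN"):
    logger.warning("REPLICATE_API_TOKEN is not set; replicate.run() calls will fail.")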
MIN_IMAGE_SIZE = 512
MAX_IMAGE_SIZE = 2048
OUTPUT_DIR = os.getenv("OUTPUT_DIR", "./outputs")
# Prompt preprocessing uses the module-level `styles` and `quality_prompt` preset
# dicts (defined below) together with the selector values passed in from the UI.
def generate_replicate(
    prompt: str,
    negative_prompt: str = "",
    seed: int = 0,
    custom_width: int = 1024,
    custom_height: int = 1024,
    guidance_scale: float = 7.0,
    num_inference_steps: int = 28,
    sampler: str = "Euler a",
    aspect_ratio_selector: str = "896 x 1152",
    lora_strength: float = 0.7,
    style_selector: str = "(None)",
    quality_selector: str = "Standard v3.1",
    add_quality_tags: bool = True,
    repo: str = "galverse/mama-v1.5.1_leduyson:82d4539e72ec4473d1c34407a378815db55cb2eeb9639b898fcc7b4b67043973",
    lora_id: str = "galverse/mama-1.5",
    lora_style: str = "sks, galverse",
    progress=gr.Progress(track_tqdm=True),
):
np.random.seed(seed)
width, height = aspect_ratio_handler(
aspect_ratio_selector,
custom_width,
custom_height,
)
# prompt = add_wildcard(prompt, wildcard_files)
if quality_prompt:
prompt, negative_prompt = preprocess_prompt(
quality_prompt, quality_selector, prompt, negative_prompt, add_quality_tags
)
if styles:
prompt, negative_prompt = preprocess_prompt(
styles, style_selector, prompt, negative_prompt
)
width, height = preprocess_image_dimensions(width, height)
    # This dict doubles as the Replicate input payload and the metadata embedded
    # into each saved image.
    metadata = {
        "prompt": prompt + ", " + lora_style,
        "negative_prompt": negative_prompt,
        "resolution": f"{width} x {height}",
        "guidance_scale": guidance_scale,
        "lora_scale": lora_strength,
        "num_inference_steps": num_inference_steps,
        "seed": seed,
        "sampler": sampler,
        "width": width,
        "height": height,
        "num_outputs": 1,
        "apply_watermark": True,
        "high_noise_frac": 0.8,
        "disable_safety_checker": True,
        "lora_id": lora_id,
    }
images = replicate.run(
repo,
input=metadata
)
image_paths = [
save_image_replicate(image, metadata, OUTPUT_DIR, IS_COLAB)
for image in images
]
for image_path in image_paths:
logger.info(f"Image saved as {image_path} with metadata")
return image_paths, metadata
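
# Example of calling the generator directly, bypassing the UI (hypothetical values):
#   image_paths, meta = generate_replicate(
#       prompt="1girl, solo, upper body, looking at viewer",
#       negative_prompt="lowres, bad anatomy, bad hands",
#       seed=42,
#       aspect_ratio_selector="896 x 1152",
#       lora_strength=0.7,
#   )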
styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
quality_prompt = {
k["name"]: (k["prompt"], k["negative_prompt"]) for k in quality_prompt_list
}
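# Assumption about config.py: style_list and quality_prompt_list are lists of dicts
# shaped like {"name": ..., "prompt": ..., "negative_prompt": ...}; the comprehensions
# above rely on exactly those keys.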
with gr.Blocks(css="style.css", theme="NoCrypt/miku@1.2.1") as demo:
title = gr.HTML(
f"""<h1><span>{DESCRIPTION}</span></h1>""",
elem_id="title",
)
gr.Markdown(
f"""Gradio demo for Galverse MAMA 1.5 lora model, current repo [galverse/mama-v1.5.1_leduyson](https://replicate.com/galverse/mama-v1.5.1_leduyson)""",
elem_id="subtitle",
)
gr.DuplicateButton(
value="Duplicate Space for private use",
elem_id="duplicate-button",
visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
)
with gr.Row():
with gr.Column(scale=2):
with gr.Tab("Txt2img"):
with gr.Group():
prompt = gr.Text(
label="Prompt",
max_lines=5,
placeholder="Enter your prompt",
)
negative_prompt = gr.Text(
label="Negative Prompt",
max_lines=5,
placeholder="Enter a negative prompt",
)
lora_strength = gr.Slider(
label="Lora style strength",
minimum=0,
maximum=1,
step=0.1,
value=0.7,
)
with gr.Accordion(label="Quality Tags", open=True):
add_quality_tags = gr.Checkbox(
label="Add Quality Tags", value=True
)
quality_selector = gr.Dropdown(
label="Quality Tags Presets",
interactive=True,
choices=list(quality_prompt.keys()),
value="Standard v3.1",
)
with gr.Tab("Advanced Settings"):
with gr.Group():
style_selector = gr.Radio(
label="Style Preset",
container=True,
interactive=True,
choices=list(styles.keys()),
value="(None)",
)
with gr.Group():
aspect_ratio_selector = gr.Radio(
label="Aspect Ratio",
choices=aspect_ratios,
value="896 x 1152",
container=True,
)
with gr.Group(visible=False) as custom_resolution:
with gr.Row():
custom_width = gr.Slider(
label="Width",
minimum=MIN_IMAGE_SIZE,
maximum=MAX_IMAGE_SIZE,
step=8,
value=1024,
)
custom_height = gr.Slider(
label="Height",
minimum=MIN_IMAGE_SIZE,
maximum=MAX_IMAGE_SIZE,
step=8,
value=1024,
)
with gr.Group():
sampler = gr.Dropdown(
label="Sampler",
choices=sampler_list,
interactive=True,
value="DPMSolverMultistep",
)
with gr.Group():
seed = gr.Slider(
label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
with gr.Group():
with gr.Row():
guidance_scale = gr.Slider(
label="Guidance scale",
minimum=1,
maximum=12,
step=0.1,
value=7.0,
)
num_inference_steps = gr.Slider(
label="Number of inference steps",
minimum=1,
maximum=50,
step=1,
value=28,
)
with gr.Column(scale=3):
with gr.Blocks():
run_button = gr.Button("Generate", variant="primary")
result = gr.Gallery(
label="Result",
columns=1,
height='100%',
preview=True,
show_label=False
)
with gr.Accordion(label="Generation Parameters", open=False):
gr_metadata = gr.JSON(label="metadata", show_label=False)
aspect_ratio_selector.change(
fn=lambda x: gr.update(visible=x == "Custom"),
inputs=aspect_ratio_selector,
outputs=custom_resolution,
queue=False,
api_name=False,
)
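    # Submitting a prompt or clicking Generate first resolves the seed (randomized
    # or kept as-is), then runs generation with that seed.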
gr.on(
triggers=[
prompt.submit,
negative_prompt.submit,
run_button.click,
],
fn=randomize_seed_fn,
inputs=[seed, randomize_seed],
outputs=seed,
queue=False,
api_name=False,
).then(
fn=generate_replicate,
inputs=[
prompt,
negative_prompt,
seed,
custom_width,
custom_height,
guidance_scale,
num_inference_steps,
sampler,
aspect_ratio_selector,
lora_strength,
style_selector,
quality_selector,
            add_quality_tags,
],
outputs=[result, gr_metadata],
api_name="run",
)
demo.queue(max_size=20).launch(debug=IS_COLAB, share=IS_COLAB)