import gradio as gr
import numpy as np
import random
import spaces
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
import torch
from diffusers import DiffusionPipeline, StableDiffusionPipeline  # , FlowMatchEulerDiscreteScheduler, FluxTransformer2DModel
#from diffusers import FluxPipeline
from huggingface_hub import login
import os
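# Log in to the Hugging Face Hub with the token stored in the "hf_tok" secret
# (required for gated or private model repos).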
hf_token = os.getenv("hf_tok")
login(token=hf_token)
device = "cuda" if torch.cuda.is_available() else "cpu"
model_repo_id = "stabilityai/sdxl-turbo" # "stable-diffusion-v1-5/stable-diffusion-v1-5" # Replace to the model you would like to use
#model_repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
#model_repo_id = "cagliostrolab/animagine-xl-4.0"
#model_repo_id = "stabilityai/stable-diffusion-xl-base-1.0"
#model_repo_id ="black-forest-labs/FLUX.1-dev"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
#pipe.load_lora_weights("RearViewXLV2-CruzFlesh.safetensors")  # prompt for this LoRA:
#huge muscle man in the kitchen ,
#a naked man with loose curl messy blonde hair, rear view,
#<lora:RearViewXlV2-CruzFlesh:0.75>, hairy ass, asshole
pipe.load_lora_weights("FaceNpenisV4XL.safetensors")
pipe = pipe.to(device)
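# Upper bounds for the seed and image-size sliders defined in the UI below.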
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
"""rear view
a naked man from behind
a man on all fours
testicles
toes
asshole
penis
hairy ass
hands on ass
saggy balls"""
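# Inference entry point wired to the Run button below; returns the generated
# image and the seed that was actually used.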
@spaces.GPU(duration=25)  # ZeroGPU: allocate a GPU for up to 25 seconds per call
def infer(
    prompt,
    negative_prompt,
    seed,
    randomize_seed,
    width,
    height,
    guidance_scale,
    num_inference_steps,
    progress=gr.Progress(track_tqdm=True),
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # A CPU generator keeps results reproducible regardless of which device
    # the pipeline runs on.
    generator = torch.Generator().manual_seed(seed)

    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]

    return image, seed
examples = [
    "cinematic, cat at table in the room",
    "An astronaut riding a green horse",
    "A delicious ceviche cheesecake slice",
]
css = """
#col-container {
margin: 0 auto;
max-width: 640px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# Text-to-Image Gradio Template")

        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )

            run_button = gr.Button("Run", scale=0, variant="primary")

        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
                visible=False,
            )

            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )

            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,  # Replace with defaults that work for your model
                )

                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,  # Replace with defaults that work for your model
                )

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=0.0,  # Replace with defaults that work for your model
                )

                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=3,  # Replace with defaults that work for your model
                )

        gr.Examples(examples=examples, inputs=[prompt])

    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            prompt,
            negative_prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[result, seed],
    )
if __name__ == "__main__":
    demo.launch()