Spaces: Running on Zero
File size: 4,921 Bytes
import gradio as gr
import numpy as np
import random
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
import spaces
import os
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
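
# Everything runs on CUDA; the Hugging Face token is read from the TOKEN
# environment variable (a Space secret) and is used to download the model weights.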
device = "cuda"
token = os.environ["TOKEN"]

model_id = "aipicasso/emix-0-5"
scheduler = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler", token=token)
pipe = StableDiffusionXLPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.bfloat16, token=token)
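
# Download the unaestheticXL negative textual-inversion embedding and register it
# with both SDXL text encoders under the token "unaestheticXLv31"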
negative_ti_file = hf_hub_download(repo_id="Aikimi/unaestheticXL_Negative_TI", filename="unaestheticXLv31.safetensors")
state_dict = load_file(negative_ti_file)
pipe.load_textual_inversion(state_dict["clip_g"], token="unaestheticXLv31", text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
pipe.load_textual_inversion(state_dict["clip_l"], token="unaestheticXLv31", text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
pipe = pipe.to(device)
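
# Dart (Danbooru tag) language model used to auto-generate the image prompt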
MODEL_NAME = "p1atdev/dart-v1-sft"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True) # trust_remote_code is required for tokenizer
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.bfloat16)
model = model.to(device)
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
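
# ZeroGPU: a GPU is attached only while infer() is running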
@spaces.GPU
def infer(seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
    # Ask the Dart model to expand a fixed "1girl" seed tag into a full tag list
    prompt = "<|bos|><rating>rating:sfw, rating:general</rating><copyright>original</copyright><character></character><general><|long|>1girl<|input_end|>"
    inputs = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    with torch.no_grad():
        outputs = model.generate(inputs, generation_config=model.generation_config)

    # Keep only the tags generated after the "original, " copyright tag
    prompt = tokenizer.decode(outputs[0], skip_special_tokens=True).split("original, ")[1]
    negative_prompt = "unaestheticXLv31, 3d, photo, realism"

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)

    # Render the image with SDXL using the generated prompt
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]

    return image, prompt
css="""
#col-container {
margin: 0 auto;
max-width: 520px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # 著作権のない画像
        ## Anime image with No copyright
        Generateボタンを押し、画像を生成してください。この画像がいくらきれいであろうと著作権は誰にもありません。この画像は時刻を入力とした自然現象によって作られたものです。美しいとは何でしょうか。

        Press the Generate button to create an image. However beautiful the result may be, no one holds its copyright: it is produced by a natural phenomenon that takes the current time as its input. What is beauty?
        """)

        with gr.Row():
            run_button = gr.Button("Generate", scale=0)

        result = gr.Image(label="Result", show_label=False)
        generated_prompt = gr.Textbox(label="Generated prompt", show_label=False, interactive=False)

        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=512,
                    maximum=MAX_IMAGE_SIZE,
                    step=64,
                    value=1024,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=512,
                    maximum=MAX_IMAGE_SIZE,
                    step=64,
                    value=1024,
                )

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=1.0,
                    maximum=10.0,
                    step=0.1,
                    value=7.5,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=30,
                    step=1,
                    value=20,
                )
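
    # Wire the Generate button to infer(): it returns the image and the prompt that produced it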
    run_button.click(
        fn=infer,
        inputs=[seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
        outputs=[result, generated_prompt],
    )

demo.queue().launch()