update animagine xl 3

Files changed:
- .gitattributes +0 -39
- .gitignore +0 -162
- .pre-commit-config.yaml +0 -36
- .style.yapf +0 -5
- .vscode/settings.json +0 -18
- README.md +1 -1
- app.py +122 -141
- demo.ipynb +91 -60
- wildcard/character.txt +0 -0
.gitattributes
DELETED
@@ -1,39 +0,0 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
images/amelia-watson.png filter=lfs diff=lfs merge=lfs -text
images/furina.png filter=lfs diff=lfs merge=lfs -text
images/pastel-style.png filter=lfs diff=lfs merge=lfs -text
images/ufotable-style.png filter=lfs diff=lfs merge=lfs -text
.gitignore
DELETED
@@ -1,162 +0,0 @@
gradio_cached_examples/

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.pre-commit-config.yaml
DELETED
@@ -1,36 +0,0 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
  rev: v4.2.0
  hooks:
  - id: check-executables-have-shebangs
  - id: check-json
  - id: check-merge-conflict
  - id: check-shebang-scripts-are-executable
  - id: check-toml
  - id: check-yaml
  - id: double-quote-string-fixer
  - id: end-of-file-fixer
  - id: mixed-line-ending
    args: ['--fix=lf']
  - id: requirements-txt-fixer
  - id: trailing-whitespace
- repo: https://github.com/myint/docformatter
  rev: v1.4
  hooks:
  - id: docformatter
    args: ['--in-place']
- repo: https://github.com/pycqa/isort
  rev: 5.12.0
  hooks:
  - id: isort
- repo: https://github.com/pre-commit/mirrors-mypy
  rev: v0.991
  hooks:
  - id: mypy
    args: ['--ignore-missing-imports']
    additional_dependencies: ['types-python-slugify']
- repo: https://github.com/google/yapf
  rev: v0.32.0
  hooks:
  - id: yapf
    args: ['--parallel', '--in-place']
.style.yapf
DELETED
@@ -1,5 +0,0 @@
[style]
based_on_style = pep8
blank_line_before_nested_class_or_def = false
spaces_before_comment = 2
split_before_logical_operator = true
.vscode/settings.json
DELETED
@@ -1,18 +0,0 @@
{
    "python.linting.enabled": true,
    "python.linting.flake8Enabled": true,
    "python.linting.pylintEnabled": false,
    "python.linting.lintOnSave": true,
    "python.formatting.provider": "yapf",
    "python.formatting.yapfArgs": [
        "--style={based_on_style: pep8, indent_width: 4, blank_line_before_nested_class_or_def: false, spaces_before_comment: 2, split_before_logical_operator: true}"
    ],
    "[python]": {
        "editor.formatOnType": true,
        "editor.codeActionsOnSave": {
            "source.organizeImports": true
        }
    },
    "editor.formatOnSave": true,
    "files.insertFinalNewline": true
}
README.md
CHANGED
@@ -1,5 +1,5 @@
 ---
-title: Animagine XL
+title: Animagine XL 3.0
 emoji: 🌍
 colorFrom: gray
 colorTo: purple
app.py
CHANGED
@@ -16,6 +16,8 @@ import base64
 import safetensors
 from io import BytesIO
 from typing import Tuple
+from datetime import datetime
+from PIL import PngImagePlugin
 import gradio_user_history as gr_user_history
 from huggingface_hub import hf_hub_download
 from safetensors.torch import load_file
@@ -23,7 +25,6 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer
 from lora_diffusers import LoRANetwork, create_network_from_weights
 from diffusers.models import AutoencoderKL
 from diffusers import (
-    LCMScheduler,
     StableDiffusionXLPipeline,
     StableDiffusionXLImg2ImgPipeline,
     DPMSolverMultistepScheduler,
@@ -38,12 +39,10 @@ from diffusers import (
     UniPCMultistepScheduler,
 )
 
-DESCRIPTION = "Animagine XL
-
+DESCRIPTION = "Animagine XL 3.0"
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU. </p>"
 IS_COLAB = utils.is_google_colab() or os.getenv("IS_COLAB") == "1"
-ENABLE_REFINER_PROMPT = os.getenv("ENABLE_REFINER_PROMPT") == "1"
 MAX_SEED = np.iinfo(np.int32).max
 HF_TOKEN = os.getenv("HF_TOKEN")
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
@@ -52,7 +51,7 @@ MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "2048"))
 USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 
-MODEL = os.getenv("MODEL", "Linaqruf/animagine-xl-
+MODEL = os.getenv("MODEL", "https://huggingface.co/Linaqruf/animagine-xl-3.0/blob/main/animagine-xl-3.0.safetensors")
 
 torch.backends.cudnn.deterministic = True
 torch.backends.cudnn.benchmark = False
@@ -60,17 +59,13 @@ torch.backends.cudnn.benchmark = False
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 if torch.cuda.is_available():
-    if ENABLE_REFINER_PROMPT:
-        tokenizer = AutoTokenizer.from_pretrained("isek-ai/SDPrompt-RetNet-300M")
-        tuner = AutoModelForCausalLM.from_pretrained(
-            "isek-ai/SDPrompt-RetNet-300M",
-            trust_remote_code=True,
-        ).to(device)
     vae = AutoencoderKL.from_pretrained(
         "madebyollin/sdxl-vae-fp16-fix",
         torch_dtype=torch.float16,
     )
-
+    pipeline = StableDiffusionXLPipeline.from_single_file if MODEL.endswith(".safetensors") else StableDiffusionXLPipeline.from_pretrained
+
+    pipe = pipeline(
         MODEL,
         vae=vae,
         torch_dtype=torch.float16,
@@ -100,7 +95,6 @@ def seed_everything(seed):
     torch.manual_seed(seed)
     torch.cuda.manual_seed_all(seed)
     np.random.seed(seed)
-    random.seed(seed)
     generator = torch.Generator()
     generator.manual_seed(seed)
     return generator
@@ -115,13 +109,6 @@ def get_image_path(base_path: str):
     return None
 
 
-def update_lcm_parameter(enable_lcm: bool = False):
-    if enable_lcm:
-        return (2, 8, gr.update(value="LCM"), gr.update(choices=["LCM"]))
-    else:
-        return (12, 50, gr.update(value="Euler a"), gr.update(choices=sampler_list))
-
-
 def update_selection(selected_state: gr.SelectData):
     lora_repo = sdxl_loras[selected_state.index]["repo"]
     lora_weight = sdxl_loras[selected_state.index]["multiplier"]
@@ -179,7 +166,6 @@ def get_scheduler(scheduler_config, name):
             scheduler_config
         ),
         "DDIM": lambda: DDIMScheduler.from_config(scheduler_config),
-        "LCM": lambda: LCMScheduler.from_config(scheduler_config),
     }
     return scheduler_map.get(name, lambda: None)()
 
@@ -194,10 +180,17 @@ def preprocess_prompt(
     style_name: str,
     positive: str,
     negative: str = "",
+    add_style: bool = True,
 ) -> Tuple[str, str]:
-    p, n = style_dict.get(style_name,
+    p, n = style_dict.get(style_name, style_dict["(None)"])
 
-
+    if add_style and positive.strip():
+        formatted_positive = p.format(prompt=positive)
+    else:
+        formatted_positive = positive
+
+    combined_negative = n + negative
+    return formatted_positive, combined_negative
 
 
 def common_upscale(samples, width, height, upscale_method):
@@ -213,45 +206,6 @@ def upscale(samples, upscale_method, scale_by):
     return s
 
 
-def prompt_completion(
-    input_text,
-    max_new_tokens=128,
-    do_sample=True,
-    temperature=1.0,
-    top_p=0.95,
-    top_k=20,
-    repetition_penalty=1.2,
-    num_beams=1,
-):
-    try:
-        if input_text.strip() == "":
-            return ""
-
-        inputs = tokenizer(
-            f"<s>{input_text}", return_tensors="pt", add_special_tokens=False
-        )["input_ids"].to(device)
-
-        result = tuner.generate(
-            inputs,
-            max_new_tokens=max_new_tokens,
-            do_sample=do_sample,
-            temperature=temperature,
-            top_p=top_p,
-            top_k=top_k,
-            repetition_penalty=repetition_penalty,
-            num_beams=num_beams,
-        )
-
-        return tokenizer.batch_decode(result, skip_special_tokens=True)[0]
-
-    except Exception as e:
-        print(f"An error occured: {e}")
-        raise
-
-    finally:
-        free_memory()
-
-
 def load_and_convert_thumbnail(model_path: str):
     with safetensors.safe_open(model_path, framework="pt") as f:
         metadata = f.metadata()
@@ -263,6 +217,27 @@ def load_and_convert_thumbnail(model_path: str):
         return image
     return None
 
+def load_wildcard_files(wildcard_dir):
+    wildcard_files = {}
+    for file in os.listdir(wildcard_dir):
+        if file.endswith(".txt"):
+            key = f"__{file.split('.')[0]}__"  # Create a key like __character__
+            wildcard_files[key] = os.path.join(wildcard_dir, file)
+    return wildcard_files
+
+def get_random_line_from_file(file_path):
+    with open(file_path, 'r') as file:
+        lines = file.readlines()
+        if not lines:
+            return ""
+        return random.choice(lines).strip()
+
+def add_wildcard(prompt, wildcard_files):
+    for key, file_path in wildcard_files.items():
+        if key in prompt:
+            wildcard_line = get_random_line_from_file(file_path)
+            prompt = prompt.replace(key, wildcard_line)
+    return prompt
 
 def generate(
     prompt: str,
@@ -270,20 +245,19 @@ def generate(
     seed: int = 0,
     custom_width: int = 1024,
     custom_height: int = 1024,
-    guidance_scale: float =
-    num_inference_steps: int =
+    guidance_scale: float = 7.0,
+    num_inference_steps: int = 28,
     use_lora: bool = False,
     lora_weight: float = 1.0,
     selected_state: str = "",
-    enable_lcm: bool = False,
     sampler: str = "Euler a",
-    aspect_ratio_selector: str = "
+    aspect_ratio_selector: str = "896 x 1152",
     style_selector: str = "(None)",
     quality_selector: str = "Standard",
    use_upscaler: bool = False,
     upscaler_strength: float = 0.5,
     upscale_by: float = 1.5,
-
+    add_quality_tags: bool = True,
     profile: gr.OAuthProfile | None = None,
     progress=gr.Progress(track_tqdm=True),
 ) -> PIL.Image.Image:
@@ -291,7 +265,6 @@ def generate(
 
     network = None
     network_state = {"current_lora": None, "multiplier": None}
-    adapter_id = "Linaqruf/lcm-lora-sdxl-rank1"
 
     width, height = aspect_ratio_handler(
         aspect_ratio_selector,
@@ -299,14 +272,11 @@ def generate(
         custom_height,
     )
 
-
-    if refine_prompt:
-        if not prompt:
-            prompt = random.choice(["1girl, solo", "1boy, solo"])
-        prompt = prompt_completion(prompt)
+    prompt = add_wildcard(prompt, wildcard_files)
 
+
     prompt, negative_prompt = preprocess_prompt(
-        quality_prompt, quality_selector, prompt, negative_prompt
+        quality_prompt, quality_selector, prompt, negative_prompt, add_quality_tags
     )
     prompt, negative_prompt = preprocess_prompt(
         styles, style_selector, prompt, negative_prompt
@@ -316,6 +286,7 @@ def generate(
         width = width - (width % 8)
     if height % 8 != 0:
         height = height - (height % 8)
+
     if use_lora:
         if not selected_state:
             raise Exception("You must Select a LoRA")
@@ -354,9 +325,6 @@ def generate(
             "multiplier": None,
         }
 
-    if enable_lcm:
-        pipe.load_lora_weights(adapter_id)
-
     backup_scheduler = pipe.scheduler
     pipe.scheduler = get_scheduler(pipe.scheduler.config, sampler)
 
@@ -371,10 +339,9 @@ def generate(
         "num_inference_steps": num_inference_steps,
         "seed": seed,
         "sampler": sampler,
-        "enable_lcm": enable_lcm,
         "sdxl_style": style_selector,
+        "add_quality_tags": add_quality_tags,
         "quality_tags": quality_selector,
-        "refine_prompt": refine_prompt,
     }
 
     if use_lora:
@@ -440,9 +407,24 @@ def generate(
             profile=profile,
             metadata=metadata,
         )
+        if image and IS_COLAB:
+            current_time = datetime.now().strftime("%Y%m%d_%H%M%S")
+            output_directory = "./outputs"
+            os.makedirs(output_directory, exist_ok=True)
+            filename = f"image_{current_time}.png"
+            filepath = os.path.join(output_directory, filename)
+
+            # Convert metadata to a string and save as a text chunk in the PNG
+            metadata_str = json.dumps(metadata)
+            info = PngImagePlugin.PngInfo()
+            info.add_text("metadata", metadata_str)
+            image.save(filepath, "PNG", pnginfo=info)
+            print(f"Image saved as {filepath} with metadata")
+
         return image, metadata
+
     except Exception as e:
-        print(f"An error
+        print(f"An error occurred: {e}")
         raise
     finally:
         if network:
@@ -450,8 +432,6 @@ def generate(
             network = None
         if use_lora:
             del lora_sd, text_encoders
-        if enable_lcm:
-            pipe.unload_lora_weights()
         if use_upscaler:
             del upscaler_pipe
         pipe.scheduler = backup_scheduler
@@ -459,33 +439,33 @@ def generate(
 
 
 examples = [
-    "
-    "
-    "
-    "
-    "a girl
+    "1girl, arima kana, oshi no ko, solo, idol, idol clothes, one eye closed, red shirt, black skirt, black headwear, gloves, stage light, singing, open mouth, crowd, smile, pointing at viewer",
+    "1girl, c.c., code geass, white shirt, long sleeves, turtleneck, sitting, looking at viewer, eating, pizza, plate, fork, knife, table, chair, table, restaurant, cinematic angle, cinematic lighting",
+    "1girl, sakurauchi riko, \(love live\), queen hat, noble coat, red coat, noble shirt, sitting, crossed legs, gentle smile, parted lips, throne, cinematic angle",
+    "1girl, amiya \(arknights\), arknights, dirty face, outstretched hand, close-up, cinematic angle, foreshortening, dark, dark background",
+    "A boy and a girl, Emiya Shirou and Artoria Pendragon from fate series, having their breakfast in the dining room. Emiya Shirou wears white t-shirt and jacket. Artoria Pendragon wears white dress with blue neck ribbon. Rice, soup, and minced meats are served on the table. They look at each other while smiling happily",
 ]
 
 quality_prompt_list = [
     {
         "name": "(None)",
         "prompt": "{prompt}",
-        "negative_prompt": "",
+        "negative_prompt": "nsfw, lowres, ",
     },
     {
         "name": "Standard",
-        "prompt": "masterpiece, best quality
-        "negative_prompt": "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry",
+        "prompt": "{prompt}, masterpiece, best quality",
+        "negative_prompt": "nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name, ",
     },
     {
         "name": "Light",
-        "prompt": "(masterpiece), best quality,
-        "negative_prompt": "(low quality, worst quality:1.2), 3d, watermark, signature, ugly, poorly drawn",
+        "prompt": "{prompt}, (masterpiece), best quality, perfect face",
+        "negative_prompt": "nsfw, (low quality, worst quality:1.2), 3d, watermark, signature, ugly, poorly drawn, ",
     },
     {
         "name": "Heavy",
-        "prompt": "(masterpiece), (best quality), (ultra-detailed),
-        "negative_prompt": "longbody, lowres, bad anatomy, bad hands, missing fingers, pubic hair, extra digit, fewer digits, cropped, worst quality, low quality",
+        "prompt": "{prompt}, (masterpiece), (best quality), (ultra-detailed), illustration, disheveled hair, perfect composition, moist skin, intricate details, earrings",
+        "negative_prompt": "nsfw, longbody, lowres, bad anatomy, bad hands, missing fingers, pubic hair, extra digit, fewer digits, cropped, worst quality, low quality, ",
    },
 ]
 
@@ -519,48 +499,48 @@ style_list = [
     },
     {
         "name": "Cinematic",
-        "prompt": "
-        "negative_prompt": "cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
+        "prompt": "{prompt}, cinematic still, emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
+        "negative_prompt": "nsfw, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
     },
     {
         "name": "Photographic",
-        "prompt": "
-        "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
+        "prompt": "{prompt}, cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed",
+        "negative_prompt": "nsfw, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
     },
     {
         "name": "Anime",
-        "prompt": "
-        "negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast",
+        "prompt": "{prompt}, anime artwork, anime style, key visual, vibrant, studio anime, highly detailed",
+        "negative_prompt": "nsfw, photo, deformed, black and white, realism, disfigured, low contrast",
     },
     {
         "name": "Manga",
-        "prompt": "
-        "negative_prompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, Western comic style",
+        "prompt": "{prompt}, manga style, vibrant, high-energy, detailed, iconic, Japanese comic style",
+        "negative_prompt": "nsfw, ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, Western comic style",
     },
     {
         "name": "Digital Art",
-        "prompt": "
-        "negative_prompt": "photo, photorealistic, realism, ugly",
+        "prompt": "{prompt}, concept art, digital artwork, illustrative, painterly, matte painting, highly detailed",
+        "negative_prompt": "nsfw, photo, photorealistic, realism, ugly",
     },
     {
         "name": "Pixel art",
-        "prompt": "
-        "negative_prompt": "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic",
+        "prompt": "{prompt}, pixel-art, low-res, blocky, pixel art style, 8-bit graphics",
+        "negative_prompt": "nsfw, sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic",
    },
     {
         "name": "Fantasy art",
-        "prompt": "ethereal fantasy concept art
-        "negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white",
+        "prompt": "{prompt}, ethereal fantasy concept art, magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
+        "negative_prompt": "nsfw, photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white",
     },
     {
         "name": "Neonpunk",
-        "prompt": "
-        "negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured",
+        "prompt": "{prompt}, neonpunk style, cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional",
+        "negative_prompt": "nsfw, painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured",
     },
     {
         "name": "3D Model",
-        "prompt": "professional 3d model
-        "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
+        "prompt": "{prompt}, professional 3d model, octane render, highly detailed, volumetric, dramatic lighting",
+        "negative_prompt": "nsfw, ugly, deformed, noisy, low poly, blurry, painting",
     },
 ]
 
@@ -601,15 +581,35 @@ quality_prompt = {
 # for item in sdxl_loras
 # ]
 
+wildcard_files = load_wildcard_files("wildcard")
+
 with gr.Blocks(css="style.css", theme="NoCrypt/miku@1.2.1") as demo:
     title = gr.HTML(
         f"""<h1><span>{DESCRIPTION}</span></h1>""",
         elem_id="title",
     )
     gr.Markdown(
-        f"""Gradio demo for [
+        f"""Gradio demo for [cagliostrolab/animagine-xl-3.0](https://huggingface.co/cagliostrolab/animagine-xl-3.0)""",
         elem_id="subtitle",
     )
+    gr.Markdown(
+        f"""Prompting is a bit different in this iteration, we train the model like this:
+        ```
+        1girl/1boy, character name, from what series, everything else in any order.
+        ```
+        Prompting Tips
+        ```
+        1. Quality Tags: `masterpiece, best quality, high quality, normal quality, worst quality, low quality`
+        2. Year Tags: `oldest, early, mid, late, newest`
+        3. Rating tags: `rating: general, rating: sensitive, rating: questionable, rating: explicit, nsfw`
+        4. Escape character: `character name \(series\)`
+        5. Recommended settings: `Euler a, cfg 5-7, 25-28 steps`
+        6. It's recommended to use the exact danbooru tags for more accurate result
+        7. To use character wildcard, add this syntax to the prompt `__character__`.
+        ```
+        """,
+        elem_id="subtitle",
+    )
     gr.DuplicateButton(
         value="Duplicate Space for private use",
         elem_id="duplicate-button",
@@ -630,22 +630,16 @@ with gr.Blocks(css="style.css", theme="NoCrypt/miku@1.2.1") as demo:
                 max_lines=5,
                 placeholder="Enter a negative prompt",
             )
-            with gr.Accordion(label="Quality
+            with gr.Accordion(label="Quality Tags", open=True):
+                add_quality_tags = gr.Checkbox(label="Add Quality Tags", value=False)
                 quality_selector = gr.Dropdown(
-                    label="Quality
-                    show_label=False,
+                    label="Quality Tags Presets",
                     interactive=True,
                     choices=list(quality_prompt.keys()),
                     value="Standard",
                 )
             with gr.Row():
-                enable_lcm = gr.Checkbox(label="Enable LCM", value=False)
                 use_lora = gr.Checkbox(label="Use LoRA", value=False)
-                refine_prompt = gr.Checkbox(
-                    label="Refine prompt",
-                    value=False,
-                    visible=ENABLE_REFINER_PROMPT,
-                )
             with gr.Group(visible=False) as lora_group:
                 selector_info = gr.Text(
                     label="Selected LoRA",
@@ -679,7 +673,7 @@ with gr.Blocks(css="style.css", theme="NoCrypt/miku@1.2.1") as demo:
                 aspect_ratio_selector = gr.Radio(
                     label="Aspect Ratio",
                     choices=aspect_ratios,
-                    value="
+                    value="896 x 1152",
                     container=True,
                 )
             with gr.Group():
@@ -735,16 +729,16 @@ with gr.Blocks(css="style.css", theme="NoCrypt/miku@1.2.1") as demo:
                 guidance_scale = gr.Slider(
                     label="Guidance scale",
                     minimum=1,
-                    maximum=
+                    maximum=12,
                     step=0.1,
-                    value=
+                    value=7.0,
                 )
                 num_inference_steps = gr.Slider(
                     label="Number of inference steps",
                     minimum=1,
-                    maximum=
+                    maximum=50,
                     step=1,
-                    value=
+                    value=28,
                 )
 
     with gr.Tab("Past Generation"):
@@ -773,18 +767,6 @@ with gr.Blocks(css="style.css", theme="NoCrypt/miku@1.2.1") as demo:
         queue=False,
         show_progress=False,
     )
-    enable_lcm.change(
-        update_lcm_parameter,
-        inputs=enable_lcm,
-        outputs=[
-            guidance_scale,
-            num_inference_steps,
-            sampler,
-            sampler,
-        ],
-        queue=False,
-        api_name=False,
-    )
     use_lora.change(
         fn=lambda x: gr.update(visible=x),
         inputs=use_lora,
@@ -818,7 +800,6 @@ with gr.Blocks(css="style.css", theme="NoCrypt/miku@1.2.1") as demo:
         use_lora,
         lora_weight,
         selected_state,
-        enable_lcm,
         sampler,
         aspect_ratio_selector,
         style_selector,
@@ -826,7 +807,7 @@ with gr.Blocks(css="style.css", theme="NoCrypt/miku@1.2.1") as demo:
         use_upscaler,
         upscaler_strength,
         upscale_by,
-
+        add_quality_tags
     ]
 
     prompt.submit(
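For readers skimming the diff: a minimal, self-contained sketch of the wildcard mechanism introduced above, where a `__character__` token in the prompt is swapped for a random line from `wildcard/character.txt`. The directory layout and the example prompt are assumptions for illustration; the logic mirrors `load_wildcard_files` / `add_wildcard` from the diff.

```python
# Minimal sketch of the wildcard substitution added to app.py (assumes a local
# "wildcard/" directory containing character.txt with one entry per line).
import os
import random

def load_wildcard_files(wildcard_dir: str) -> dict:
    # "character.txt" becomes the key "__character__"
    return {
        f"__{name.rsplit('.', 1)[0]}__": os.path.join(wildcard_dir, name)
        for name in os.listdir(wildcard_dir)
        if name.endswith(".txt")
    }

def add_wildcard(prompt: str, wildcard_files: dict) -> str:
    # Each "__key__" token is replaced with a random non-empty line from its file
    for key, path in wildcard_files.items():
        if key in prompt:
            with open(path, encoding="utf-8") as f:
                lines = [line.strip() for line in f if line.strip()]
            prompt = prompt.replace(key, random.choice(lines) if lines else "")
    return prompt

if __name__ == "__main__":
    wildcards = load_wildcard_files("wildcard")
    print(add_wildcard("1girl, __character__, solo, smile", wildcards))  # hypothetical prompt
```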
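The Colab-only save path added above embeds the generation settings as a PNG text chunk named `metadata`. A hedged sketch of reading that metadata back with Pillow; the output filename is a made-up example:

```python
# Sketch: recover the generation settings that app.py now writes into the PNG on Colab.
# "metadata" is the text-chunk key used in the diff; the path below is hypothetical.
import json
from PIL import Image

def read_generation_metadata(path: str) -> dict:
    with Image.open(path) as im:
        raw = im.text.get("metadata")  # tEXt chunk written via PngImagePlugin.PngInfo
    return json.loads(raw) if raw else {}

if __name__ == "__main__":
    meta = read_generation_metadata("outputs/image_20240101_000000.png")
    print(meta.get("prompt"), meta.get("sampler"), meta.get("seed"))
```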
demo.ipynb
CHANGED
@@ -1,62 +1,93 @@
-[previous notebook content removed; the old cell source is not rendered in this view]
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "538a3f0c-50c1-4952-9fcc-070d365c9a0f",
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import subprocess\n",
+    "from threading import Timer\n",
+    "from queue import Queue\n",
+    "\n",
+    "ROOT_DIR = \"/content\"\n",
+    "REPO_URL = \"https://huggingface.co/spaces/Linaqruf/animagine-xl\"\n",
+    "REPO_DIR = os.path.join(ROOT_DIR, \"cagliostro-webui\")\n",
+    "NGROK_TOKEN = \"\"\n",
+    "\n",
+    "os.environ[\"HF_TOKEN\"] = \"\"\n",
+    "os.environ[\"IS_COLAB\"] = \"1\"\n",
+    "os.environ[\"MODEL\"] = \"https://huggingface.co/cagliostrolab/animagine-xl-3.0/blob/main/animagine-xl-3.0.safetensors\"\n",
+    "os.environ[\"CACHE_EXAMPLES\"] = \"1\"\n",
+    "\n",
+    "def clone(url, dir, branch=None):\n",
+    "    subprocess.run([\"git\", \"clone\", url, dir], check=True)\n",
+    "    if branch:\n",
+    "        subprocess.run([\"git\", \"checkout\", branch], cwd=dir, check=True)\n",
+    "\n",
+    "def install_deps(dir):\n",
+    "    subprocess.run([\"pip\", \"install\", \"-r\", \"requirements.txt\"], cwd=dir, check=True)\n",
+    "\n",
+    "def ngrok_tunnel(port,queue,auth_token):\n",
+    "    ngrok.set_auth_token(auth_token)\n",
+    "    url = ngrok.connect(port)\n",
+    "    queue.put(url)\n",
+    "\n",
+    "def main():\n",
+    "    if not os.path.exists(REPO_DIR):\n",
+    "        print(f\"Cloning Repository to {REPO_DIR}\")\n",
+    "        clone(REPO_URL, REPO_DIR)\n",
+    "        print(f\"Installing required python libraries\")\n",
+    "        install_deps(REPO_DIR)\n",
+    "        print(\"Done!\")\n",
+    "\n",
+    "    os.chdir(REPO_DIR)\n",
+    "\n",
+    "    if NGROK_TOKEN:\n",
+    "        try:\n",
+    "            from pyngrok import conf,ngrok\n",
+    "        except:\n",
+    "            !pip install -qqqq --upgrade setuptools\n",
+    "            !pip install -qqqq -U pyngrok\n",
+    "            from pyngrok import conf,ngrok\n",
+    "\n",
+    "        ngrok_output_queue = Queue()\n",
+    "        ngrok_thread = Timer(2, ngrok_tunnel, args=(7860, ngrok_output_queue, NGROK_TOKEN))\n",
+    "        ngrok_thread.start()\n",
+    "        ngrok_thread.join()\n",
+    "\n",
+    "        print(ngrok_output_queue.get())\n",
+    "\n",
+    "    !python app.py\n",
+    "\n",
+    "if __name__ == \"__main__\":\n",
+    "    main()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
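The notebook sets `MODEL` to a single `.safetensors` URL, and app.py now switches between `from_single_file` and `from_pretrained` based on that value. A condensed sketch of the same loading logic under those assumptions (requires `diffusers` and a CUDA GPU for fp16; the prompt is only an example):

```python
# Condensed sketch of the model-loading switch introduced in app.py.
import os
import torch
from diffusers import StableDiffusionXLPipeline
from diffusers.models import AutoencoderKL

MODEL = os.getenv(
    "MODEL",
    "https://huggingface.co/Linaqruf/animagine-xl-3.0/blob/main/animagine-xl-3.0.safetensors",
)

vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
# Single-file checkpoints go through from_single_file, Hub repo ids through from_pretrained
loader = (
    StableDiffusionXLPipeline.from_single_file
    if MODEL.endswith(".safetensors")
    else StableDiffusionXLPipeline.from_pretrained
)
pipe = loader(MODEL, vae=vae, torch_dtype=torch.float16).to("cuda")

image = pipe(
    "1girl, solo, masterpiece, best quality",  # example prompt
    num_inference_steps=28,
    guidance_scale=7.0,
).images[0]
image.save("sample.png")
```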
wildcard/character.txt
ADDED
The diff for this file is too large to render. See raw diff.
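Finally, a small self-contained sketch of how the reworked `preprocess_prompt` applies the quality presets shown in the app.py diff: each preset is a `{prompt}` template, and the new `add_quality_tags` flag controls whether the positive template is applied at all. The two presets below are abridged from the diff.

```python
# Self-contained sketch of the quality-tag preprocessing from the app.py diff.
from typing import Tuple

quality_prompt = {
    "(None)": ("{prompt}", "nsfw, lowres, "),
    "Standard": ("{prompt}, masterpiece, best quality",
                 "nsfw, lowres, bad anatomy, bad hands, worst quality, low quality, "),  # abridged
}

def preprocess_prompt(style_dict, style_name: str, positive: str,
                      negative: str = "", add_style: bool = True) -> Tuple[str, str]:
    p, n = style_dict.get(style_name, style_dict["(None)"])
    if add_style and positive.strip():
        positive = p.format(prompt=positive)
    return positive, n + negative

print(preprocess_prompt(quality_prompt, "Standard", "1girl, arima kana, oshi no ko"))
```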