hysts HF staff committed
Commit
2b755c2
1 Parent(s): 7c908c6
Files changed (9)
  1. .pre-commit-config.yaml +50 -0
  2. .vscode/settings.json +21 -0
  3. Dockerfile +57 -0
  4. LICENSE +21 -0
  5. README.md +5 -5
  6. app.py +149 -0
  7. model.py +191 -0
  8. requirements.txt +12 -0
  9. style.css +10 -0
.pre-commit-config.yaml ADDED
@@ -0,0 +1,50 @@
+ repos:
+   - repo: https://github.com/pre-commit/pre-commit-hooks
+     rev: v4.4.0
+     hooks:
+       - id: check-executables-have-shebangs
+       - id: check-json
+       - id: check-merge-conflict
+       - id: check-shebang-scripts-are-executable
+       - id: check-toml
+       - id: check-yaml
+       - id: end-of-file-fixer
+       - id: mixed-line-ending
+         args: ["--fix=lf"]
+       - id: requirements-txt-fixer
+       - id: trailing-whitespace
+   - repo: https://github.com/myint/docformatter
+     rev: v1.7.5
+     hooks:
+       - id: docformatter
+         args: ["--in-place"]
+   - repo: https://github.com/pycqa/isort
+     rev: 5.12.0
+     hooks:
+       - id: isort
+         args: ["--profile", "black"]
+   - repo: https://github.com/pre-commit/mirrors-mypy
+     rev: v1.5.1
+     hooks:
+       - id: mypy
+         args: ["--ignore-missing-imports"]
+         additional_dependencies: ["types-python-slugify", "types-requests", "types-PyYAML"]
+   - repo: https://github.com/psf/black
+     rev: 23.7.0
+     hooks:
+       - id: black
+         language_version: python3.10
+         args: ["--line-length", "119"]
+   - repo: https://github.com/kynan/nbstripout
+     rev: 0.6.1
+     hooks:
+       - id: nbstripout
+         args: ["--extra-keys", "metadata.interpreter metadata.kernelspec cell.metadata.pycharm"]
+   - repo: https://github.com/nbQA-dev/nbQA
+     rev: 1.7.0
+     hooks:
+       - id: nbqa-black
+       - id: nbqa-pyupgrade
+         args: ["--py37-plus"]
+       - id: nbqa-isort
+         args: ["--float-to-top"]
.vscode/settings.json ADDED
@@ -0,0 +1,21 @@
+ {
+     "[python]": {
+         "editor.defaultFormatter": "ms-python.black-formatter",
+         "editor.formatOnType": true,
+         "editor.codeActionsOnSave": {
+             "source.organizeImports": true
+         }
+     },
+     "black-formatter.args": [
+         "--line-length=119"
+     ],
+     "isort.args": ["--profile", "black"],
+     "flake8.args": [
+         "--max-line-length=119"
+     ],
+     "ruff.args": [
+         "--line-length=119"
+     ],
+     "editor.formatOnSave": true,
+     "files.insertFinalNewline": true
+ }
Dockerfile ADDED
@@ -0,0 +1,57 @@
+ FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu22.04
+ ENV DEBIAN_FRONTEND=noninteractive
+ RUN apt-get update && \
+     apt-get upgrade -y && \
+     apt-get install -y --no-install-recommends \
+     git \
+     git-lfs \
+     wget \
+     curl \
+     # python build dependencies \
+     build-essential \
+     libssl-dev \
+     zlib1g-dev \
+     libbz2-dev \
+     libreadline-dev \
+     libsqlite3-dev \
+     libncursesw5-dev \
+     xz-utils \
+     tk-dev \
+     libxml2-dev \
+     libxmlsec1-dev \
+     libffi-dev \
+     liblzma-dev \
+     # gradio dependencies \
+     ffmpeg && \
+     apt-get clean && \
+     rm -rf /var/lib/apt/lists/*
+
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:${PATH}
+ WORKDIR ${HOME}/app
+
+ RUN curl https://pyenv.run | bash
+ ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH}
+ ARG PYTHON_VERSION=3.10.12
+ RUN pyenv install ${PYTHON_VERSION} && \
+     pyenv global ${PYTHON_VERSION} && \
+     pyenv rehash && \
+     pip install --no-cache-dir -U pip setuptools wheel
+
+ COPY --chown=1000 ./requirements.txt /tmp/requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r /tmp/requirements.txt && \
+     mim install mmcv==2.0.1 && \
+     mim install mmdet==3.1.0 && \
+     mim install mmpose==1.1.0
+
+ COPY --chown=1000 . ${HOME}/app
+ ENV PYTHONPATH=${HOME}/app \
+     PYTHONUNBUFFERED=1 \
+     GRADIO_ALLOW_FLAGGING=never \
+     GRADIO_NUM_PORTS=1 \
+     GRADIO_SERVER_NAME=0.0.0.0 \
+     GRADIO_THEME=huggingface \
+     SYSTEM=spaces
+ CMD ["python", "app.py"]
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2023 hysts
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
README.md CHANGED
@@ -1,12 +1,12 @@
  ---
- title: T2I Adapter SDXL
- emoji: 👀
+ title: T2I-Adapter-SDXL
+ emoji: 🚀
  colorFrom: purple
  colorTo: yellow
- sdk: gradio
- sdk_version: 3.42.0
- app_file: app.py
+ sdk: docker
  pinned: false
+ license: mit
+ suggested_hardware: t4-small
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,149 @@
+ #!/usr/bin/env python
+
+ import os
+ import random
+
+ import gradio as gr
+ import numpy as np
+ import torch
+
+ from model import ADAPTER_NAMES, Model
+
+ DESCRIPTION = "# T2I-Adapter-SDXL"
+
+ if not torch.cuda.is_available():
+     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+
+ MAX_SEED = np.iinfo(np.int32).max
+
+
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+     return seed
+
+
+ model = Model(ADAPTER_NAMES[0])
+
+ with gr.Blocks(css="style.css") as demo:
+     gr.Markdown(DESCRIPTION)
+     gr.DuplicateButton(
+         value="Duplicate Space for private use",
+         elem_id="duplicate-button",
+         visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
+     )
+
+     with gr.Row():
+         with gr.Column():
+             with gr.Group():
+                 image = gr.Image(label="Input image", type="pil", height=600)
+                 prompt = gr.Textbox(label="Prompt")
+                 adapter_name = gr.Dropdown(label="Adapter", choices=ADAPTER_NAMES, value=ADAPTER_NAMES[0])
+                 run_button = gr.Button("Run")
+             with gr.Accordion("Advanced options", open=False):
+                 apply_preprocess = gr.Checkbox(label="Apply preprocess", value=True)
+                 negative_prompt = gr.Textbox(
+                     label="Negative prompt",
+                     value="anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
+                 )
+                 num_inference_steps = gr.Slider(
+                     label="Number of steps",
+                     minimum=1,
+                     maximum=Model.MAX_NUM_INFERENCE_STEPS,
+                     step=1,
+                     value=30,
+                 )
+                 guidance_scale = gr.Slider(
+                     label="Guidance scale",
+                     minimum=0.1,
+                     maximum=30.0,
+                     step=0.1,
+                     value=7.5,
+                 )
+                 adapter_conditioning_scale = gr.Slider(
+                     label="Adapter Conditioning Scale",
+                     minimum=0.5,
+                     maximum=1,
+                     step=0.1,
+                     value=0.8,
+                 )
+                 cond_tau = gr.Slider(
+                     label="Fraction of timesteps for which adapter should be applied",
+                     minimum=0.1,
+                     maximum=1.0,
+                     step=0.1,
+                     value=0.8,
+                 )
+                 seed = gr.Slider(
+                     label="Seed",
+                     minimum=0,
+                     maximum=MAX_SEED,
+                     step=1,
+                     value=0,
+                 )
+                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+         with gr.Column():
+             result = gr.Gallery(label="Result", columns=2, height=600, object_fit="scale-down", show_label=False)
+
+     inputs = [
+         image,
+         prompt,
+         negative_prompt,
+         num_inference_steps,
+         guidance_scale,
+         adapter_conditioning_scale,
+         cond_tau,
+         seed,
+         apply_preprocess,
+     ]
+     prompt.submit(
+         fn=randomize_seed_fn,
+         inputs=[seed, randomize_seed],
+         outputs=seed,
+         queue=False,
+         api_name=False,
+     ).then(
+         fn=model.change_adapter,
+         inputs=adapter_name,
+         api_name=False,
+     ).success(
+         fn=model.run,
+         inputs=inputs,
+         outputs=result,
+         api_name=False,
+     )
+     negative_prompt.submit(
+         fn=randomize_seed_fn,
+         inputs=[seed, randomize_seed],
+         outputs=seed,
+         queue=False,
+         api_name=False,
+     ).then(
+         fn=model.change_adapter,
+         inputs=adapter_name,
+         api_name=False,
+     ).success(
+         fn=model.run,
+         inputs=inputs,
+         outputs=result,
+         api_name=False,
+     )
+     run_button.click(
+         fn=randomize_seed_fn,
+         inputs=[seed, randomize_seed],
+         outputs=seed,
+         queue=False,
+         api_name=False,
+     ).then(
+         fn=model.change_adapter,
+         inputs=adapter_name,
+         api_name=False,
+     ).success(
+         fn=model.run,
+         inputs=inputs,
+         outputs=result,
+         api_name="run",
+     )
+
+ if __name__ == "__main__":
+     demo.queue(max_size=20).launch()
model.py ADDED
@@ -0,0 +1,191 @@
+ from typing import Callable
+
+ import PIL.Image
+ import torch
+ from controlnet_aux import (
+     CannyDetector,
+     LineartDetector,
+     MidasDetector,
+     PidiNetDetector,
+     ZoeDetector,
+ )
+ from diffusers import (
+     AutoencoderKL,
+     EulerAncestralDiscreteScheduler,
+     StableDiffusionXLAdapterPipeline,
+     T2IAdapter,
+ )
+
+ ADAPTER_NAMES = [
+     "TencentARC/t2i-adapter-canny-sdxl-1.0",
+     "TencentARC/t2i-adapter-sketch-sdxl-1.0",
+     "TencentARC/t2i-adapter-lineart-sdxl-1.0",
+     "TencentARC/t2i-adapter-depth-midas-sdxl-1.0",
+     "TencentARC/t2i-adapter-depth-zoe-sdxl-1.0",
+     "TencentARC/t2i-adapter-recolor-sdxl-1.0",
+ ]
+
+
+ class CannyPreprocessor:
+     def __init__(self):
+         self.model = CannyDetector()
+
+     def __call__(self, image: PIL.Image.Image) -> PIL.Image.Image:
+         return self.model(image, detect_resolution=384, image_resolution=1024)
+
+
+ class LineartPreprocessor:
+     def __init__(self):
+         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.model = LineartDetector.from_pretrained("lllyasviel/Annotators").to(device)
+
+     def __call__(self, image: PIL.Image.Image) -> PIL.Image.Image:
+         return self.model(image, detect_resolution=384, image_resolution=1024)
+
+
+ class MidasPreprocessor:
+     def __init__(self):
+         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.model = MidasDetector.from_pretrained(
+             "valhalla/t2iadapter-aux-models", filename="dpt_large_384.pt", model_type="dpt_large"
+         ).to(device)
+
+     def __call__(self, image: PIL.Image.Image) -> PIL.Image.Image:
+         return self.model(image, detect_resolution=512, image_resolution=1024)
+
+
+ class PidiNetPreprocessor:
+     def __init__(self):
+         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.model = PidiNetDetector.from_pretrained("lllyasviel/Annotators").to(device)
+
+     def __call__(self, image: PIL.Image.Image) -> PIL.Image.Image:
+         return self.model(image, detect_resolution=512, image_resolution=1024, apply_filter=True)
+
+
+ class RecolorPreprocessor:
+     def __call__(self, image: PIL.Image.Image) -> PIL.Image.Image:
+         return image.convert("L").convert("RGB")
+
+
+ class ZoePreprocessor:
+     def __init__(self):
+         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.model = ZoeDetector.from_pretrained(
+             "valhalla/t2iadapter-aux-models", filename="zoed_nk.pth", model_type="zoedepth_nk"
+         ).to(device)
+
+     def __call__(self, image: PIL.Image.Image) -> PIL.Image.Image:
+         return self.model(image, gamma_corrected=True)
+
+
+ def get_preprocessor(adapter_name: str) -> Callable[[PIL.Image.Image], PIL.Image.Image]:
+     if adapter_name == "TencentARC/t2i-adapter-canny-sdxl-1.0":
+         return CannyPreprocessor()
+     elif adapter_name == "TencentARC/t2i-adapter-sketch-sdxl-1.0":
+         return PidiNetPreprocessor()
+     elif adapter_name == "TencentARC/t2i-adapter-lineart-sdxl-1.0":
+         return LineartPreprocessor()
+     elif adapter_name == "TencentARC/t2i-adapter-depth-midas-sdxl-1.0":
+         return MidasPreprocessor()
+     elif adapter_name == "TencentARC/t2i-adapter-depth-zoe-sdxl-1.0":
+         return ZoePreprocessor()
+     elif adapter_name == "TencentARC/t2i-adapter-recolor-sdxl-1.0":
+         return RecolorPreprocessor()
+     else:
+         raise ValueError(f"Adapter name must be one of {ADAPTER_NAMES}")
+
+
+ class Model:
+     MAX_NUM_INFERENCE_STEPS = 50
+
+     def __init__(self, adapter_name: str):
+         if adapter_name not in ADAPTER_NAMES:
+             raise ValueError(f"Adapter name must be one of {ADAPTER_NAMES}")
+
+         self.adapter_name = adapter_name
+
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         if torch.cuda.is_available():
+             self.preprocessor = get_preprocessor(adapter_name)
+
+             model_id = "stabilityai/stable-diffusion-xl-base-1.0"
+             adapter = T2IAdapter.from_pretrained(
+                 adapter_name,
+                 torch_dtype=torch.float16,
+                 variant="fp16",
+             ).to(self.device)
+             euler_a = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
+             vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+             self.pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
+                 model_id,
+                 vae=vae,
+                 adapter=adapter,
+                 scheduler=euler_a,
+                 torch_dtype=torch.float16,
+                 variant="fp16",
+             ).to(self.device)
+             self.pipe.enable_xformers_memory_efficient_attention()
+         else:
+             self.pipe = None
+
+     def change_adapter(self, adapter_name: str) -> None:
+         if not torch.cuda.is_available():
+             raise RuntimeError("This demo does not work on CPU.")
+         if adapter_name not in ADAPTER_NAMES:
+             raise ValueError(f"Adapter name must be one of {ADAPTER_NAMES}")
+         if adapter_name == self.adapter_name:
+             return
+
+         self.preprocessor = None  # type: ignore
+         torch.cuda.empty_cache()
+         self.preprocessor = get_preprocessor(adapter_name)
+
+         self.pipe.adapter = None
+         torch.cuda.empty_cache()
+         self.pipe.adapter = T2IAdapter.from_pretrained(
+             adapter_name,
+             torch_dtype=torch.float16,
+             variant="fp16",
+         ).to(self.device)
+
+     def resize_image(self, image: PIL.Image.Image) -> PIL.Image.Image:
+         w, h = image.size
+         scale = 1024 / max(w, h)
+         new_w = int(w * scale)
+         new_h = int(h * scale)
+         return image.resize((new_w, new_h), PIL.Image.LANCZOS)
+
+     def run(
+         self,
+         image: PIL.Image.Image,
+         prompt: str,
+         negative_prompt: str,
+         num_inference_steps: int = 30,
+         guidance_scale: float = 7.5,
+         adapter_conditioning_scale: float = 0.8,
+         cond_tau: float = 0.8,
+         seed: int = 0,
+         apply_preprocess: bool = True,
+     ) -> list[PIL.Image.Image]:
+         if num_inference_steps > self.MAX_NUM_INFERENCE_STEPS:
+             raise ValueError(f"Number of steps must be less than {self.MAX_NUM_INFERENCE_STEPS}")
+
+         # Resize image to avoid OOM
+         image = self.resize_image(image)
+
+         if apply_preprocess:
+             image = self.preprocessor(image)
+
+         generator = torch.Generator(device=self.device).manual_seed(seed)
+         out = self.pipe(
+             prompt=prompt,
+             negative_prompt=negative_prompt,
+             image=image,
+             num_inference_steps=num_inference_steps,
+             adapter_conditioning_scale=adapter_conditioning_scale,
+             cond_tau=cond_tau,
+             generator=generator,
+             guidance_scale=guidance_scale,
+         ).images[0]
+         return [image, out]
requirements.txt ADDED
@@ -0,0 +1,12 @@
+ accelerate==0.22.0
+ controlnet_aux==0.0.7
+ git+https://github.com/huggingface/diffusers@t2iadapterxl
+ gradio==3.42.0
+ openmim==0.3.9
+ Pillow==10.0.0
+ safetensors==0.3.3
+ timm==0.6.12
+ torch==2.0.1
+ torchvision==0.15.2
+ transformers==4.33.0
+ xformers==0.0.20
style.css ADDED
@@ -0,0 +1,10 @@
+ h1 {
+   text-align: center;
+ }
+
+ #duplicate-button {
+   margin: auto;
+   color: #fff;
+   background: #1565c0;
+   border-radius: 100vh;
+ }