mundo2333 committed
Commit
b525d33
1 Parent(s): 464d545

Upload 2 files

Files changed (2)
  1. app.py +177 -0
  2. requirements.txt +17 -0
app.py ADDED
@@ -0,0 +1,177 @@
+
+from diffusers import StableDiffusionPipeline
+from diffusers import StableDiffusionImg2ImgPipeline
+import gradio as gr
+import torch
+
+models = [
+    "nitrosocke/Arcane-Diffusion",
+    "nitrosocke/archer-diffusion",
+    "nitrosocke/elden-ring-diffusion",
+    "nitrosocke/spider-verse-diffusion",
+    "nitrosocke/modern-disney-diffusion",
+    "hakurei/waifu-diffusion",
+    "lambdalabs/sd-pokemon-diffusers",
+    "yuk/fuyuko-waifu-diffusion",
+    "AstraliteHeart/pony-diffusion",
+    "IfanSnek/JohnDiffusion",
+    "nousr/robo-diffusion",
+    "DGSpitzer/Cyberpunk-Anime-Diffusion"
+]
+
+prompt_prefixes = {
+    models[0]: "arcane style ",
+    models[1]: "archer style ",
+    models[2]: "elden ring style ",
+    models[3]: "spiderverse style ",
+    models[4]: "modern disney style ",
+    models[5]: "",
+    models[6]: "",
+    models[7]: "",
+    models[8]: "",
+    models[9]: "",
+    models[10]: "",
+    models[11]: "dgs illustration style ",
+}
+
+current_model = models[0]
+pipe = StableDiffusionPipeline.from_pretrained(current_model, torch_dtype=torch.float16)
+if torch.cuda.is_available():
+    pipe = pipe.to("cuda")
+
+device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
+
+def inference(model, img, strength, prompt, guidance, steps, seed):
+
+    # Build the generator on whichever device the pipeline runs on;
+    # torch.Generator('cuda') raises on CPU-only machines.
+    generator_device = "cuda" if torch.cuda.is_available() else "cpu"
+    generator = torch.Generator(generator_device).manual_seed(seed) if seed != 0 else None
+
+    if img is not None:
+        return img_inference(model, prompt, img, strength, guidance, steps, generator)
+    else:
+        return text_inference(model, prompt, guidance, steps, generator)
+
+def text_inference(model, prompt, guidance, steps, generator=None):
+
+    global current_model
+    global pipe
+    # Reload when the model changes or when the cached pipeline is the
+    # img2img variant left behind by a previous image-to-image call.
+    if model != current_model or not isinstance(pipe, StableDiffusionPipeline):
+        current_model = model
+        pipe = StableDiffusionPipeline.from_pretrained(current_model, torch_dtype=torch.float16)
+
+    if torch.cuda.is_available():
+        pipe = pipe.to("cuda")
+
+    prompt = prompt_prefixes[current_model] + prompt
+    image = pipe(
+        prompt,
+        num_inference_steps=int(steps),
+        guidance_scale=guidance,
+        width=512,
+        height=512,
+        generator=generator).images[0]
+    return image
+
+def img_inference(model, prompt, img, strength, guidance, steps, generator):
+
+    global current_model
+    global pipe
+    # Reload when the model changes or when the cached pipeline is the
+    # text-to-image variant.
+    if model != current_model or not isinstance(pipe, StableDiffusionImg2ImgPipeline):
+        current_model = model
+        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model, torch_dtype=torch.float16)
+
+    if torch.cuda.is_available():
+        pipe = pipe.to("cuda")
+
+    prompt = prompt_prefixes[current_model] + prompt
+    img = img.resize((512, 512))  # PIL's resize returns a new image; it must be assigned
+    image = pipe(
+        prompt,
+        # The img2img pipeline takes `image` (formerly `init_image`) and
+        # derives the output size from it, so width/height are not passed.
+        image=img,
+        num_inference_steps=int(steps),
+        strength=strength,
+        guidance_scale=guidance,
+        generator=generator).images[0]
+    return image
+
+
+# Raw CSS only: gradio injects this string into its own <style> element.
+css = """
+.finetuned-diffusion-div {
+    text-align: center;
+    max-width: 700px;
+    margin: 0 auto;
+}
+.finetuned-diffusion-div div {
+    display: inline-flex;
+    align-items: center;
+    gap: 0.8rem;
+    font-size: 1.75rem;
+}
+.finetuned-diffusion-div div h1 {
+    font-weight: 900;
+    margin-bottom: 7px;
+}
+.finetuned-diffusion-div p {
+    margin-bottom: 10px;
+    font-size: 94%;
+}
+.finetuned-diffusion-div p a {
+    text-decoration: underline;
+}
+"""
+with gr.Blocks(css=css) as demo:
+    gr.HTML(
+        """
+        <div class="finetuned-diffusion-div">
+          <div>
+            <h1>Finetuned Diffusion</h1>
+          </div>
+          <p>
+            Demo for multiple fine-tuned Stable Diffusion models, trained on different styles: <br>
+            <a href="https://huggingface.co/nitrosocke/Arcane-Diffusion">Arcane</a>, <a href="https://huggingface.co/nitrosocke/archer-diffusion">Archer</a>, <a href="https://huggingface.co/nitrosocke/elden-ring-diffusion">Elden Ring</a>, <a href="https://huggingface.co/nitrosocke/spider-verse-diffusion">Spiderverse</a>, <a href="https://huggingface.co/nitrosocke/modern-disney-diffusion">Modern Disney</a>, <a href="https://huggingface.co/hakurei/waifu-diffusion">Waifu</a>, <a href="https://huggingface.co/lambdalabs/sd-pokemon-diffusers">Pokemon</a>, <a href="https://huggingface.co/yuk/fuyuko-waifu-diffusion">Fuyuko Waifu</a>, <a href="https://huggingface.co/AstraliteHeart/pony-diffusion">Pony</a>, <a href="https://huggingface.co/IfanSnek/JohnDiffusion">John</a>, <a href="https://huggingface.co/nousr/robo-diffusion">Robo</a>, <a href="https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion">Cyberpunk Anime</a>
+          </p>
+        </div>
+        """
+    )
+    with gr.Row():
+
+        with gr.Column():
+
+            model = gr.Dropdown(label="Model", choices=models, value=models[0])
+            prompt = gr.Textbox(label="Prompt", placeholder="Style prefix is applied automatically")
+            with gr.Accordion("Image to image (optional)", open=False):
+                image = gr.Image(label="Image", height=256, tool="editor", type="pil")
+                strength = gr.Slider(label="Strength", minimum=0, maximum=1, step=0.01, value=0.75)
+
+            with gr.Accordion("Advanced options", open=False):
+                guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
+                steps = gr.Slider(label="Steps", value=50, maximum=100, minimum=2)
+                seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
+
+            run = gr.Button(value="Run")
+            gr.Markdown(f"Running on: {device}")
+        with gr.Column():
+            image_out = gr.Image(height=512)
+
+    prompt.submit(inference, inputs=[model, image, strength, prompt, guidance, steps, seed], outputs=image_out)
+    run.click(inference, inputs=[model, image, strength, prompt, guidance, steps, seed], outputs=image_out)
+    gr.Examples([
+        [models[0], "jason bateman disassembling the demon core", 7.5, 50],
+        [models[3], "portrait of dwayne johnson", 7.0, 75],
+        [models[4], "portrait of a beautiful alyx vance half life", 10, 50],
+        [models[5], "Aloy from Horizon: Zero Dawn, half body portrait, smooth, detailed armor, beautiful face, illustration", 7, 45],
+        [models[4], "fantasy portrait painting, digital art", 4, 30],
+    ], [model, prompt, guidance, steps], image_out, text_inference, cache_examples=torch.cuda.is_available())
+    gr.Markdown('''
+    Models by [@nitrosocke](https://huggingface.co/nitrosocke), [@Helixngc7293](https://twitter.com/DGSpitzer) and others. ❤️<br>
+    Space by: [![Twitter Follow](https://img.shields.io/twitter/follow/hahahahohohe?label=%40anzorq&style=social)](https://twitter.com/hahahahohohe)
+
+    ![visitors](https://visitor-badge.glitch.me/badge?page_id=anzorq.finetuned_diffusion)
+    ''')
+
+demo.queue()
+demo.launch(share=True)
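
For reference, a minimal standalone sketch of what the text-to-image path in app.py boils down to outside of Gradio. It assumes the pinned dependencies below and a CUDA GPU, and reuses a model ID, style prefix, and example prompt from the file above; it is not part of this commit.

# Standalone sketch (not part of app.py): the same text-to-image call the
# Space makes, with the model's style prefix prepended by hand.
import torch
from diffusers import StableDiffusionPipeline

model_id = "nitrosocke/Arcane-Diffusion"  # models[0] in app.py
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")  # assumes a CUDA GPU is available

prompt = "arcane style " + "jason bateman disassembling the demon core"
generator = torch.Generator("cuda").manual_seed(42)  # any fixed seed
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5,
             width=512, height=512, generator=generator).images[0]
image.save("arcane_sample.png")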
requirements.txt ADDED
@@ -0,0 +1,17 @@
+--extra-index-url https://download.pytorch.org/whl/cu113
+torch
+torchvision==0.13.1+cu113
+diffusers==0.14.0
+#git+https://github.com/huggingface/diffusers.git
+transformers==4.28.1
+#git+https://github.com/huggingface/transformers
+scipy
+ftfy
+psutil
+accelerate==0.12.0
+#OmegaConf
+#pytorch_lightning
+#triton==2.0.0.dev20220701
+triton
+#https://github.com/apolinario/xformers/releases/download/0.0.3/xformers-0.0.14.dev0-cp38-cp38-linux_x86_64.whl
+https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+4c06c79.d20221205-cp38-cp38-linux_x86_64.whl
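
The xformers wheel at the end of this list only has an effect if a pipeline opts into it, which app.py as uploaded here does not do. Below is a hedged sketch of how that could be wired up, assuming the wheel matches the installed torch/CUDA build; the model ID is reused from app.py for illustration.

# Optional sketch (not part of this commit): enable xformers attention on a
# pipeline, falling back gracefully if the wheel above is missing or does not
# match the local torch/CUDA build.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "nitrosocke/Arcane-Diffusion", torch_dtype=torch.float16
).to("cuda")

try:
    pipe.enable_xformers_memory_efficient_attention()
except Exception as err:
    print(f"xformers not enabled: {err}")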