Commit a6ec9cb
Parent(s): 7686283

model upload

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- Dockerfile +39 -43
- developer_console.sh +53 -0
- models/gfpgan/GFPGANv1.3.pth +3 -0
- models/gfpgan/Place your gfpgan model files here.txt +1 -0
- models/hypernetwork/Place your hypernetwork model files here.txt +1 -0
- models/realesrgan/Place your realesrgan model files here.txt +1 -0
- models/realesrgan/RealESRGAN_x4plus.pth +3 -0
- models/realesrgan/RealESRGAN_x4plus_anime_6B.pth +3 -0
- models/stable-diffusion/Place your stable-diffusion model files here.txt +1 -0
- models/stable-diffusion/sd-v1-4.ckpt +3 -0
- models/vae/Place your vae model files here.txt +1 -0
- models/vae/vae-ft-mse-840000-ema-pruned.ckpt +3 -0
- node-shark/Dockerfile +0 -24
- node-shark/app.js +0 -49
- node-shark/package-lock.json +0 -374
- node-shark/package.json +0 -21
- node-shark/views/css/styles.css +0 -45
- node-shark/views/index.html +0 -54
- node-shark/views/sharks.html +0 -52
- scripts/check_modules.py +13 -0
- scripts/functions.sh +1 -1
- scripts/install_status.txt +1 -0
- scripts/on_sd_start.sh +323 -0
- ui/__pycache__/main.cpython-38.pyc +0 -0
- ui/easydiffusion/__init__.py +0 -0
- ui/easydiffusion/__pycache__/__init__.cpython-38.pyc +0 -0
- ui/easydiffusion/__pycache__/app.cpython-38.pyc +0 -0
- ui/easydiffusion/__pycache__/device_manager.cpython-38.pyc +0 -0
- ui/easydiffusion/__pycache__/model_manager.cpython-38.pyc +0 -0
- ui/easydiffusion/__pycache__/renderer.cpython-38.pyc +0 -0
- ui/easydiffusion/__pycache__/server.cpython-38.pyc +0 -0
- ui/easydiffusion/__pycache__/task_manager.cpython-38.pyc +0 -0
- ui/easydiffusion/__pycache__/types.cpython-38.pyc +0 -0
- ui/easydiffusion/app.py +328 -0
- ui/easydiffusion/device_manager.py +253 -0
- ui/easydiffusion/model_manager.py +255 -0
- ui/easydiffusion/renderer.py +180 -0
- ui/easydiffusion/server.py +304 -0
- ui/easydiffusion/task_manager.py +565 -0
- ui/easydiffusion/types.py +103 -0
- ui/easydiffusion/utils/__init__.py +8 -0
- ui/easydiffusion/utils/__pycache__/__init__.cpython-38.pyc +0 -0
- ui/easydiffusion/utils/__pycache__/save_utils.cpython-38.pyc +0 -0
- ui/easydiffusion/utils/save_utils.py +131 -0
- ui/hotfix/9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142 +171 -0
- ui/index.html +514 -0
- ui/main.py +10 -0
- ui/media/css/auto-save.css +81 -0
- ui/media/css/fontawesome-all.min.css +0 -0
- ui/media/css/fonts.css +40 -0
Dockerfile
CHANGED
@@ -1,48 +1,44 @@
(Removed: the previous 48-line Dockerfile. Its contents are mostly not preserved in this view, apart from a few commented-out lines.)
-# EXPOSE 9000
-# # EXPOSE $PORT
-# RUN chown -R huggingface /home/huggingface/easy-diffusion
-# RUN chmod -R u+x /home/huggingface/easy-diffusion
-# USER huggingface
+FROM ubuntu:20.04
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    build-essential \
+    ca-certificates \
+    curl \
+    git \
+    libgl1-mesa-glx \
+    libglib2.0-0 \
+    libsm6 \
+    libxext6 \
+    libxrender-dev \
+    python3-dev \
+    python3-pip \
+    python3-setuptools \
+    python3-wheel \
+    sudo \
+    unzip \
+    vim \
+    wget \
+    && rm -rf /var/lib/apt/lists/*
+
+RUN useradd -m huggingface
+
+# RUN wget https://github.com/cmdr2/stable-diffusion-ui/releases/download/v2.5.24/Easy-Diffusion-Linux.zip && \
+#     unzip Easy-Diffusion-Linux.zip && \
+#     rm Easy-Diffusion-Linux.zip
+
+RUN mkdir easy-diffusion
+# copy the local repo to the container
+COPY . /home/huggingface/easy-diffusion
+
+EXPOSE 9000
+# EXPOSE $PORT
+
+RUN chown -R huggingface /home/huggingface/easy-diffusion
+RUN chmod -R u+x /home/huggingface/easy-diffusion
+
+USER huggingface
+
+WORKDIR /home/huggingface
+
+CMD [ "easy-diffusion/start.sh" ]

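The rewritten Dockerfile copies the repository into /home/huggingface/easy-diffusion, switches to the huggingface user, and runs easy-diffusion/start.sh, exposing port 9000. A minimal sketch of building and running it locally; the image tag "easy-diffusion-space" is only an illustrative name, not something defined by this commit:

# build the image from the repository root, then publish the UI port
docker build -t easy-diffusion-space .
docker run --rm -p 9000:9000 easy-diffusion-space
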
developer_console.sh
ADDED
@@ -0,0 +1,53 @@
#!/bin/bash

cd "$(dirname "${BASH_SOURCE[0]}")"

if [ "$0" == "bash" ]; then
    echo "Opening Stable Diffusion UI - Developer Console.."
    echo ""

    # set legacy and new installer's PATH, if they exist
    if [ -e "installer" ]; then export PATH="$(pwd)/installer/bin:$PATH"; fi
    if [ -e "installer_files/env" ]; then export PATH="$(pwd)/installer_files/env/bin:$PATH"; fi

    # activate the installer env
    CONDA_BASEPATH=$(conda info --base)
    source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # avoids the 'shell not initialized' error

    conda activate

    # test the environment
    echo "Environment Info:"
    which git
    git --version

    which conda
    conda --version

    echo ""

    # activate the legacy environment (if present) and set PYTHONPATH
    if [ -e "installer_files/env" ]; then
        export PYTHONPATH="$(pwd)/installer_files/env/lib/python3.8/site-packages"
    fi
    if [ -e "stable-diffusion/env" ]; then
        CONDA_BASEPATH=$(conda info --base)
        source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)

        conda activate ./stable-diffusion/env

        export PYTHONPATH="$(pwd)/stable-diffusion/env/lib/python3.8/site-packages"
    fi

    which python
    python --version

    echo "PYTHONPATH=$PYTHONPATH"

    # done

    echo ""
else
    file_name=$(basename "${BASH_SOURCE[0]}")
    bash --init-file "$file_name"
fi

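When the script is run directly, "$0" is the script name rather than "bash", so the else branch re-launches bash with the script as its init file; in that re-launched shell "$0" is "bash" and the environment setup runs, leaving the installer's conda environment active in an interactive console. A usage sketch, assuming it is run from the installation folder:

cd /path/to/easy-diffusion   # illustrative path, not defined by this commit
bash developer_console.sh
# the shell that opens has git, conda and the env's python on PATH, with PYTHONPATH set
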
models/gfpgan/GFPGANv1.3.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c953a88f2727c85c3d9ae72e2bd4846bbaf59fe6972ad94130e23e7017524a70
size 348632874

models/gfpgan/Place your gfpgan model files here.txt
ADDED
@@ -0,0 +1 @@
Supported extensions: .pth

models/hypernetwork/Place your hypernetwork model files here.txt
ADDED
@@ -0,0 +1 @@
Supported extensions: .pt or .safetensors

models/realesrgan/Place your realesrgan model files here.txt
ADDED
@@ -0,0 +1 @@
Supported extensions: .pth

models/realesrgan/RealESRGAN_x4plus.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4fa0d38905f75ac06eb49a7951b426670021be3018265fd191d2125df9d682f1
size 67040989

models/realesrgan/RealESRGAN_x4plus_anime_6B.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f872d837d3c90ed2e05227bed711af5671a6fd1c9f7d7e91c911a61f155e99da
size 17938799

models/stable-diffusion/Place your stable-diffusion model files here.txt
ADDED
@@ -0,0 +1 @@
Supported extensions: .ckpt or .safetensors

models/stable-diffusion/sd-v1-4.ckpt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fe4efff1e174c627256e44ec2991ba279b3816e364b49f9be2abc0b3ff3f8556
size 4265380512

models/vae/Place your vae model files here.txt
ADDED
@@ -0,0 +1 @@
Supported extensions: .vae.pt or .ckpt or .safetensors

models/vae/vae-ft-mse-840000-ema-pruned.ckpt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c6a580b13a5bc05a5e16e4dbb80608ff2ec251a162311590c1f34c013d7f3dab
size 334695179

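These .pth and .ckpt entries are Git LFS pointer files (version, oid, size), not the weights themselves. A minimal sketch of fetching the actual binaries from a clone of this repository, assuming git-lfs is installed locally:

git lfs install
git lfs pull   # downloads GFPGANv1.3.pth, the RealESRGAN weights, sd-v1-4.ckpt and vae-ft-mse-840000-ema-pruned.ckpt
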
node-shark/Dockerfile
DELETED
@@ -1,24 +0,0 @@
FROM ubuntu:20.04

LABEL version="1.0"

RUN apt-get update
RUN apt-get install -y curl sudo
RUN curl -sL https://deb.nodesource.com/setup_16.x | sudo -E bash -
RUN apt-get install -y nodejs

# set working directory to /app
WORKDIR /app

# copy index.js from current directory into the container at /app
COPY . /app

# install need packages specified in package.json
RUN npm install

# This allows Heroku bind its PORT the Apps port
# since Heroku needs to use its own PORT before the App can be made accessible to the World
EXPOSE $PORT

# run app when container launches
CMD ["node", "app.js"]

node-shark/app.js
DELETED
@@ -1,49 +0,0 @@
// const http = require('http');

// const hostname = process.env.HOST || '0.0.0.0';
// const port = process.env.PORT || 7860;

// const server = http.createServer((req, res) => {
//   res.statusCode = 200;
//   res.setHeader('Content-Type', 'text/plain');
//   res.end('Hello World');
// });

// server.listen(port, hostname, () => {
//   console.log(`Server running at http://${hostname}:${port}/`);
// });

var express = require("express");
var app = express();
var router = express.Router();

var path = __dirname + '/views/';

// Constants
const PORT = process.env.PORT || 7860;
const HOST = '0.0.0.0';

router.use(function (req,res,next) {
  console.log("/" + req.method);
  next();
});

// Hello world api
router.get("/",function(req,res){
  res.send("Hello world!");
});

router.get("/home",function(req,res){
  res.sendFile(path + "index.html");
});

router.get("/sharks",function(req,res){
  res.sendFile(path + "sharks.html");
});

app.use(express.static(path));
app.use("/", router);

app.listen(PORT, function () {
  console.log(`Example app listening on port ${PORT}`)
})

node-shark/package-lock.json
DELETED
@@ -1,374 +0,0 @@
{
  "name": "nodejs-image-demo",
  "version": "1.0.0",
  "lockfileVersion": 1,
  "requires": true,
  "dependencies": {
    "accepts": {
      "version": "1.3.7",
      "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz",
      "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==",
      "requires": {
        "mime-types": "~2.1.24",
        "negotiator": "0.6.2"
      }
    },
    "array-flatten": {
      "version": "1.1.1",
      "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
      "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI="
    },
    "body-parser": {
      "version": "1.19.0",
      "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz",
      "integrity": "sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==",
      "requires": {
        "bytes": "3.1.0",
        "content-type": "~1.0.4",
        "debug": "2.6.9",
        "depd": "~1.1.2",
        "http-errors": "1.7.2",
        "iconv-lite": "0.4.24",
        "on-finished": "~2.3.0",
        "qs": "6.7.0",
        "raw-body": "2.4.0",
        "type-is": "~1.6.17"
      }
    },
    "bytes": {
      "version": "3.1.0",
      "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz",
      "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg=="
    },
    "content-disposition": {
      "version": "0.5.3",
      "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.3.tgz",
      "integrity": "sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==",
      "requires": {
        "safe-buffer": "5.1.2"
      }
    },
    "content-type": {
      "version": "1.0.4",
      "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
      "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA=="
    },
    "cookie": {
      "version": "0.4.0",
      "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.0.tgz",
      "integrity": "sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg=="
    },
    "cookie-signature": {
      "version": "1.0.6",
      "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
      "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw="
    },
    "debug": {
      "version": "2.6.9",
      "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
      "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
      "requires": {
        "ms": "2.0.0"
      }
    },
    "depd": {
      "version": "1.1.2",
      "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
      "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak="
    },
    "destroy": {
      "version": "1.0.4",
      "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz",
      "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA="
    },
    "ee-first": {
      "version": "1.1.1",
      "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
      "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0="
    },
    "encodeurl": {
      "version": "1.0.2",
      "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
      "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k="
    },
    "escape-html": {
      "version": "1.0.3",
      "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
      "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg="
    },
    "etag": {
      "version": "1.8.1",
      "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
      "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc="
    },
    "express": {
      "version": "4.17.1",
      "resolved": "https://registry.npmjs.org/express/-/express-4.17.1.tgz",
      "integrity": "sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g==",
      "requires": {
        "accepts": "~1.3.7",
        "array-flatten": "1.1.1",
        "body-parser": "1.19.0",
        "content-disposition": "0.5.3",
        "content-type": "~1.0.4",
        "cookie": "0.4.0",
        "cookie-signature": "1.0.6",
        "debug": "2.6.9",
        "depd": "~1.1.2",
        "encodeurl": "~1.0.2",
        "escape-html": "~1.0.3",
        "etag": "~1.8.1",
        "finalhandler": "~1.1.2",
        "fresh": "0.5.2",
        "merge-descriptors": "1.0.1",
        "methods": "~1.1.2",
        "on-finished": "~2.3.0",
        "parseurl": "~1.3.3",
        "path-to-regexp": "0.1.7",
        "proxy-addr": "~2.0.5",
        "qs": "6.7.0",
        "range-parser": "~1.2.1",
        "safe-buffer": "5.1.2",
        "send": "0.17.1",
        "serve-static": "1.14.1",
        "setprototypeof": "1.1.1",
        "statuses": "~1.5.0",
        "type-is": "~1.6.18",
        "utils-merge": "1.0.1",
        "vary": "~1.1.2"
      }
    },
    "finalhandler": {
      "version": "1.1.2",
      "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz",
      "integrity": "sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==",
      "requires": {
        "debug": "2.6.9",
        "encodeurl": "~1.0.2",
        "escape-html": "~1.0.3",
        "on-finished": "~2.3.0",
        "parseurl": "~1.3.3",
        "statuses": "~1.5.0",
        "unpipe": "~1.0.0"
      }
    },
    "forwarded": {
      "version": "0.1.2",
      "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz",
      "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ="
    },
    "fresh": {
      "version": "0.5.2",
      "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
      "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac="
    },
    "http-errors": {
      "version": "1.7.2",
      "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.2.tgz",
      "integrity": "sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==",
      "requires": {
        "depd": "~1.1.2",
        "inherits": "2.0.3",
        "setprototypeof": "1.1.1",
        "statuses": ">= 1.5.0 < 2",
        "toidentifier": "1.0.0"
      }
    },
    "iconv-lite": {
      "version": "0.4.24",
      "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
      "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
      "requires": {
        "safer-buffer": ">= 2.1.2 < 3"
      }
    },
    "inherits": {
      "version": "2.0.3",
      "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
      "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
    },
    "ipaddr.js": {
      "version": "1.9.1",
      "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
      "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g=="
    },
    "media-typer": {
      "version": "0.3.0",
      "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
      "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g="
    },
    "merge-descriptors": {
      "version": "1.0.1",
      "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz",
      "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E="
    },
    "methods": {
      "version": "1.1.2",
      "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
      "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4="
    },
    "mime": {
      "version": "1.6.0",
      "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
      "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg=="
    },
    "mime-db": {
      "version": "1.44.0",
      "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.44.0.tgz",
      "integrity": "sha512-/NOTfLrsPBVeH7YtFPgsVWveuL+4SjjYxaQ1xtM1KMFj7HdxlBlxeyNLzhyJVx7r4rZGJAZ/6lkKCitSc/Nmpg=="
    },
    "mime-types": {
      "version": "2.1.27",
      "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.27.tgz",
      "integrity": "sha512-JIhqnCasI9yD+SsmkquHBxTSEuZdQX5BuQnS2Vc7puQQQ+8yiP5AY5uWhpdv4YL4VM5c6iliiYWPgJ/nJQLp7w==",
      "requires": {
        "mime-db": "1.44.0"
      }
    },
    "ms": {
      "version": "2.0.0",
      "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
      "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
    },
    "negotiator": {
      "version": "0.6.2",
      "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz",
      "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw=="
    },
    "on-finished": {
      "version": "2.3.0",
      "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
      "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
      "requires": {
        "ee-first": "1.1.1"
      }
    },
    "parseurl": {
      "version": "1.3.3",
      "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
      "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="
    },
    "path-to-regexp": {
      "version": "0.1.7",
      "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz",
      "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w="
    },
    "proxy-addr": {
      "version": "2.0.6",
      "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.6.tgz",
      "integrity": "sha512-dh/frvCBVmSsDYzw6n926jv974gddhkFPfiN8hPOi30Wax25QZyZEGveluCgliBnqmuM+UJmBErbAUFIoDbjOw==",
      "requires": {
        "forwarded": "~0.1.2",
        "ipaddr.js": "1.9.1"
      }
    },
    "qs": {
      "version": "6.7.0",
      "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz",
      "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ=="
    },
    "range-parser": {
      "version": "1.2.1",
      "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
      "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="
    },
    "raw-body": {
      "version": "2.4.0",
      "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.0.tgz",
      "integrity": "sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==",
      "requires": {
        "bytes": "3.1.0",
        "http-errors": "1.7.2",
        "iconv-lite": "0.4.24",
        "unpipe": "1.0.0"
      }
    },
    "safe-buffer": {
      "version": "5.1.2",
      "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
      "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
    },
    "safer-buffer": {
      "version": "2.1.2",
      "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
      "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
    },
    "send": {
      "version": "0.17.1",
      "resolved": "https://registry.npmjs.org/send/-/send-0.17.1.tgz",
      "integrity": "sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg==",
      "requires": {
        "debug": "2.6.9",
        "depd": "~1.1.2",
        "destroy": "~1.0.4",
        "encodeurl": "~1.0.2",
        "escape-html": "~1.0.3",
        "etag": "~1.8.1",
        "fresh": "0.5.2",
        "http-errors": "~1.7.2",
        "mime": "1.6.0",
        "ms": "2.1.1",
        "on-finished": "~2.3.0",
        "range-parser": "~1.2.1",
        "statuses": "~1.5.0"
      },
      "dependencies": {
        "ms": {
          "version": "2.1.1",
          "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz",
          "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg=="
        }
      }
    },
    "serve-static": {
      "version": "1.14.1",
      "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.1.tgz",
      "integrity": "sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg==",
      "requires": {
        "encodeurl": "~1.0.2",
        "escape-html": "~1.0.3",
        "parseurl": "~1.3.3",
        "send": "0.17.1"
      }
    },
    "setprototypeof": {
      "version": "1.1.1",
      "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz",
      "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw=="
    },
    "statuses": {
      "version": "1.5.0",
      "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz",
      "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow="
    },
    "toidentifier": {
      "version": "1.0.0",
      "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz",
      "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw=="
    },
    "type-is": {
      "version": "1.6.18",
      "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
      "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
      "requires": {
        "media-typer": "0.3.0",
        "mime-types": "~2.1.24"
      }
    },
    "unpipe": {
      "version": "1.0.0",
      "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
      "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw="
    },
    "utils-merge": {
      "version": "1.0.1",
      "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
      "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM="
    },
    "vary": {
      "version": "1.1.2",
      "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
      "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw="
    }
  }
}

node-shark/package.json
DELETED
@@ -1,21 +0,0 @@
{
  "name": "nodejs-image-demo",
  "version": "1.0.0",
  "description": "nodejs image demo",
  "author": "Sammy the Shark <sammy@example.com>",
  "license": "MIT",
  "main": "app.js",
  "scripts": {
    "start": "node app.js",
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "keywords": [
    "nodejs",
    "bootstrap",
    "express"
  ],
  "dependencies": {
    "express": "^4.16.4",
    "http": "0.0.0"
  }
}

node-shark/views/css/styles.css
DELETED
@@ -1,45 +0,0 @@
.navbar {
  margin-bottom: 0;
}

body {
  background: #020A1B;
  color: #ffffff;
  font-family: 'Merriweather', sans-serif;
}
h1,
h2 {
  font-weight: bold;
}
p {
  font-size: 16px;
  color: #ffffff;
}


.jumbotron {
  background: #0048CD;
  color: white;
  text-align: center;
}
.jumbotron p {
  color: white;
  font-size: 26px;
}

.btn-primary {
  color: #fff;
  text-color: #000000;
  border-color: white;
  margin-bottom: 5px;
}

img, video, audio {
  margin-top: 20px;
  max-width: 80%;
}

div.caption: {
  float: left;
  clear: both;
}

node-shark/views/index.html
DELETED
@@ -1,54 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <title>About Sharks</title>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">
    <link href="css/styles.css" rel="stylesheet">
    <link href='https://fonts.googleapis.com/css?family=Merriweather:400,700' rel='stylesheet' type='text/css'>
    <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>
</head>
<body>
    <nav class="navbar navbar-inverse navbar-static-top">
        <div class="container">
            <div class="navbar-header">
                <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false">
                    <span class="sr-only">Toggle navigation</span>
                    <span class="icon-bar"></span>
                    <span class="icon-bar"></span>
                    <span class="icon-bar"></span>
                </button>
                <a class="navbar-brand" href="#">Everything Sharks</a>
            </div>
            <div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">
                <ul class="nav navbar-nav mr-auto">
                    <li class="active"><a href="/">Home</a></li>
                    <li><a href="/sharks">Sharks</a></li>
                </ul>
            </div>
        </div>
    </nav>
    <div class="jumbotron">
        <div class="container">
            <h1>Want to Learn About Sharks?</h1>
            <p>Are you ready to learn about sharks?</p>
            <br>
            <p><a class="btn btn-primary btn-lg" href="/sharks" role="button">Get Shark Info</a></p>
        </div>
    </div>
    <div class="container">
        <div class="row">
            <div class="col-md-6">
                <h3>Not all sharks are alike</h3>
                <p>Though some are dangerous, sharks generally do not attack humans. Out of the 500 species known to researchers, only 30 have been known to attack humans.</p>
            </div>
            <div class="col-md-6">
                <h3>Sharks are ancient</h3>
                <p>There is evidence to suggest that sharks lived up to 400 million years ago.</p>
            </div>
        </div>
    </div>
</body>
</html>

node-shark/views/sharks.html
DELETED
@@ -1,52 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <title>About Sharks</title>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">
    <link href="css/styles.css" rel="stylesheet">
    <link href='https://fonts.googleapis.com/css?family=Merriweather:400,700' rel='stylesheet' type='text/css'>
    <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>
</head>
<nav class="navbar navbar-inverse navbar-static-top">
    <div class="container">
        <div class="navbar-header">
            <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false">
                <span class="sr-only">Toggle navigation</span>
                <span class="icon-bar"></span>
                <span class="icon-bar"></span>
                <span class="icon-bar"></span>
            </button>
            <a class="navbar-brand" href="#">Everything Sharks</a>
        </div>
        <div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">
            <ul class="nav navbar-nav mr-auto">
                <li><a href="/">Home</a></li>
                <li class="active"><a href="/sharks">Sharks</a></li>
            </ul>
        </div>
    </div>
</nav>
<div class="jumbotron text-center">
    <h1>Shark Info</h1>
</div>
<div class="container">
    <div class="row">
        <div class="col-md-6">
            <p>
                <div class="caption">Some sharks are known to be dangerous to humans, though many more are not. The sawshark, for example, is not considered a threat to humans.</div>
                <img src="https://assets.digitalocean.com/articles/docker_node_image/sawshark.jpg" alt="Sawshark">
            </p>
        </div>
        <div class="col-md-6">
            <p>
                <div class="caption">Other sharks are known to be friendly and welcoming!</div>
                <img src="https://assets.digitalocean.com/articles/docker_node_image/sammy.png" alt="Sammy the Shark">
            </p>
        </div>
    </div>
</div>
</body>
</html>

scripts/check_modules.py
ADDED
@@ -0,0 +1,13 @@
'''
This script checks if the given modules exist
'''

import sys
import pkgutil

modules = sys.argv[1:]
missing_modules = []
for m in modules:
    if pkgutil.find_loader(m) is None:
        print('module', m, 'not found')
        exit(1)

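check_modules.py prints a message and exits with status 1 as soon as one of the named modules cannot be found, and exits 0 otherwise, which lets shell scripts use it as a guard. A small sketch of the pattern on_sd_start.sh uses below:

if python scripts/check_modules.py torch torchvision; then
    echo "torch and torchvision are importable"
else
    echo "missing modules, running the install step"
fi
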
scripts/functions.sh
CHANGED
@@ -31,7 +31,7 @@ EOF
 filesize() {
     case "$(uname -s)" in
         Linux*) stat -c "%s" $1;;
-        Darwin*) stat -f "%z" $1;;
+        Darwin*) /usr/bin/stat -f "%z" $1;;
         *) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
     esac
 }

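filesize() prints a file's size in bytes, using GNU stat on Linux and (after this change) the system BSD stat on macOS. A minimal sketch of how on_sd_start.sh uses it to validate downloaded weights; the 4265380512 figure is the expected size of sd-v1-4.ckpt recorded in the LFS pointer above:

model_size=$(filesize "../models/stable-diffusion/sd-v1-4.ckpt")
if [ "$model_size" -ne 4265380512 ]; then
    echo "sd-v1-4.ckpt looks incomplete ($model_size bytes)"
fi
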
scripts/install_status.txt
CHANGED
@@ -1 +1,2 @@
 (existing line 1, not shown in this view)
+sd_ui_git_cloned

scripts/on_sd_start.sh
ADDED
@@ -0,0 +1,323 @@
#!/bin/bash

cp sd-ui-files/scripts/functions.sh scripts/
cp sd-ui-files/scripts/on_env_start.sh scripts/
cp sd-ui-files/scripts/bootstrap.sh scripts/
cp sd-ui-files/scripts/check_modules.py scripts/

source ./scripts/functions.sh

# activate the installer env
CONDA_BASEPATH=$(conda info --base)
source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # avoids the 'shell not initialized' error

conda activate || fail "Failed to activate conda"

# remove the old version of the dev console script, if it's still present
if [ -e "open_dev_console.sh" ]; then
    rm "open_dev_console.sh"
fi

python -c "import os; import shutil; frm = 'sd-ui-files/ui/hotfix/9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'; dst = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface', 'transformers', '9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142'); shutil.copyfile(frm, dst) if os.path.exists(dst) else print(''); print('Hotfixed broken JSON file from OpenAI');"

# Caution, this file will make your eyes and brain bleed. It's such an unholy mess.
# Note to self: Please rewrite this in Python. For the sake of your own sanity.

# set the correct installer path (current vs legacy)
if [ -e "installer_files/env" ]; then
    export INSTALL_ENV_DIR="$(pwd)/installer_files/env"
fi
if [ -e "stable-diffusion/env" ]; then
    export INSTALL_ENV_DIR="$(pwd)/stable-diffusion/env"
fi

# create the stable-diffusion folder, to work with legacy installations
if [ ! -e "stable-diffusion" ]; then mkdir stable-diffusion; fi
cd stable-diffusion

# activate the old stable-diffusion env, if it exists
if [ -e "env" ]; then
    conda activate ./env || fail "conda activate failed"
fi

# disable the legacy src and ldm folder (otherwise this prevents installing gfpgan and realesrgan)
if [ -e "src" ]; then mv src src-old; fi
if [ -e "ldm" ]; then mv ldm ldm-old; fi

mkdir -p "../models/stable-diffusion"
mkdir -p "../models/gfpgan"
mkdir -p "../models/realesrgan"
mkdir -p "../models/vae"

# migrate the legacy models to the correct path (if already downloaded)
if [ -e "sd-v1-4.ckpt" ]; then mv sd-v1-4.ckpt ../models/stable-diffusion/; fi
if [ -e "custom-model.ckpt" ]; then mv custom-model.ckpt ../models/stable-diffusion/; fi
if [ -e "GFPGANv1.3.pth" ]; then mv GFPGANv1.3.pth ../models/gfpgan/; fi
if [ -e "RealESRGAN_x4plus.pth" ]; then mv RealESRGAN_x4plus.pth ../models/realesrgan/; fi
if [ -e "RealESRGAN_x4plus_anime_6B.pth" ]; then mv RealESRGAN_x4plus_anime_6B.pth ../models/realesrgan/; fi

OS_NAME=$(uname -s)
case "${OS_NAME}" in
    Linux*) OS_NAME="linux";;
    Darwin*) OS_NAME="macos";;
    *) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
esac

# install torch and torchvision
if python ../scripts/check_modules.py torch torchvision; then
    # temp fix for installations that installed torch 2.0 by mistake
    if [ "$OS_NAME" == "linux" ]; then
        python -m pip install --upgrade torch==1.13.1+cu116 torchvision==0.14.1+cu116 --extra-index-url https://download.pytorch.org/whl/cu116 -q
    elif [ "$OS_NAME" == "macos" ]; then
        python -m pip install --upgrade torch==1.13.1 torchvision==0.14.1 -q
    fi

    echo "torch and torchvision have already been installed."
else
    echo "Installing torch and torchvision.."

    export PYTHONNOUSERSITE=1
    export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"

    if [ "$OS_NAME" == "linux" ]; then
        if python -m pip install --upgrade torch==1.13.1+cu116 torchvision==0.14.1+cu116 --extra-index-url https://download.pytorch.org/whl/cu116 ; then
            echo "Installed."
        else
            fail "torch install failed"
        fi
    elif [ "$OS_NAME" == "macos" ]; then
        if python -m pip install --upgrade torch==1.13.1 torchvision==0.14.1 ; then
            echo "Installed."
        else
            fail "torch install failed"
        fi
    fi
fi

# install/upgrade sdkit
if python ../scripts/check_modules.py sdkit sdkit.models ldm transformers numpy antlr4 gfpgan realesrgan ; then
    echo "sdkit is already installed."

    # skip sdkit upgrade if in developer-mode
    if [ ! -e "../src/sdkit" ]; then
        export PYTHONNOUSERSITE=1
        export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"

        python -m pip install --upgrade sdkit==1.0.48 -q
    fi
else
    echo "Installing sdkit: https://pypi.org/project/sdkit/"

    export PYTHONNOUSERSITE=1
    export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"

    if python -m pip install sdkit==1.0.48 ; then
        echo "Installed."
    else
        fail "sdkit install failed"
    fi
fi

python -c "from importlib.metadata import version; print('sdkit version:', version('sdkit'))"

# upgrade stable-diffusion-sdkit
python -m pip install --upgrade stable-diffusion-sdkit==2.1.4 -q
python -c "from importlib.metadata import version; print('stable-diffusion version:', version('stable-diffusion-sdkit'))"

# install rich
if python ../scripts/check_modules.py rich; then
    echo "rich has already been installed."
else
    echo "Installing rich.."

    export PYTHONNOUSERSITE=1
    export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"

    if python -m pip install rich ; then
        echo "Installed."
    else
        fail "Install failed for rich"
    fi
fi

if python ../scripts/check_modules.py uvicorn fastapi ; then
    echo "Packages necessary for Easy Diffusion were already installed"
else
    printf "\n\nDownloading packages necessary for Easy Diffusion..\n\n"

    export PYTHONNOUSERSITE=1
    export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"

    if conda install -c conda-forge -y uvicorn fastapi ; then
        echo "Installed. Testing.."
    else
        fail "'conda install uvicorn' failed"
    fi

    if ! command -v uvicorn &> /dev/null; then
        fail "UI packages not found!"
    fi
fi

if [ -f "../models/stable-diffusion/sd-v1-4.ckpt" ]; then
    model_size=`filesize "../models/stable-diffusion/sd-v1-4.ckpt"`

    if [ "$model_size" -eq "4265380512" ] || [ "$model_size" -eq "7703807346" ] || [ "$model_size" -eq "7703810927" ]; then
        echo "Data files (weights) necessary for Stable Diffusion were already downloaded"
    else
        printf "\n\nThe model file present at models/stable-diffusion/sd-v1-4.ckpt is invalid. It is only $model_size bytes in size. Re-downloading.."
        rm ../models/stable-diffusion/sd-v1-4.ckpt
    fi
fi

if [ ! -f "../models/stable-diffusion/sd-v1-4.ckpt" ]; then
    echo "Downloading data files (weights) for Stable Diffusion.."

    curl -L -k https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt > ../models/stable-diffusion/sd-v1-4.ckpt

    if [ -f "../models/stable-diffusion/sd-v1-4.ckpt" ]; then
        model_size=`filesize "../models/stable-diffusion/sd-v1-4.ckpt"`
        if [ ! "$model_size" == "4265380512" ]; then
            fail "The downloaded model file was invalid! Bytes downloaded: $model_size"
        fi
    else
        fail "Error downloading the data files (weights) for Stable Diffusion"
    fi
fi


if [ -f "../models/gfpgan/GFPGANv1.3.pth" ]; then
    model_size=`filesize "../models/gfpgan/GFPGANv1.3.pth"`

    if [ "$model_size" -eq "348632874" ]; then
        echo "Data files (weights) necessary for GFPGAN (Face Correction) were already downloaded"
    else
        printf "\n\nThe model file present at models/gfpgan/GFPGANv1.3.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
        rm ../models/gfpgan/GFPGANv1.3.pth
    fi
fi

if [ ! -f "../models/gfpgan/GFPGANv1.3.pth" ]; then
    echo "Downloading data files (weights) for GFPGAN (Face Correction).."

    curl -L -k https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth > ../models/gfpgan/GFPGANv1.3.pth

    if [ -f "../models/gfpgan/GFPGANv1.3.pth" ]; then
        model_size=`filesize "../models/gfpgan/GFPGANv1.3.pth"`
        if [ ! "$model_size" -eq "348632874" ]; then
            fail "The downloaded GFPGAN model file was invalid! Bytes downloaded: $model_size"
        fi
    else
        fail "Error downloading the data files (weights) for GFPGAN (Face Correction)."
    fi
fi


if [ -f "../models/realesrgan/RealESRGAN_x4plus.pth" ]; then
    model_size=`filesize "../models/realesrgan/RealESRGAN_x4plus.pth"`

    if [ "$model_size" -eq "67040989" ]; then
        echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus were already downloaded"
    else
        printf "\n\nThe model file present at models/realesrgan/RealESRGAN_x4plus.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
        rm ../models/realesrgan/RealESRGAN_x4plus.pth
    fi
fi

if [ ! -f "../models/realesrgan/RealESRGAN_x4plus.pth" ]; then
    echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus.."

    curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth > ../models/realesrgan/RealESRGAN_x4plus.pth

    if [ -f "../models/realesrgan/RealESRGAN_x4plus.pth" ]; then
        model_size=`filesize "../models/realesrgan/RealESRGAN_x4plus.pth"`
        if [ ! "$model_size" -eq "67040989" ]; then
            fail "The downloaded ESRGAN x4plus model file was invalid! Bytes downloaded: $model_size"
        fi
    else
        fail "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus"
    fi
fi


if [ -f "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth" ]; then
    model_size=`filesize "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth"`

    if [ "$model_size" -eq "17938799" ]; then
        echo "Data files (weights) necessary for ESRGAN (Resolution Upscaling) x4plus_anime were already downloaded"
    else
        printf "\n\nThe model file present at models/realesrgan/RealESRGAN_x4plus_anime_6B.pth is invalid. It is only $model_size bytes in size. Re-downloading.."
        rm ../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth
    fi
fi

if [ ! -f "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth" ]; then
    echo "Downloading data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime.."

    curl -L -k https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth > ../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth

    if [ -f "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth" ]; then
        model_size=`filesize "../models/realesrgan/RealESRGAN_x4plus_anime_6B.pth"`
        if [ ! "$model_size" -eq "17938799" ]; then
            fail "The downloaded ESRGAN x4plus_anime model file was invalid! Bytes downloaded: $model_size"
        fi
    else
        fail "Error downloading the data files (weights) for ESRGAN (Resolution Upscaling) x4plus_anime."
    fi
fi


if [ -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
    model_size=`filesize "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt"`

    if [ "$model_size" -eq "334695179" ]; then
        echo "Data files (weights) necessary for the default VAE (sd-vae-ft-mse-original) were already downloaded"
    else
        printf "\n\nThe model file present at models/vae/vae-ft-mse-840000-ema-pruned.ckpt is invalid. It is only $model_size bytes in size. Re-downloading.."
        rm ../models/vae/vae-ft-mse-840000-ema-pruned.ckpt
    fi
fi

if [ ! -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
    echo "Downloading data files (weights) for the default VAE (sd-vae-ft-mse-original).."

    curl -L -k https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt > ../models/vae/vae-ft-mse-840000-ema-pruned.ckpt

    if [ -f "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt" ]; then
        model_size=`filesize "../models/vae/vae-ft-mse-840000-ema-pruned.ckpt"`
        if [ ! "$model_size" -eq "334695179" ]; then
            printf "\n\nError: The downloaded default VAE (sd-vae-ft-mse-original) file was invalid! Bytes downloaded: $model_size\n\n"
            printf "\n\nError downloading the data files (weights) for the default VAE (sd-vae-ft-mse-original). Sorry about that, please try to:\n 1. Run this installer again.\n 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting\n 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB\n 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues\nThanks!\n\n"
            read -p "Press any key to continue"
            exit
        fi
    else
        printf "\n\nError downloading the data files (weights) for the default VAE (sd-vae-ft-mse-original). Sorry about that, please try to:\n 1. Run this installer again.\n 2. If that doesn't fix it, please try the common troubleshooting steps at https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting\n 3. If those steps don't help, please copy *all* the error messages in this window, and ask the community at https://discord.com/invite/u9yhsFmEkB\n 4. If that doesn't solve the problem, please file an issue at https://github.com/cmdr2/stable-diffusion-ui/issues\nThanks!\n\n"
        read -p "Press any key to continue"
        exit
    fi
fi

if [ `grep -c sd_install_complete ../scripts/install_status.txt` -gt "0" ]; then
    echo sd_weights_downloaded >> ../scripts/install_status.txt
    echo sd_install_complete >> ../scripts/install_status.txt
fi

printf "\n\nEasy Diffusion installation complete, starting the server!\n\n"

SD_PATH=`pwd`

export PYTORCH_ENABLE_MPS_FALLBACK=1
export PYTHONPATH="$INSTALL_ENV_DIR/lib/python3.8/site-packages"
echo "PYTHONPATH=$PYTHONPATH"

which python
python --version

cd ..
export SD_UI_PATH=`pwd`/ui
cd stable-diffusion

uvicorn main:server_api --app-dir "$SD_UI_PATH" --port ${SD_UI_BIND_PORT:-9000} --host ${SD_UI_BIND_IP:-0.0.0.0} --log-level error

read -p "Press any key to continue"

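The final uvicorn line falls back to port 9000 and host 0.0.0.0, so the server can be rebound through environment variables without editing the script. A minimal sketch, assuming the installer layout the script expects (sd-ui-files, scripts, models) is present and it is launched from the installation root the way start.sh would launch it:

export SD_UI_BIND_IP=0.0.0.0
export SD_UI_BIND_PORT=7860   # example port; 9000 is the default when the variable is unset
./scripts/on_sd_start.sh
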
ui/__pycache__/main.cpython-38.pyc
ADDED
Binary file (331 Bytes).

ui/easydiffusion/__init__.py
ADDED
File without changes

ui/easydiffusion/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (152 Bytes).

ui/easydiffusion/__pycache__/app.cpython-38.pyc
ADDED
Binary file (9.25 kB).

ui/easydiffusion/__pycache__/device_manager.cpython-38.pyc
ADDED
Binary file (6.7 kB).

ui/easydiffusion/__pycache__/model_manager.cpython-38.pyc
ADDED
Binary file (7 kB).

ui/easydiffusion/__pycache__/renderer.cpython-38.pyc
ADDED
Binary file (5.05 kB).

ui/easydiffusion/__pycache__/server.cpython-38.pyc
ADDED
Binary file (10.4 kB).

ui/easydiffusion/__pycache__/task_manager.cpython-38.pyc
ADDED
Binary file (15.5 kB).

ui/easydiffusion/__pycache__/types.cpython-38.pyc
ADDED
Binary file (3.47 kB).

ui/easydiffusion/app.py
ADDED
@@ -0,0 +1,328 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import socket
import sys
import json
import traceback
import logging
import shlex
import urllib
from rich.logging import RichHandler

from sdkit.utils import log as sdkit_log  # hack, so we can overwrite the log config

from easydiffusion import task_manager
from easydiffusion.utils import log

# Remove all handlers associated with the root logger object.
for handler in logging.root.handlers[:]:
    logging.root.removeHandler(handler)

LOG_FORMAT = "%(asctime)s.%(msecs)03d %(levelname)s %(threadName)s %(message)s"
logging.basicConfig(
    level=logging.INFO,
    format=LOG_FORMAT,
    datefmt="%X",
    handlers=[RichHandler(markup=True, rich_tracebacks=False, show_time=False, show_level=False)],
)

SD_DIR = os.getcwd()

SD_UI_DIR = os.getenv("SD_UI_PATH", None)

CONFIG_DIR = os.path.abspath(os.path.join(SD_UI_DIR, "..", "scripts"))
MODELS_DIR = os.path.abspath(os.path.join(SD_DIR, "..", "models"))

USER_PLUGINS_DIR = os.path.abspath(os.path.join(SD_DIR, "..", "plugins"))
CORE_PLUGINS_DIR = os.path.abspath(os.path.join(SD_UI_DIR, "plugins"))

USER_UI_PLUGINS_DIR = os.path.join(USER_PLUGINS_DIR, "ui")
CORE_UI_PLUGINS_DIR = os.path.join(CORE_PLUGINS_DIR, "ui")
USER_SERVER_PLUGINS_DIR = os.path.join(USER_PLUGINS_DIR, "server")
UI_PLUGINS_SOURCES = ((CORE_UI_PLUGINS_DIR, "core"), (USER_UI_PLUGINS_DIR, "user"))

sys.path.append(os.path.dirname(SD_UI_DIR))
sys.path.append(USER_SERVER_PLUGINS_DIR)

OUTPUT_DIRNAME = "Stable Diffusion UI"  # in the user's home folder
PRESERVE_CONFIG_VARS = ["FORCE_FULL_PRECISION"]
TASK_TTL = 15 * 60  # Discard last session's task timeout
APP_CONFIG_DEFAULTS = {
    # auto: selects the cuda device with the most free memory, cuda: use the currently active cuda device.
    "render_devices": "auto",  # valid entries: 'auto', 'cpu' or 'cuda:N' (where N is a GPU index)
    "update_branch": "main",
    "ui": {
        "open_browser_on_start": True,
    },
}

IMAGE_EXTENSIONS = [".png", ".apng", ".jpg", ".jpeg", ".jfif", ".pjpeg", ".pjp", ".jxl", ".gif", ".webp", ".avif", ".svg"]
CUSTOM_MODIFIERS_DIR = os.path.abspath(os.path.join(SD_DIR, "..", "modifiers"))
CUSTOM_MODIFIERS_PORTRAIT_EXTENSIONS = [".portrait", "_portrait", " portrait", "-portrait"]
CUSTOM_MODIFIERS_LANDSCAPE_EXTENSIONS = [".landscape", "_landscape", " landscape", "-landscape"]


def init():
    os.makedirs(USER_UI_PLUGINS_DIR, exist_ok=True)
    os.makedirs(USER_SERVER_PLUGINS_DIR, exist_ok=True)

    load_server_plugins()

    update_render_threads()


def getConfig(default_val=APP_CONFIG_DEFAULTS):
    try:
        config_json_path = os.path.join(CONFIG_DIR, "config.json")
        if not os.path.exists(config_json_path):
            config = default_val
        else:
            with open(config_json_path, "r", encoding="utf-8") as f:
                config = json.load(f)
        if "net" not in config:
            config["net"] = {}
        if os.getenv("SD_UI_BIND_PORT") is not None:
            config["net"]["listen_port"] = int(os.getenv("SD_UI_BIND_PORT"))
        else:
            config["net"]["listen_port"] = 9000
        if os.getenv("SD_UI_BIND_IP") is not None:
            config["net"]["listen_to_network"] = os.getenv("SD_UI_BIND_IP") == "0.0.0.0"
        else:
            config["net"]["listen_to_network"] = True
        return config
    except Exception as e:
        log.warn(traceback.format_exc())
        return default_val


def setConfig(config):
    try:  # config.json
        config_json_path = os.path.join(CONFIG_DIR, "config.json")
        with open(config_json_path, "w", encoding="utf-8") as f:
            json.dump(config, f)
    except:
        log.error(traceback.format_exc())

    try:  # config.bat
        config_bat_path = os.path.join(CONFIG_DIR, "config.bat")
        config_bat = []

        if "update_branch" in config:
            config_bat.append(f"@set update_branch={config['update_branch']}")

        config_bat.append(f"@set SD_UI_BIND_PORT={config['net']['listen_port']}")
        bind_ip = "0.0.0.0" if config["net"]["listen_to_network"] else "127.0.0.1"
        config_bat.append(f"@set SD_UI_BIND_IP={bind_ip}")

        # Preserve these variables if they are set
        for var in PRESERVE_CONFIG_VARS:
            if os.getenv(var) is not None:
                config_bat.append(f"@set {var}={os.getenv(var)}")

        if len(config_bat) > 0:
            with open(config_bat_path, "w", encoding="utf-8") as f:
                f.write("\n".join(config_bat))
    except:
        log.error(traceback.format_exc())

    try:  # config.sh
        config_sh_path = os.path.join(CONFIG_DIR, "config.sh")
        config_sh = ["#!/bin/bash"]

        if "update_branch" in config:
            config_sh.append(f"export update_branch={config['update_branch']}")

        config_sh.append(f"export SD_UI_BIND_PORT={config['net']['listen_port']}")
        bind_ip = "0.0.0.0" if config["net"]["listen_to_network"] else "127.0.0.1"
        config_sh.append(f"export SD_UI_BIND_IP={bind_ip}")

        # Preserve these variables if they are set
        for var in PRESERVE_CONFIG_VARS:
            if os.getenv(var) is not None:
                config_sh.append(f'export {var}="{shlex.quote(os.getenv(var))}"')

        if len(config_sh) > 1:
            with open(config_sh_path, "w", encoding="utf-8") as f:
                f.write("\n".join(config_sh))
    except:
        log.error(traceback.format_exc())


def save_to_config(ckpt_model_name, vae_model_name, hypernetwork_model_name, vram_usage_level):
    config = getConfig()
    if "model" not in config:
        config["model"] = {}

    config["model"]["stable-diffusion"] = ckpt_model_name
    config["model"]["vae"] = vae_model_name
    config["model"]["hypernetwork"] = hypernetwork_model_name

    if vae_model_name is None or vae_model_name == "":
        del config["model"]["vae"]
    if hypernetwork_model_name is None or hypernetwork_model_name == "":
        del config["model"]["hypernetwork"]

    config["vram_usage_level"] = vram_usage_level

    setConfig(config)


def update_render_threads():
    config = getConfig()
    render_devices = config.get("render_devices", "auto")
    active_devices = task_manager.get_devices()["active"].keys()

    log.debug(f"requesting for render_devices: {render_devices}")
    task_manager.update_render_threads(render_devices, active_devices)


def getUIPlugins():
    plugins = []

    for plugins_dir, dir_prefix in UI_PLUGINS_SOURCES:
        for file in os.listdir(plugins_dir):
            if file.endswith(".plugin.js"):
                plugins.append(f"/plugins/{dir_prefix}/{file}")

    return plugins


def load_server_plugins():
    if not os.path.exists(USER_SERVER_PLUGINS_DIR):
        return

    import importlib

    def load_plugin(file):
        mod_path = file.replace(".py", "")
        return importlib.import_module(mod_path)

    def apply_plugin(file, plugin):
        if hasattr(plugin, "get_cond_and_uncond"):
            import sdkit.generate.image_generator

            sdkit.generate.image_generator.get_cond_and_uncond = plugin.get_cond_and_uncond
            log.info(f"Overridden get_cond_and_uncond with the one in the server plugin: {file}")

    for file in os.listdir(USER_SERVER_PLUGINS_DIR):
        file_path = os.path.join(USER_SERVER_PLUGINS_DIR, file)
        if (not os.path.isdir(file_path) and not file_path.endswith("_plugin.py")) or (
            os.path.isdir(file_path) and not file_path.endswith("_plugin")
        ):
            continue

        try:
            log.info(f"Loading server plugin: {file}")
            mod = load_plugin(file)

            log.info(f"Applying server plugin: {file}")
            apply_plugin(file, mod)
        except:
            log.warn(f"Error while loading a server plugin")
            log.warn(traceback.format_exc())


def getIPConfig():
    try:
        ips = socket.gethostbyname_ex(socket.gethostname())
        ips[2].append(ips[0])
        return ips[2]
    except Exception as e:
        log.exception(e)
        return []


def open_browser():
    config = getConfig()
    ui = config.get("ui", {})
    net = config.get("net", {"listen_port": 9000})
    port = net.get("listen_port", 9000)
    if ui.get("open_browser_on_start", True):
        import webbrowser

        webbrowser.open(f"http://localhost:{port}")


def get_image_modifiers():
    modifiers_json_path = os.path.join(SD_UI_DIR, "modifiers.json")

    modifier_categories = {}
    original_category_order = []
    with open(modifiers_json_path, "r", encoding="utf-8") as f:
        modifiers_file = json.load(f)

    # The trailing slash is needed to support symlinks
    if not os.path.isdir(f"{CUSTOM_MODIFIERS_DIR}/"):
        return modifiers_file

    # convert modifiers from a list of objects to a dict of dicts
    for category_item in modifiers_file:
        category_name = category_item['category']
        original_category_order.append(category_name)
        category = {}
        for modifier_item in category_item['modifiers']:
            modifier = {}
            for preview_item in modifier_item['previews']:
                modifier[preview_item['name']] = preview_item['path']
            category[modifier_item['modifier']] = modifier
        modifier_categories[category_name] = category

    def scan_directory(directory_path: str, category_name="Modifiers"):
        for entry in os.scandir(directory_path):
            if entry.is_file():
                file_extension = list(filter(lambda e: entry.name.endswith(e), IMAGE_EXTENSIONS))
                if len(file_extension) == 0:
                    continue

                modifier_name = entry.name[: -len(file_extension[0])]
                modifier_path = f"custom/{entry.path[len(CUSTOM_MODIFIERS_DIR) + 1:]}"
                # URL encode path segments
                modifier_path = "/".join(map(lambda segment: urllib.parse.quote(segment), modifier_path.split("/")))
                is_portrait = True
                is_landscape = True

                portrait_extension = list(filter(lambda e: modifier_name.lower().endswith(e), CUSTOM_MODIFIERS_PORTRAIT_EXTENSIONS))
                landscape_extension = list(filter(lambda e: modifier_name.lower().endswith(e), CUSTOM_MODIFIERS_LANDSCAPE_EXTENSIONS))

                if len(portrait_extension) > 0:
                    is_landscape = False
                    modifier_name = modifier_name[: -len(portrait_extension[0])]
                elif len(landscape_extension) > 0:
                    is_portrait = False
                    modifier_name = modifier_name[: -len(landscape_extension[0])]

                if (category_name not in modifier_categories):
                    modifier_categories[category_name] = {}

                category = modifier_categories[category_name]

                if (modifier_name not in category):
                    category[modifier_name] = {}

                if (is_portrait or "portrait" not in category[modifier_name]):
                    category[modifier_name]["portrait"] = modifier_path

                if (is_landscape or "landscape" not in category[modifier_name]):
                    category[modifier_name]["landscape"] = modifier_path
            elif entry.is_dir():
                scan_directory(
                    entry.path,
                    entry.name if directory_path == CUSTOM_MODIFIERS_DIR else f"{category_name}/{entry.name}",
                )

    scan_directory(CUSTOM_MODIFIERS_DIR)

    custom_categories = sorted(
        [cn for cn in modifier_categories.keys() if cn not in original_category_order],
        key=str.casefold,
    )

    # convert the modifiers back into a list of objects
    modifier_categories_list = []
    for category_name in [*original_category_order, *custom_categories]:
        category = {'category': category_name, 'modifiers': []}
        for modifier_name in sorted(modifier_categories[category_name].keys(), key=str.casefold):
            modifier = {'modifier': modifier_name, 'previews': []}
            for preview_name, preview_path in modifier_categories[category_name][modifier_name].items():
                modifier['previews'].append({'name': preview_name, 'path': preview_path})
            category['modifiers'].append(modifier)
        modifier_categories_list.append(category)

    return modifier_categories_list
ui/easydiffusion/device_manager.py
ADDED
@@ -0,0 +1,253 @@
import os
import platform
import torch
import traceback
import re

from easydiffusion.utils import log

"""
Set `FORCE_FULL_PRECISION` in the environment variables, or in `config.bat`/`config.sh` to set full precision (i.e. float32).
Otherwise the models will load at half-precision (i.e. float16).

Half-precision is fine most of the time. Full precision is only needed for working around GPU bugs (like NVIDIA 16xx GPUs).
"""

COMPARABLE_GPU_PERCENTILE = (
    0.65  # if a GPU's free_mem is within this % of the GPU with the most free_mem, it will be picked
)

mem_free_threshold = 0


def get_device_delta(render_devices, active_devices):
    """
    render_devices: 'cpu', or 'auto', or 'mps' or ['cuda:N'...]
    active_devices: ['cpu', 'mps', 'cuda:N'...]
    """

    if render_devices in ("cpu", "auto", "mps"):
        render_devices = [render_devices]
    elif render_devices is not None:
        if isinstance(render_devices, str):
            render_devices = [render_devices]
        if isinstance(render_devices, list) and len(render_devices) > 0:
            render_devices = list(filter(lambda x: x.startswith("cuda:") or x == "mps", render_devices))
            if len(render_devices) == 0:
                raise Exception(
                    'Invalid render_devices value in config.json. Valid: {"render_devices": ["cuda:0", "cuda:1"...]}, or {"render_devices": "cpu"} or {"render_devices": "mps"} or {"render_devices": "auto"}'
                )

            render_devices = list(filter(lambda x: is_device_compatible(x), render_devices))
            if len(render_devices) == 0:
                raise Exception(
                    "Sorry, none of the render_devices configured in config.json are compatible with Stable Diffusion"
                )
        else:
            raise Exception(
                'Invalid render_devices value in config.json. Valid: {"render_devices": ["cuda:0", "cuda:1"...]}, or {"render_devices": "cpu"} or {"render_devices": "auto"}'
            )
    else:
        render_devices = ["auto"]

    if "auto" in render_devices:
        render_devices = auto_pick_devices(active_devices)
        if "cpu" in render_devices:
            log.warn("WARNING: Could not find a compatible GPU. Using the CPU, but this will be very slow!")

    active_devices = set(active_devices)
    render_devices = set(render_devices)

    devices_to_start = render_devices - active_devices
    devices_to_stop = active_devices - render_devices

    return devices_to_start, devices_to_stop


def is_mps_available():
    return (
        platform.system() == "Darwin"
        and hasattr(torch.backends, "mps")
        and torch.backends.mps.is_available()
        and torch.backends.mps.is_built()
    )


def is_cuda_available():
    return torch.cuda.is_available()


def auto_pick_devices(currently_active_devices):
    global mem_free_threshold

    if is_mps_available():
        return ["mps"]

    if not is_cuda_available():
        return ["cpu"]

    device_count = torch.cuda.device_count()
    if device_count == 1:
        return ["cuda:0"] if is_device_compatible("cuda:0") else ["cpu"]

    log.debug("Autoselecting GPU. Using most free memory.")
    devices = []
    for device in range(device_count):
        device = f"cuda:{device}"
        if not is_device_compatible(device):
            continue

        mem_free, mem_total = torch.cuda.mem_get_info(device)
        mem_free /= float(10**9)
        mem_total /= float(10**9)
        device_name = torch.cuda.get_device_name(device)
        log.debug(
            f"{device} detected: {device_name} - Memory (free/total): {round(mem_free, 2)}Gb / {round(mem_total, 2)}Gb"
        )
        devices.append({"device": device, "device_name": device_name, "mem_free": mem_free})

    devices.sort(key=lambda x: x["mem_free"], reverse=True)
    max_mem_free = devices[0]["mem_free"]
    curr_mem_free_threshold = COMPARABLE_GPU_PERCENTILE * max_mem_free
    mem_free_threshold = max(curr_mem_free_threshold, mem_free_threshold)

    # Auto-pick algorithm:
    # 1. Pick the top 75 percentile of the GPUs, sorted by free_mem.
    # 2. Also include already-running devices (GPU-only), otherwise their free_mem will
    #    always be very low (since their VRAM contains the model).
    #    These already-running devices probably aren't terrible, since they were picked in the past.
    #    Worst case, the user can restart the program and that'll get rid of them.
    devices = list(
        filter((lambda x: x["mem_free"] > mem_free_threshold or x["device"] in currently_active_devices), devices)
    )
    devices = list(map(lambda x: x["device"], devices))
    return devices


def device_init(context, device):
    """
    This function assumes the 'device' has already been verified to be compatible.
    `get_device_delta()` has already filtered out incompatible devices.
    """

    validate_device_id(device, log_prefix="device_init")

    if "cuda" not in device:
        context.device = device
        context.device_name = get_processor_name()
        context.half_precision = False
        log.debug(f"Render device available as {context.device_name}")
        return

    context.device_name = torch.cuda.get_device_name(device)
    context.device = device

    # Force full precision on 1660 and 1650 NVIDIA cards to avoid creating green images
    if needs_to_force_full_precision(context):
        log.warn(f"forcing full precision on this GPU, to avoid green images. GPU detected: {context.device_name}")
        # Apply force_full_precision now before models are loaded.
        context.half_precision = False

    log.info(f'Setting {device} as active, with precision: {"half" if context.half_precision else "full"}')
    torch.cuda.device(device)


def needs_to_force_full_precision(context):
    if "FORCE_FULL_PRECISION" in os.environ:
        return True

    device_name = context.device_name.lower()
    return (
        ("nvidia" in device_name or "geforce" in device_name or "quadro" in device_name)
        and (
            " 1660" in device_name
            or " 1650" in device_name
            or " t400" in device_name
            or " t550" in device_name
            or " t600" in device_name
            or " t1000" in device_name
            or " t1200" in device_name
            or " t2000" in device_name
        )
    ) or ("tesla k40m" in device_name)


def get_max_vram_usage_level(device):
    if "cuda" in device:
        _, mem_total = torch.cuda.mem_get_info(device)
    else:
        return "high"

    mem_total /= float(10**9)
    if mem_total < 4.5:
        return "low"
    elif mem_total < 6.5:
        return "balanced"

    return "high"


def validate_device_id(device, log_prefix=""):
    def is_valid():
        if not isinstance(device, str):
            return False
        if device == "cpu" or device == "mps":
            return True
        if not device.startswith("cuda:") or not device[5:].isnumeric():
            return False
        return True

    if not is_valid():
        raise EnvironmentError(
            f"{log_prefix}: device id should be 'cpu', 'mps', or 'cuda:N' (where N is an integer index for the GPU). Got: {device}"
        )


def is_device_compatible(device):
    """
    Returns True/False, and prints any compatibility errors
    """
    # static variable "history".
    is_device_compatible.history = getattr(is_device_compatible, "history", {})
    try:
        validate_device_id(device, log_prefix="is_device_compatible")
    except Exception as e:
        log.error(str(e))
        return False

    if device in ("cpu", "mps"):
        return True
    # Memory check
    try:
        _, mem_total = torch.cuda.mem_get_info(device)
        mem_total /= float(10**9)
        if mem_total < 3.0:
            if is_device_compatible.history.get(device) == None:
                log.warn(f"GPU {device} with less than 3 GB of VRAM is not compatible with Stable Diffusion")
                is_device_compatible.history[device] = 1
            return False
    except RuntimeError as e:
        log.error(str(e))
        return False
    return True


def get_processor_name():
    try:
        import subprocess

        if platform.system() == "Windows":
            return platform.processor()
        elif platform.system() == "Darwin":
            os.environ["PATH"] = os.environ["PATH"] + os.pathsep + "/usr/sbin"
            command = "sysctl -n machdep.cpu.brand_string"
            return subprocess.check_output(command, shell=True).decode().strip()
        elif platform.system() == "Linux":
            command = "cat /proc/cpuinfo"
            all_info = subprocess.check_output(command, shell=True).decode().strip()
            for line in all_info.split("\n"):
                if "model name" in line:
                    return re.sub(".*model name.*:", "", line, 1).strip()
    except:
        log.error(traceback.format_exc())
    return "cpu"
ui/easydiffusion/model_manager.py
ADDED
@@ -0,0 +1,255 @@
import os

from easydiffusion import app
from easydiffusion.types import TaskData
from easydiffusion.utils import log

from sdkit import Context
from sdkit.models import load_model, unload_model, scan_model

KNOWN_MODEL_TYPES = ["stable-diffusion", "vae", "hypernetwork", "gfpgan", "realesrgan"]
MODEL_EXTENSIONS = {
    "stable-diffusion": [".ckpt", ".safetensors"],
    "vae": [".vae.pt", ".ckpt", ".safetensors"],
    "hypernetwork": [".pt", ".safetensors"],
    "gfpgan": [".pth"],
    "realesrgan": [".pth"],
}
DEFAULT_MODELS = {
    "stable-diffusion": [  # needed to support the legacy installations
        "custom-model",  # only one custom model file was supported initially, creatively named 'custom-model'
        "sd-v1-4",  # Default fallback.
    ],
    "gfpgan": ["GFPGANv1.3"],
    "realesrgan": ["RealESRGAN_x4plus"],
}
MODELS_TO_LOAD_ON_START = ["stable-diffusion", "vae", "hypernetwork"]

known_models = {}


def init():
    make_model_folders()
    getModels()  # run this once, to cache the picklescan results


def load_default_models(context: Context):
    set_vram_optimizations(context)

    # init default model paths
    for model_type in MODELS_TO_LOAD_ON_START:
        context.model_paths[model_type] = resolve_model_to_use(model_type=model_type)
        try:
            load_model(context, model_type)
        except Exception as e:
            log.error(f"[red]Error while loading {model_type} model: {context.model_paths[model_type]}[/red]")
            log.error(f"[red]Error: {e}[/red]")
            log.error(f"[red]Consider removing the model from the model folder.[red]")


def unload_all(context: Context):
    for model_type in KNOWN_MODEL_TYPES:
        unload_model(context, model_type)


def resolve_model_to_use(model_name: str = None, model_type: str = None):
    model_extensions = MODEL_EXTENSIONS.get(model_type, [])
    default_models = DEFAULT_MODELS.get(model_type, [])
    config = app.getConfig()

    model_dirs = [os.path.join(app.MODELS_DIR, model_type), app.SD_DIR]
    if not model_name:  # When None try user configured model.
        # config = getConfig()
        if "model" in config and model_type in config["model"]:
            model_name = config["model"][model_type]

    if model_name:
        # Check models directory
        models_dir_path = os.path.join(app.MODELS_DIR, model_type, model_name)
        for model_extension in model_extensions:
            if os.path.exists(models_dir_path + model_extension):
                return models_dir_path + model_extension
            if os.path.exists(model_name + model_extension):
                return os.path.abspath(model_name + model_extension)

    # Default locations
    if model_name in default_models:
        default_model_path = os.path.join(app.SD_DIR, model_name)
        for model_extension in model_extensions:
            if os.path.exists(default_model_path + model_extension):
                return default_model_path + model_extension

    # Can't find requested model, check the default paths.
    for default_model in default_models:
        for model_dir in model_dirs:
            default_model_path = os.path.join(model_dir, default_model)
            for model_extension in model_extensions:
                if os.path.exists(default_model_path + model_extension):
                    if model_name is not None:
                        log.warn(
                            f"Could not find the configured custom model {model_name}{model_extension}. Using the default one: {default_model_path}{model_extension}"
                        )
                    return default_model_path + model_extension

    return None


def reload_models_if_necessary(context: Context, task_data: TaskData):
    model_paths_in_req = {
        "stable-diffusion": task_data.use_stable_diffusion_model,
        "vae": task_data.use_vae_model,
        "hypernetwork": task_data.use_hypernetwork_model,
        "gfpgan": task_data.use_face_correction,
        "realesrgan": task_data.use_upscale,
        "nsfw_checker": True if task_data.block_nsfw else None,
    }
    models_to_reload = {
        model_type: path
        for model_type, path in model_paths_in_req.items()
        if context.model_paths.get(model_type) != path
    }

    if set_vram_optimizations(context):  # reload SD
        models_to_reload["stable-diffusion"] = model_paths_in_req["stable-diffusion"]

    for model_type, model_path_in_req in models_to_reload.items():
        context.model_paths[model_type] = model_path_in_req

        action_fn = unload_model if context.model_paths[model_type] is None else load_model
        action_fn(context, model_type, scan_model=False)  # we've scanned them already


def resolve_model_paths(task_data: TaskData):
    task_data.use_stable_diffusion_model = resolve_model_to_use(
        task_data.use_stable_diffusion_model, model_type="stable-diffusion"
    )
    task_data.use_vae_model = resolve_model_to_use(task_data.use_vae_model, model_type="vae")
    task_data.use_hypernetwork_model = resolve_model_to_use(task_data.use_hypernetwork_model, model_type="hypernetwork")

    if task_data.use_face_correction:
        task_data.use_face_correction = resolve_model_to_use(task_data.use_face_correction, "gfpgan")
    if task_data.use_upscale:
        task_data.use_upscale = resolve_model_to_use(task_data.use_upscale, "realesrgan")


def set_vram_optimizations(context: Context):
    config = app.getConfig()
    vram_usage_level = config.get("vram_usage_level", "balanced")

    if vram_usage_level != context.vram_usage_level:
        context.vram_usage_level = vram_usage_level
        return True

    return False


def make_model_folders():
    for model_type in KNOWN_MODEL_TYPES:
        model_dir_path = os.path.join(app.MODELS_DIR, model_type)

        os.makedirs(model_dir_path, exist_ok=True)

        help_file_name = f"Place your {model_type} model files here.txt"
        help_file_contents = f'Supported extensions: {" or ".join(MODEL_EXTENSIONS.get(model_type))}'

        with open(os.path.join(model_dir_path, help_file_name), "w", encoding="utf-8") as f:
            f.write(help_file_contents)


def is_malicious_model(file_path):
    try:
        if file_path.endswith(".safetensors"):
            return False
        scan_result = scan_model(file_path)
        if scan_result.issues_count > 0 or scan_result.infected_files > 0:
            log.warn(
                ":warning: [bold red]Scan %s: %d scanned, %d issue, %d infected.[/bold red]"
                % (file_path, scan_result.scanned_files, scan_result.issues_count, scan_result.infected_files)
            )
            return True
        else:
            log.debug(
                "Scan %s: [green]%d scanned, %d issue, %d infected.[/green]"
                % (file_path, scan_result.scanned_files, scan_result.issues_count, scan_result.infected_files)
            )
            return False
    except Exception as e:
        log.error(f"error while scanning: {file_path}, error: {e}")
    return False


def getModels():
    models = {
        "active": {
            "stable-diffusion": "sd-v1-4",
            "vae": "",
            "hypernetwork": "",
        },
        "options": {
            "stable-diffusion": ["sd-v1-4"],
            "vae": [],
            "hypernetwork": [],
        },
    }

    models_scanned = 0

    class MaliciousModelException(Exception):
        "Raised when picklescan reports a problem with a model"
        pass

    def scan_directory(directory, suffixes, directoriesFirst: bool = True):
        nonlocal models_scanned
        tree = []
        for entry in sorted(
            os.scandir(directory), key=lambda entry: (entry.is_file() == directoriesFirst, entry.name.lower())
        ):
            if entry.is_file():
                matching_suffix = list(filter(lambda s: entry.name.endswith(s), suffixes))
                if len(matching_suffix) == 0:
                    continue
                matching_suffix = matching_suffix[0]

                mtime = entry.stat().st_mtime
                mod_time = known_models[entry.path] if entry.path in known_models else -1
                if mod_time != mtime:
                    models_scanned += 1
                    if is_malicious_model(entry.path):
                        raise MaliciousModelException(entry.path)
                known_models[entry.path] = mtime
                tree.append(entry.name[: -len(matching_suffix)])
            elif entry.is_dir():
                scan = scan_directory(entry.path, suffixes, directoriesFirst=False)

                if len(scan) != 0:
                    tree.append((entry.name, scan))
        return tree

    def listModels(model_type):
        nonlocal models_scanned

        model_extensions = MODEL_EXTENSIONS.get(model_type, [])
        models_dir = os.path.join(app.MODELS_DIR, model_type)
        if not os.path.exists(models_dir):
            os.makedirs(models_dir)

        try:
            models["options"][model_type] = scan_directory(models_dir, model_extensions)
        except MaliciousModelException as e:
            models["scan-error"] = e

    # custom models
    listModels(model_type="stable-diffusion")
    listModels(model_type="vae")
    listModels(model_type="hypernetwork")
    listModels(model_type="gfpgan")

    if models_scanned > 0:
        log.info(f"[green]Scanned {models_scanned} models. Nothing infected[/]")

    # legacy
    custom_weight_path = os.path.join(app.SD_DIR, "custom-model.ckpt")
    if os.path.exists(custom_weight_path):
        models["options"]["stable-diffusion"].append("custom-model")

    return models
ui/easydiffusion/renderer.py
ADDED
@@ -0,0 +1,180 @@
import queue
import time
import json
import pprint

from easydiffusion import device_manager
from easydiffusion.types import TaskData, Response, Image as ResponseImage, UserInitiatedStop, GenerateImageRequest
from easydiffusion.utils import get_printable_request, save_images_to_disk, log

from sdkit import Context
from sdkit.generate import generate_images
from sdkit.filter import apply_filters
from sdkit.utils import img_to_buffer, img_to_base64_str, latent_samples_to_images, gc

context = Context()  # thread-local
"""
runtime data (bound locally to this thread), for e.g. device, references to loaded models, optimization flags etc
"""


def init(device):
    """
    Initializes the fields that will be bound to this runtime's context, and sets the current torch device
    """
    context.stop_processing = False
    context.temp_images = {}
    context.partial_x_samples = None

    device_manager.device_init(context, device)


def make_images(
    req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback
):
    context.stop_processing = False
    print_task_info(req, task_data)

    images, seeds = make_images_internal(req, task_data, data_queue, task_temp_images, step_callback)

    res = Response(req, task_data, images=construct_response(images, seeds, task_data, base_seed=req.seed))
    res = res.json()
    data_queue.put(json.dumps(res))
    log.info("Task completed")

    return res


def print_task_info(req: GenerateImageRequest, task_data: TaskData):
    req_str = pprint.pformat(get_printable_request(req)).replace("[", "\[")
    task_str = pprint.pformat(task_data.dict()).replace("[", "\[")
    log.info(f"request: {req_str}")
    log.info(f"task data: {task_str}")


def make_images_internal(
    req: GenerateImageRequest, task_data: TaskData, data_queue: queue.Queue, task_temp_images: list, step_callback
):

    images, user_stopped = generate_images_internal(
        req, task_data, data_queue, task_temp_images, step_callback, task_data.stream_image_progress, task_data.stream_image_progress_interval
    )
    filtered_images = filter_images(task_data, images, user_stopped)

    if task_data.save_to_disk_path is not None:
        save_images_to_disk(images, filtered_images, req, task_data)

    seeds = [*range(req.seed, req.seed + len(images))]
    if task_data.show_only_filtered_image or filtered_images is images:
        return filtered_images, seeds
    else:
        return images + filtered_images, seeds + seeds


def generate_images_internal(
    req: GenerateImageRequest,
    task_data: TaskData,
    data_queue: queue.Queue,
    task_temp_images: list,
    step_callback,
    stream_image_progress: bool,
    stream_image_progress_interval: int,
):
    context.temp_images.clear()

    callback = make_step_callback(req, task_data, data_queue, task_temp_images, step_callback, stream_image_progress, stream_image_progress_interval)

    try:
        if req.init_image is not None:
            req.sampler_name = "ddim"

        images = generate_images(context, callback=callback, **req.dict())
        user_stopped = False
    except UserInitiatedStop:
        images = []
        user_stopped = True
        if context.partial_x_samples is not None:
            images = latent_samples_to_images(context, context.partial_x_samples)
    finally:
        if hasattr(context, "partial_x_samples") and context.partial_x_samples is not None:
            del context.partial_x_samples
            context.partial_x_samples = None

    return images, user_stopped


def filter_images(task_data: TaskData, images: list, user_stopped):
    if user_stopped:
        return images

    filters_to_apply = []
    if task_data.block_nsfw:
        filters_to_apply.append("nsfw_checker")
    if task_data.use_face_correction and "gfpgan" in task_data.use_face_correction.lower():
        filters_to_apply.append("gfpgan")
    if task_data.use_upscale and "realesrgan" in task_data.use_upscale.lower():
        filters_to_apply.append("realesrgan")

    if len(filters_to_apply) == 0:
        return images

    return apply_filters(context, filters_to_apply, images, scale=task_data.upscale_amount)


def construct_response(images: list, seeds: list, task_data: TaskData, base_seed: int):
    return [
        ResponseImage(
            data=img_to_base64_str(img, task_data.output_format, task_data.output_quality),
            seed=seed,
        )
        for img, seed in zip(images, seeds)
    ]


def make_step_callback(
    req: GenerateImageRequest,
    task_data: TaskData,
    data_queue: queue.Queue,
    task_temp_images: list,
    step_callback,
    stream_image_progress: bool,
    stream_image_progress_interval: int,
):
    n_steps = req.num_inference_steps if req.init_image is None else int(req.num_inference_steps * req.prompt_strength)
    last_callback_time = -1

    def update_temp_img(x_samples, task_temp_images: list):
        partial_images = []
        images = latent_samples_to_images(context, x_samples)
        if task_data.block_nsfw:
            images = apply_filters(context, "nsfw_checker", images)

        for i, img in enumerate(images):
            buf = img_to_buffer(img, output_format="JPEG")

            context.temp_images[f"{task_data.request_id}/{i}"] = buf
            task_temp_images[i] = buf
            partial_images.append({"path": f"/image/tmp/{task_data.request_id}/{i}"})
        del images
        return partial_images

    def on_image_step(x_samples, i):
        nonlocal last_callback_time

        context.partial_x_samples = x_samples
        step_time = time.time() - last_callback_time if last_callback_time != -1 else -1
        last_callback_time = time.time()

        progress = {"step": i, "step_time": step_time, "total_steps": n_steps}

        if stream_image_progress and stream_image_progress_interval > 0 and i % stream_image_progress_interval == 0:
            progress["output"] = update_temp_img(x_samples, task_temp_images)

        data_queue.put(json.dumps(progress))

        step_callback()

        if context.stop_processing:
            raise UserInitiatedStop("User requested that we stop processing")

    return on_image_step
ui/easydiffusion/server.py
ADDED
@@ -0,0 +1,304 @@
1 |
+
"""server.py: FastAPI SD-UI Web Host.
|
2 |
+
Notes:
|
3 |
+
async endpoints always run on the main thread. Without they run on the thread pool.
|
4 |
+
"""
|
5 |
+
import os
|
6 |
+
import traceback
|
7 |
+
import datetime
|
8 |
+
from typing import List, Union
|
9 |
+
|
10 |
+
from fastapi import FastAPI, HTTPException
|
11 |
+
from fastapi.staticfiles import StaticFiles
|
12 |
+
from starlette.responses import FileResponse, JSONResponse, StreamingResponse
|
13 |
+
from pydantic import BaseModel
|
14 |
+
|
15 |
+
from easydiffusion import app, model_manager, task_manager
|
16 |
+
from easydiffusion.types import TaskData, GenerateImageRequest, MergeRequest
|
17 |
+
from easydiffusion.utils import log
|
18 |
+
|
19 |
+
import mimetypes
|
20 |
+
|
21 |
+
log.info(f"started in {app.SD_DIR}")
|
22 |
+
log.info(f"started at {datetime.datetime.now():%x %X}")
|
23 |
+
|
24 |
+
server_api = FastAPI()
|
25 |
+
|
26 |
+
NOCACHE_HEADERS = {"Cache-Control": "no-cache, no-store, must-revalidate", "Pragma": "no-cache", "Expires": "0"}
|
27 |
+
|
28 |
+
|
29 |
+
class NoCacheStaticFiles(StaticFiles):
|
30 |
+
def __init__(self, directory: str):
|
31 |
+
# follow_symlink is only available on fastapi >= 0.92.0
|
32 |
+
if (os.path.islink(directory)):
|
33 |
+
super().__init__(directory = os.path.realpath(directory))
|
34 |
+
else:
|
35 |
+
super().__init__(directory = directory)
|
36 |
+
|
37 |
+
def is_not_modified(self, response_headers, request_headers) -> bool:
|
38 |
+
if "content-type" in response_headers and (
|
39 |
+
"javascript" in response_headers["content-type"] or "css" in response_headers["content-type"]
|
40 |
+
):
|
41 |
+
response_headers.update(NOCACHE_HEADERS)
|
42 |
+
return False
|
43 |
+
|
44 |
+
return super().is_not_modified(response_headers, request_headers)
|
45 |
+
|
46 |
+
|
47 |
+
class SetAppConfigRequest(BaseModel):
|
48 |
+
update_branch: str = None
|
49 |
+
render_devices: Union[List[str], List[int], str, int] = None
|
50 |
+
model_vae: str = None
|
51 |
+
ui_open_browser_on_start: bool = None
|
52 |
+
listen_to_network: bool = None
|
53 |
+
listen_port: int = None
|
54 |
+
|
55 |
+
|
56 |
+
def init():
|
57 |
+
mimetypes.init()
|
58 |
+
mimetypes.add_type('text/css', '.css')
|
59 |
+
|
60 |
+
if os.path.isdir(app.CUSTOM_MODIFIERS_DIR):
|
61 |
+
server_api.mount(
|
62 |
+
"/media/modifier-thumbnails/custom",
|
63 |
+
NoCacheStaticFiles(directory=app.CUSTOM_MODIFIERS_DIR),
|
64 |
+
name="custom-thumbnails",
|
65 |
+
)
|
66 |
+
|
67 |
+
server_api.mount("/media", NoCacheStaticFiles(directory=os.path.join(app.SD_UI_DIR, "media")), name="media")
|
68 |
+
|
69 |
+
for plugins_dir, dir_prefix in app.UI_PLUGINS_SOURCES:
|
70 |
+
server_api.mount(
|
71 |
+
f"/plugins/{dir_prefix}", NoCacheStaticFiles(directory=plugins_dir), name=f"plugins-{dir_prefix}"
|
72 |
+
)
|
73 |
+
|
74 |
+
@server_api.post("/app_config")
|
75 |
+
async def set_app_config(req: SetAppConfigRequest):
|
76 |
+
return set_app_config_internal(req)
|
77 |
+
|
78 |
+
@server_api.get("/get/{key:path}")
|
79 |
+
def read_web_data(key: str = None):
|
80 |
+
return read_web_data_internal(key)
|
81 |
+
|
82 |
+
@server_api.get("/ping") # Get server and optionally session status.
|
83 |
+
def ping(session_id: str = None):
|
84 |
+
return ping_internal(session_id)
|
85 |
+
|
86 |
+
@server_api.post("/render")
|
87 |
+
def render(req: dict):
|
88 |
+
return render_internal(req)
|
89 |
+
|
90 |
+
@server_api.post("/model/merge")
|
91 |
+
def model_merge(req: dict):
|
92 |
+
print(req)
|
93 |
+
return model_merge_internal(req)
|
94 |
+
|
95 |
+
@server_api.get("/image/stream/{task_id:int}")
|
96 |
+
def stream(task_id: int):
|
97 |
+
return stream_internal(task_id)
|
98 |
+
|
99 |
+
@server_api.get("/image/stop")
|
100 |
+
def stop(task: int):
|
101 |
+
return stop_internal(task)
|
102 |
+
|
103 |
+
@server_api.get("/image/tmp/{task_id:int}/{img_id:int}")
|
104 |
+
def get_image(task_id: int, img_id: int):
|
105 |
+
return get_image_internal(task_id, img_id)
|
106 |
+
|
107 |
+
@server_api.get("/")
|
108 |
+
def read_root():
|
109 |
+
return FileResponse(os.path.join(app.SD_UI_DIR, "index.html"), headers=NOCACHE_HEADERS)
|
110 |
+
|
111 |
+
@server_api.on_event("shutdown")
|
112 |
+
def shutdown_event(): # Signal render thread to close on shutdown
|
113 |
+
task_manager.current_state_error = SystemExit("Application shutting down.")
|
114 |
+
|
115 |
+
|
116 |
+
# API implementations
|
117 |
+
def set_app_config_internal(req: SetAppConfigRequest):
|
118 |
+
config = app.getConfig()
|
119 |
+
if req.update_branch is not None:
|
120 |
+
config["update_branch"] = req.update_branch
|
121 |
+
if req.render_devices is not None:
|
122 |
+
update_render_devices_in_config(config, req.render_devices)
|
123 |
+
if req.ui_open_browser_on_start is not None:
|
124 |
+
if "ui" not in config:
|
125 |
+
config["ui"] = {}
|
126 |
+
config["ui"]["open_browser_on_start"] = req.ui_open_browser_on_start
|
127 |
+
if req.listen_to_network is not None:
|
128 |
+
if "net" not in config:
|
129 |
+
config["net"] = {}
|
130 |
+
config["net"]["listen_to_network"] = bool(req.listen_to_network)
|
131 |
+
if req.listen_port is not None:
|
132 |
+
if "net" not in config:
|
133 |
+
config["net"] = {}
|
134 |
+
config["net"]["listen_port"] = int(req.listen_port)
|
135 |
+
try:
|
136 |
+
app.setConfig(config)
|
137 |
+
|
138 |
+
if req.render_devices:
|
139 |
+
app.update_render_threads()
|
140 |
+
|
141 |
+
return JSONResponse({"status": "OK"}, headers=NOCACHE_HEADERS)
|
142 |
+
except Exception as e:
|
143 |
+
log.error(traceback.format_exc())
|
144 |
+
raise HTTPException(status_code=500, detail=str(e))
|
145 |
+
|
146 |
+
|
147 |
+
def update_render_devices_in_config(config, render_devices):
|
148 |
+
if render_devices not in ("cpu", "auto") and not render_devices.startswith("cuda:"):
|
149 |
+
raise HTTPException(status_code=400, detail=f"Invalid render device requested: {render_devices}")
|
150 |
+
|
151 |
+
if render_devices.startswith("cuda:"):
|
152 |
+
render_devices = render_devices.split(",")
|
153 |
+
|
154 |
+
config["render_devices"] = render_devices
|
155 |
+
|
156 |
+
|
157 |
+
def read_web_data_internal(key: str = None):
|
158 |
+
if not key: # /get without parameters, stable-diffusion easter egg.
|
159 |
+
raise HTTPException(status_code=418, detail="StableDiffusion is drawing a teapot!") # HTTP418 I'm a teapot
|
160 |
+
elif key == "app_config":
|
161 |
+
return JSONResponse(app.getConfig(), headers=NOCACHE_HEADERS)
|
162 |
+
elif key == "system_info":
|
163 |
+
config = app.getConfig()
|
164 |
+
|
165 |
+
output_dir = config.get("force_save_path", os.path.join(os.path.expanduser("~"), app.OUTPUT_DIRNAME))
|
166 |
+
|
167 |
+
system_info = {
|
168 |
+
"devices": task_manager.get_devices(),
|
169 |
+
"hosts": app.getIPConfig(),
|
170 |
+
"default_output_dir": output_dir,
|
171 |
+
"enforce_output_dir": ("force_save_path" in config),
|
172 |
+
}
|
173 |
+
system_info["devices"]["config"] = config.get("render_devices", "auto")
|
174 |
+
return JSONResponse(system_info, headers=NOCACHE_HEADERS)
|
175 |
+
elif key == "models":
|
176 |
+
return JSONResponse(model_manager.getModels(), headers=NOCACHE_HEADERS)
|
177 |
+
elif key == "modifiers":
|
178 |
+
return JSONResponse(app.get_image_modifiers(), headers=NOCACHE_HEADERS)
|
179 |
+
elif key == "ui_plugins":
|
180 |
+
return JSONResponse(app.getUIPlugins(), headers=NOCACHE_HEADERS)
|
181 |
+
else:
|
182 |
+
raise HTTPException(status_code=404, detail=f"Request for unknown {key}") # HTTP404 Not Found
|
183 |
+
|
184 |
+
|
185 |
+
def ping_internal(session_id: str = None):
|
186 |
+
if task_manager.is_alive() <= 0: # Check that render threads are alive.
|
187 |
+
if task_manager.current_state_error:
|
188 |
+
raise HTTPException(status_code=500, detail=str(task_manager.current_state_error))
|
189 |
+
raise HTTPException(status_code=500, detail="Render thread is dead.")
|
190 |
+
if task_manager.current_state_error and not isinstance(task_manager.current_state_error, StopAsyncIteration):
|
191 |
+
raise HTTPException(status_code=500, detail=str(task_manager.current_state_error))
|
192 |
+
# Alive
|
193 |
+
response = {"status": str(task_manager.current_state)}
|
194 |
+
if session_id:
|
195 |
+
session = task_manager.get_cached_session(session_id, update_ttl=True)
|
196 |
+
response["tasks"] = {id(t): t.status for t in session.tasks}
|
197 |
+
response["devices"] = task_manager.get_devices()
|
198 |
+
return JSONResponse(response, headers=NOCACHE_HEADERS)
|
199 |
+
|
200 |
+
|
201 |
+
def render_internal(req: dict):
|
202 |
+
try:
|
203 |
+
# separate out the request data into rendering and task-specific data
|
204 |
+
render_req: GenerateImageRequest = GenerateImageRequest.parse_obj(req)
|
205 |
+
task_data: TaskData = TaskData.parse_obj(req)
|
206 |
+
|
207 |
+
# Overwrite user specified save path
|
208 |
+
config = app.getConfig()
|
209 |
+
if "force_save_path" in config:
|
210 |
+
task_data.save_to_disk_path = config["force_save_path"]
|
211 |
+
|
212 |
+
render_req.init_image_mask = req.get("mask") # hack: will rename this in the HTTP API in a future revision
|
213 |
+
|
214 |
+
app.save_to_config(
|
215 |
+
task_data.use_stable_diffusion_model,
|
216 |
+
task_data.use_vae_model,
|
217 |
+
task_data.use_hypernetwork_model,
|
218 |
+
task_data.vram_usage_level,
|
219 |
+
)
|
220 |
+
|
221 |
+
# enqueue the task
|
222 |
+
new_task = task_manager.render(render_req, task_data)
|
223 |
+
response = {
|
224 |
+
"status": str(task_manager.current_state),
|
225 |
+
"queue": len(task_manager.tasks_queue),
|
226 |
+
"stream": f"/image/stream/{id(new_task)}",
|
227 |
+
"task": id(new_task),
|
228 |
+
}
|
229 |
+
return JSONResponse(response, headers=NOCACHE_HEADERS)
|
230 |
+
except ChildProcessError as e: # Render thread is dead
|
231 |
+
raise HTTPException(status_code=500, detail=f"Rendering thread has died.") # HTTP500 Internal Server Error
|
232 |
+
except ConnectionRefusedError as e: # Unstarted task pending limit reached, deny queueing too many.
|
233 |
+
raise HTTPException(status_code=503, detail=str(e)) # HTTP503 Service Unavailable
|
234 |
+
except Exception as e:
|
235 |
+
log.error(traceback.format_exc())
|
236 |
+
raise HTTPException(status_code=500, detail=str(e))
|
237 |
+
|
238 |
+
|
239 |
+
def model_merge_internal(req: dict):
|
240 |
+
try:
|
241 |
+
from sdkit.train import merge_models
|
242 |
+
from easydiffusion.utils.save_utils import filename_regex
|
243 |
+
|
244 |
+
mergeReq: MergeRequest = MergeRequest.parse_obj(req)
|
245 |
+
|
246 |
+
merge_models(
|
247 |
+
model_manager.resolve_model_to_use(mergeReq.model0, "stable-diffusion"),
|
248 |
+
model_manager.resolve_model_to_use(mergeReq.model1, "stable-diffusion"),
|
249 |
+
mergeReq.ratio,
|
250 |
+
os.path.join(app.MODELS_DIR, "stable-diffusion", filename_regex.sub("_", mergeReq.out_path)),
|
251 |
+
mergeReq.use_fp16,
|
252 |
+
)
|
253 |
+
return JSONResponse({"status": "OK"}, headers=NOCACHE_HEADERS)
|
254 |
+
except Exception as e:
|
255 |
+
log.error(traceback.format_exc())
|
256 |
+
raise HTTPException(status_code=500, detail=str(e))
|
257 |
+
|
258 |
+
|
259 |
+
def stream_internal(task_id: int):
|
260 |
+
# TODO Move to WebSockets ??
|
261 |
+
task = task_manager.get_cached_task(task_id, update_ttl=True)
|
262 |
+
if not task:
|
263 |
+
raise HTTPException(status_code=404, detail=f"Request {task_id} not found.") # HTTP404 NotFound
|
264 |
+
# if (id(task) != task_id): raise HTTPException(status_code=409, detail=f'Wrong task id received. Expected:{id(task)}, Received:{task_id}') # HTTP409 Conflict
|
265 |
+
if task.buffer_queue.empty() and not task.lock.locked():
|
266 |
+
if task.response:
|
267 |
+
# log.info(f'Session {session_id} sending cached response')
|
268 |
+
return JSONResponse(task.response, headers=NOCACHE_HEADERS)
|
269 |
+
raise HTTPException(status_code=425, detail="Too Early, task not started yet.") # HTTP425 Too Early
|
270 |
+
# log.info(f'Session {session_id} opened live render stream {id(task.buffer_queue)}')
|
271 |
+
return StreamingResponse(task.read_buffer_generator(), media_type="application/json")
|
272 |
+
|
273 |
+
|
274 |
+
def stop_internal(task: int):
|
275 |
+
if not task:
|
276 |
+
if (
|
277 |
+
task_manager.current_state == task_manager.ServerStates.Online
|
278 |
+
or task_manager.current_state == task_manager.ServerStates.Unavailable
|
279 |
+
):
|
280 |
+
raise HTTPException(status_code=409, detail="Not currently running any tasks.") # HTTP409 Conflict
|
281 |
+
task_manager.current_state_error = StopAsyncIteration("")
|
282 |
+
return {"OK"}
|
283 |
+
task_id = task
|
284 |
+
task = task_manager.get_cached_task(task_id, update_ttl=False)
|
285 |
+
if not task:
|
286 |
+
raise HTTPException(status_code=404, detail=f"Task {task_id} was not found.") # HTTP404 Not Found
|
287 |
+
if isinstance(task.error, StopAsyncIteration):
|
288 |
+
raise HTTPException(status_code=409, detail=f"Task {task_id} is already stopped.") # HTTP409 Conflict
|
289 |
+
task.error = StopAsyncIteration(f"Task {task_id} stop requested.")
|
290 |
+
return {"OK"}
|
291 |
+
|
292 |
+
|
293 |
+
def get_image_internal(task_id: int, img_id: int):
|
294 |
+
task = task_manager.get_cached_task(task_id, update_ttl=True)
|
295 |
+
if not task:
|
296 |
+
raise HTTPException(status_code=410, detail=f"Task {task_id} could not be found.") # HTTP410 Gone
|
297 |
+
if not task.temp_images[img_id]:
|
298 |
+
raise HTTPException(status_code=425, detail="Too Early, task data is not available yet.") # HTTP425 Too Early
|
299 |
+
try:
|
300 |
+
img_data = task.temp_images[img_id]
|
301 |
+
img_data.seek(0)
|
302 |
+
return StreamingResponse(img_data, media_type="image/jpeg")
|
303 |
+
except KeyError as e:
|
304 |
+
raise HTTPException(status_code=500, detail=str(e))
|
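Editor's note: taken together, the handlers above form the render lifecycle: render_internal validates and queues a RenderTask, stream_internal streams incremental JSON from its buffer queue, stop_internal cancels it, and get_image_internal serves intermediate previews. The sketch below is a hypothetical client, not part of this commit; only the stream path and the response keys ("status", "queue", "stream", "task") come from the code above, while the POST route and port are assumptions.

import requests

SERVER = "http://localhost:9000"  # assumed port; not confirmed by this diff

payload = {
    "prompt": "a photograph of an astronaut riding a horse",
    "width": 512,
    "height": 512,
    "num_outputs": 1,
    "session_id": "demo-session",
}

# Assumed route: the diff does not show where render_internal is mounted.
resp = requests.post(f"{SERVER}/render", json=payload)
resp.raise_for_status()
info = resp.json()
print("task:", info["task"], "state:", info["status"], "queue:", info["queue"])

# The stream URL is returned by render_internal itself ("/image/stream/<task id>").
with requests.get(f"{SERVER}{info['stream']}", stream=True) as stream:
    for chunk in stream.iter_content(chunk_size=None):
        if chunk:
            print(chunk.decode("utf-8", errors="ignore"))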
ui/easydiffusion/task_manager.py
ADDED
@@ -0,0 +1,565 @@
1 |
+
"""task_manager.py: manage tasks dispatching and render threads.
|
2 |
+
Notes:
|
3 |
+
render_threads should be the only hard reference held by the manager to the threads.
|
4 |
+
Use weak_thread_data to store all other data using weak keys.
|
5 |
+
This will allow for garbage collection after the thread dies.
|
6 |
+
"""
|
7 |
+
import json
|
8 |
+
import traceback
|
9 |
+
|
10 |
+
TASK_TTL = 15 * 60 # seconds, Discard last session's task timeout
|
11 |
+
|
12 |
+
import torch
|
13 |
+
import queue, threading, time, weakref
|
14 |
+
from typing import Any, Hashable
|
15 |
+
|
16 |
+
from easydiffusion import device_manager
|
17 |
+
from easydiffusion.types import TaskData, GenerateImageRequest
|
18 |
+
from easydiffusion.utils import log
|
19 |
+
|
20 |
+
from sdkit.utils import gc
|
21 |
+
|
22 |
+
THREAD_NAME_PREFIX = ""
|
23 |
+
ERR_LOCK_FAILED = " failed to acquire lock within timeout."
|
24 |
+
LOCK_TIMEOUT = 15 # Maximum locking time in seconds before failing a task.
|
25 |
+
# It's better to get an exception than a deadlock... ALWAYS use timeout in critical paths.
|
26 |
+
|
27 |
+
DEVICE_START_TIMEOUT = 60 # seconds - Maximum time to wait for a render device to init.
|
28 |
+
|
29 |
+
|
30 |
+
class SymbolClass(type): # Print nicely formatted Symbol names.
|
31 |
+
def __repr__(self):
|
32 |
+
return self.__qualname__
|
33 |
+
|
34 |
+
def __str__(self):
|
35 |
+
return self.__name__
|
36 |
+
|
37 |
+
|
38 |
+
class Symbol(metaclass=SymbolClass):
|
39 |
+
pass
|
40 |
+
|
41 |
+
|
42 |
+
class ServerStates:
|
43 |
+
class Init(Symbol):
|
44 |
+
pass
|
45 |
+
|
46 |
+
class LoadingModel(Symbol):
|
47 |
+
pass
|
48 |
+
|
49 |
+
class Online(Symbol):
|
50 |
+
pass
|
51 |
+
|
52 |
+
class Rendering(Symbol):
|
53 |
+
pass
|
54 |
+
|
55 |
+
class Unavailable(Symbol):
|
56 |
+
pass
|
57 |
+
|
58 |
+
|
59 |
+
class RenderTask: # Task with output queue and completion lock.
|
60 |
+
def __init__(self, req: GenerateImageRequest, task_data: TaskData):
|
61 |
+
task_data.request_id = id(self)
|
62 |
+
self.render_request: GenerateImageRequest = req # Initial Request
|
63 |
+
self.task_data: TaskData = task_data
|
64 |
+
self.response: Any = None # Copy of the last response
|
65 |
+
self.render_device = None # Select the task affinity. (Not used to change active devices).
|
66 |
+
self.temp_images: list = [None] * req.num_outputs * (1 if task_data.show_only_filtered_image else 2)
|
67 |
+
self.error: Exception = None
|
68 |
+
self.lock: threading.Lock = threading.Lock() # Locks at task start and unlocks when task is completed
|
69 |
+
self.buffer_queue: queue.Queue = queue.Queue() # Queue of JSON string segments
|
70 |
+
|
71 |
+
async def read_buffer_generator(self):
|
72 |
+
try:
|
73 |
+
while not self.buffer_queue.empty():
|
74 |
+
res = self.buffer_queue.get(block=False)
|
75 |
+
self.buffer_queue.task_done()
|
76 |
+
yield res
|
77 |
+
except queue.Empty as e:
|
78 |
+
yield
|
79 |
+
|
80 |
+
@property
|
81 |
+
def status(self):
|
82 |
+
if self.lock.locked():
|
83 |
+
return "running"
|
84 |
+
if isinstance(self.error, StopAsyncIteration):
|
85 |
+
return "stopped"
|
86 |
+
if self.error:
|
87 |
+
return "error"
|
88 |
+
if not self.buffer_queue.empty():
|
89 |
+
return "buffer"
|
90 |
+
if self.response:
|
91 |
+
return "completed"
|
92 |
+
return "pending"
|
93 |
+
|
94 |
+
@property
|
95 |
+
def is_pending(self):
|
96 |
+
return bool(not self.response and not self.error)
|
97 |
+
|
98 |
+
|
99 |
+
# Temporary cache to allow to query tasks results for a short time after they are completed.
|
100 |
+
class DataCache:
|
101 |
+
def __init__(self):
|
102 |
+
self._base = dict()
|
103 |
+
self._lock: threading.Lock = threading.Lock()
|
104 |
+
|
105 |
+
def _get_ttl_time(self, ttl: int) -> int:
|
106 |
+
return int(time.time()) + ttl
|
107 |
+
|
108 |
+
def _is_expired(self, timestamp: int) -> bool:
|
109 |
+
return int(time.time()) >= timestamp
|
110 |
+
|
111 |
+
def clean(self) -> None:
|
112 |
+
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
|
113 |
+
raise Exception("DataCache.clean" + ERR_LOCK_FAILED)
|
114 |
+
try:
|
115 |
+
# Create a list of expired keys to delete
|
116 |
+
to_delete = []
|
117 |
+
for key in self._base:
|
118 |
+
ttl, _ = self._base[key]
|
119 |
+
if self._is_expired(ttl):
|
120 |
+
to_delete.append(key)
|
121 |
+
# Remove Items
|
122 |
+
for key in to_delete:
|
123 |
+
(_, val) = self._base[key]
|
124 |
+
if isinstance(val, RenderTask):
|
125 |
+
log.debug(f"RenderTask {key} expired. Data removed.")
|
126 |
+
elif isinstance(val, SessionState):
|
127 |
+
log.debug(f"Session {key} expired. Data removed.")
|
128 |
+
else:
|
129 |
+
log.debug(f"Key {key} expired. Data removed.")
|
130 |
+
del self._base[key]
|
131 |
+
finally:
|
132 |
+
self._lock.release()
|
133 |
+
|
134 |
+
def clear(self) -> None:
|
135 |
+
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
|
136 |
+
raise Exception("DataCache.clear" + ERR_LOCK_FAILED)
|
137 |
+
try:
|
138 |
+
self._base.clear()
|
139 |
+
finally:
|
140 |
+
self._lock.release()
|
141 |
+
|
142 |
+
def delete(self, key: Hashable) -> bool:
|
143 |
+
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
|
144 |
+
raise Exception("DataCache.delete" + ERR_LOCK_FAILED)
|
145 |
+
try:
|
146 |
+
if key not in self._base:
|
147 |
+
return False
|
148 |
+
del self._base[key]
|
149 |
+
return True
|
150 |
+
finally:
|
151 |
+
self._lock.release()
|
152 |
+
|
153 |
+
def keep(self, key: Hashable, ttl: int) -> bool:
|
154 |
+
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
|
155 |
+
raise Exception("DataCache.keep" + ERR_LOCK_FAILED)
|
156 |
+
try:
|
157 |
+
if key in self._base:
|
158 |
+
_, value = self._base.get(key)
|
159 |
+
self._base[key] = (self._get_ttl_time(ttl), value)
|
160 |
+
return True
|
161 |
+
return False
|
162 |
+
finally:
|
163 |
+
self._lock.release()
|
164 |
+
|
165 |
+
def put(self, key: Hashable, value: Any, ttl: int) -> bool:
|
166 |
+
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
|
167 |
+
raise Exception("DataCache.put" + ERR_LOCK_FAILED)
|
168 |
+
try:
|
169 |
+
self._base[key] = (self._get_ttl_time(ttl), value)
|
170 |
+
except Exception as e:
|
171 |
+
log.error(traceback.format_exc())
|
172 |
+
return False
|
173 |
+
else:
|
174 |
+
return True
|
175 |
+
finally:
|
176 |
+
self._lock.release()
|
177 |
+
|
178 |
+
def tryGet(self, key: Hashable) -> Any:
|
179 |
+
if not self._lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
|
180 |
+
raise Exception("DataCache.tryGet" + ERR_LOCK_FAILED)
|
181 |
+
try:
|
182 |
+
ttl, value = self._base.get(key, (None, None))
|
183 |
+
if ttl is not None and self._is_expired(ttl):
|
184 |
+
log.debug(f"Session {key} expired. Discarding data.")
|
185 |
+
del self._base[key]
|
186 |
+
return None
|
187 |
+
return value
|
188 |
+
finally:
|
189 |
+
self._lock.release()
|
190 |
+
|
191 |
+
|
192 |
+
manager_lock = threading.RLock()
|
193 |
+
render_threads = []
|
194 |
+
current_state = ServerStates.Init
|
195 |
+
current_state_error: Exception = None
|
196 |
+
tasks_queue = []
|
197 |
+
session_cache = DataCache()
|
198 |
+
task_cache = DataCache()
|
199 |
+
weak_thread_data = weakref.WeakKeyDictionary()
|
200 |
+
idle_event: threading.Event = threading.Event()
|
201 |
+
|
202 |
+
|
203 |
+
class SessionState:
|
204 |
+
def __init__(self, id: str):
|
205 |
+
self._id = id
|
206 |
+
self._tasks_ids = []
|
207 |
+
|
208 |
+
@property
|
209 |
+
def id(self):
|
210 |
+
return self._id
|
211 |
+
|
212 |
+
@property
|
213 |
+
def tasks(self):
|
214 |
+
tasks = []
|
215 |
+
for task_id in self._tasks_ids:
|
216 |
+
task = task_cache.tryGet(task_id)
|
217 |
+
if task:
|
218 |
+
tasks.append(task)
|
219 |
+
return tasks
|
220 |
+
|
221 |
+
def put(self, task, ttl=TASK_TTL):
|
222 |
+
task_id = id(task)
|
223 |
+
self._tasks_ids.append(task_id)
|
224 |
+
if not task_cache.put(task_id, task, ttl):
|
225 |
+
return False
|
226 |
+
while len(self._tasks_ids) > len(render_threads) * 2:
|
227 |
+
self._tasks_ids.pop(0)
|
228 |
+
return True
|
229 |
+
|
230 |
+
|
231 |
+
def thread_get_next_task():
|
232 |
+
from easydiffusion import renderer
|
233 |
+
|
234 |
+
if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
|
235 |
+
log.warn(f"Render thread on device: {renderer.context.device} failed to acquire manager lock.")
|
236 |
+
return None
|
237 |
+
if len(tasks_queue) <= 0:
|
238 |
+
manager_lock.release()
|
239 |
+
return None
|
240 |
+
task = None
|
241 |
+
try: # Select a render task.
|
242 |
+
for queued_task in tasks_queue:
|
243 |
+
if queued_task.render_device and renderer.context.device != queued_task.render_device:
|
244 |
+
# Is asking for a specific render device.
|
245 |
+
if is_alive(queued_task.render_device) > 0:
|
246 |
+
continue # requested device alive, skip current one.
|
247 |
+
else:
|
248 |
+
# Requested device is not active, return error to UI.
|
249 |
+
queued_task.error = Exception(queued_task.render_device + " is not currently active.")
|
250 |
+
task = queued_task
|
251 |
+
break
|
252 |
+
if not queued_task.render_device and renderer.context.device == "cpu" and is_alive() > 1:
|
253 |
+
# Not asking for a specific device; the CPU wants to grab the task, but other render devices are alive.
|
254 |
+
continue # Skip Tasks, don't run on CPU unless there is nothing else or user asked for it.
|
255 |
+
task = queued_task
|
256 |
+
break
|
257 |
+
if task is not None:
|
258 |
+
del tasks_queue[tasks_queue.index(task)]
|
259 |
+
return task
|
260 |
+
finally:
|
261 |
+
manager_lock.release()
|
262 |
+
|
263 |
+
|
264 |
+
def thread_render(device):
|
265 |
+
global current_state, current_state_error
|
266 |
+
|
267 |
+
from easydiffusion import renderer, model_manager
|
268 |
+
|
269 |
+
try:
|
270 |
+
renderer.init(device)
|
271 |
+
|
272 |
+
weak_thread_data[threading.current_thread()] = {
|
273 |
+
"device": renderer.context.device,
|
274 |
+
"device_name": renderer.context.device_name,
|
275 |
+
"alive": True,
|
276 |
+
}
|
277 |
+
|
278 |
+
current_state = ServerStates.LoadingModel
|
279 |
+
model_manager.load_default_models(renderer.context)
|
280 |
+
|
281 |
+
current_state = ServerStates.Online
|
282 |
+
except Exception as e:
|
283 |
+
log.error(traceback.format_exc())
|
284 |
+
weak_thread_data[threading.current_thread()] = {"error": e, "alive": False}
|
285 |
+
return
|
286 |
+
|
287 |
+
while True:
|
288 |
+
session_cache.clean()
|
289 |
+
task_cache.clean()
|
290 |
+
if not weak_thread_data[threading.current_thread()]["alive"]:
|
291 |
+
log.info(f"Shutting down thread for device {renderer.context.device}")
|
292 |
+
model_manager.unload_all(renderer.context)
|
293 |
+
return
|
294 |
+
if isinstance(current_state_error, SystemExit):
|
295 |
+
current_state = ServerStates.Unavailable
|
296 |
+
return
|
297 |
+
task = thread_get_next_task()
|
298 |
+
if task is None:
|
299 |
+
idle_event.clear()
|
300 |
+
idle_event.wait(timeout=1)
|
301 |
+
continue
|
302 |
+
if task.error is not None:
|
303 |
+
log.error(task.error)
|
304 |
+
task.response = {"status": "failed", "detail": str(task.error)}
|
305 |
+
task.buffer_queue.put(json.dumps(task.response))
|
306 |
+
continue
|
307 |
+
if current_state_error:
|
308 |
+
task.error = current_state_error
|
309 |
+
task.response = {"status": "failed", "detail": str(task.error)}
|
310 |
+
task.buffer_queue.put(json.dumps(task.response))
|
311 |
+
continue
|
312 |
+
log.info(f"Session {task.task_data.session_id} starting task {id(task)} on {renderer.context.device_name}")
|
313 |
+
if not task.lock.acquire(blocking=False):
|
314 |
+
raise Exception("Got locked task from queue.")
|
315 |
+
try:
|
316 |
+
|
317 |
+
def step_callback():
|
318 |
+
global current_state_error
|
319 |
+
|
320 |
+
if (
|
321 |
+
isinstance(current_state_error, SystemExit)
|
322 |
+
or isinstance(current_state_error, StopAsyncIteration)
|
323 |
+
or isinstance(task.error, StopAsyncIteration)
|
324 |
+
):
|
325 |
+
renderer.context.stop_processing = True
|
326 |
+
if isinstance(current_state_error, StopAsyncIteration):
|
327 |
+
task.error = current_state_error
|
328 |
+
current_state_error = None
|
329 |
+
log.info(f"Session {task.task_data.session_id} sent cancel signal for task {id(task)}")
|
330 |
+
|
331 |
+
current_state = ServerStates.LoadingModel
|
332 |
+
model_manager.resolve_model_paths(task.task_data)
|
333 |
+
model_manager.reload_models_if_necessary(renderer.context, task.task_data)
|
334 |
+
|
335 |
+
current_state = ServerStates.Rendering
|
336 |
+
task.response = renderer.make_images(
|
337 |
+
task.render_request, task.task_data, task.buffer_queue, task.temp_images, step_callback
|
338 |
+
)
|
339 |
+
# Before looping back to the generator, mark cache as still alive.
|
340 |
+
task_cache.keep(id(task), TASK_TTL)
|
341 |
+
session_cache.keep(task.task_data.session_id, TASK_TTL)
|
342 |
+
except Exception as e:
|
343 |
+
task.error = str(e)
|
344 |
+
task.response = {"status": "failed", "detail": str(task.error)}
|
345 |
+
task.buffer_queue.put(json.dumps(task.response))
|
346 |
+
log.error(traceback.format_exc())
|
347 |
+
finally:
|
348 |
+
gc(renderer.context)
|
349 |
+
task.lock.release()
|
350 |
+
task_cache.keep(id(task), TASK_TTL)
|
351 |
+
session_cache.keep(task.task_data.session_id, TASK_TTL)
|
352 |
+
if isinstance(task.error, StopAsyncIteration):
|
353 |
+
log.info(f"Session {task.task_data.session_id} task {id(task)} cancelled!")
|
354 |
+
elif task.error is not None:
|
355 |
+
log.info(f"Session {task.task_data.session_id} task {id(task)} failed!")
|
356 |
+
else:
|
357 |
+
log.info(
|
358 |
+
f"Session {task.task_data.session_id} task {id(task)} completed by {renderer.context.device_name}."
|
359 |
+
)
|
360 |
+
current_state = ServerStates.Online
|
361 |
+
|
362 |
+
|
363 |
+
def get_cached_task(task_id: str, update_ttl: bool = False):
|
364 |
+
# Calling keep before tryGet refreshes the TTL, so an expired entry won't be discarded.
|
365 |
+
if update_ttl and not task_cache.keep(task_id, TASK_TTL):
|
366 |
+
# Failed to keep task, already gone.
|
367 |
+
return None
|
368 |
+
return task_cache.tryGet(task_id)
|
369 |
+
|
370 |
+
|
371 |
+
def get_cached_session(session_id: str, update_ttl: bool = False):
|
372 |
+
if update_ttl:
|
373 |
+
session_cache.keep(session_id, TASK_TTL)
|
374 |
+
session = session_cache.tryGet(session_id)
|
375 |
+
if not session:
|
376 |
+
session = SessionState(session_id)
|
377 |
+
session_cache.put(session_id, session, TASK_TTL)
|
378 |
+
return session
|
379 |
+
|
380 |
+
|
381 |
+
def get_devices():
|
382 |
+
devices = {
|
383 |
+
"all": {},
|
384 |
+
"active": {},
|
385 |
+
}
|
386 |
+
|
387 |
+
def get_device_info(device):
|
388 |
+
if device in ("cpu", "mps"):
|
389 |
+
return {"name": device_manager.get_processor_name()}
|
390 |
+
|
391 |
+
mem_free, mem_total = torch.cuda.mem_get_info(device)
|
392 |
+
mem_free /= float(10**9)
|
393 |
+
mem_total /= float(10**9)
|
394 |
+
|
395 |
+
return {
|
396 |
+
"name": torch.cuda.get_device_name(device),
|
397 |
+
"mem_free": mem_free,
|
398 |
+
"mem_total": mem_total,
|
399 |
+
"max_vram_usage_level": device_manager.get_max_vram_usage_level(device),
|
400 |
+
}
|
401 |
+
|
402 |
+
# list the compatible devices
|
403 |
+
cuda_count = torch.cuda.device_count()
|
404 |
+
for device in range(cuda_count):
|
405 |
+
device = f"cuda:{device}"
|
406 |
+
if not device_manager.is_device_compatible(device):
|
407 |
+
continue
|
408 |
+
|
409 |
+
devices["all"].update({device: get_device_info(device)})
|
410 |
+
|
411 |
+
if device_manager.is_mps_available():
|
412 |
+
devices["all"].update({"mps": get_device_info("mps")})
|
413 |
+
|
414 |
+
devices["all"].update({"cpu": get_device_info("cpu")})
|
415 |
+
|
416 |
+
# list the activated devices
|
417 |
+
if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
|
418 |
+
raise Exception("get_devices" + ERR_LOCK_FAILED)
|
419 |
+
try:
|
420 |
+
for rthread in render_threads:
|
421 |
+
if not rthread.is_alive():
|
422 |
+
continue
|
423 |
+
weak_data = weak_thread_data.get(rthread)
|
424 |
+
if not weak_data or not "device" in weak_data or not "device_name" in weak_data:
|
425 |
+
continue
|
426 |
+
device = weak_data["device"]
|
427 |
+
devices["active"].update({device: get_device_info(device)})
|
428 |
+
finally:
|
429 |
+
manager_lock.release()
|
430 |
+
|
431 |
+
return devices
|
432 |
+
|
433 |
+
|
434 |
+
def is_alive(device=None):
|
435 |
+
if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
|
436 |
+
raise Exception("is_alive" + ERR_LOCK_FAILED)
|
437 |
+
nbr_alive = 0
|
438 |
+
try:
|
439 |
+
for rthread in render_threads:
|
440 |
+
if device is not None:
|
441 |
+
weak_data = weak_thread_data.get(rthread)
|
442 |
+
if weak_data is None or not "device" in weak_data or weak_data["device"] is None:
|
443 |
+
continue
|
444 |
+
thread_device = weak_data["device"]
|
445 |
+
if thread_device != device:
|
446 |
+
continue
|
447 |
+
if rthread.is_alive():
|
448 |
+
nbr_alive += 1
|
449 |
+
return nbr_alive
|
450 |
+
finally:
|
451 |
+
manager_lock.release()
|
452 |
+
|
453 |
+
|
454 |
+
def start_render_thread(device):
|
455 |
+
if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
|
456 |
+
raise Exception("start_render_thread" + ERR_LOCK_FAILED)
|
457 |
+
log.info(f"Start new Rendering Thread on device: {device}")
|
458 |
+
try:
|
459 |
+
rthread = threading.Thread(target=thread_render, kwargs={"device": device})
|
460 |
+
rthread.daemon = True
|
461 |
+
rthread.name = THREAD_NAME_PREFIX + device
|
462 |
+
rthread.start()
|
463 |
+
render_threads.append(rthread)
|
464 |
+
finally:
|
465 |
+
manager_lock.release()
|
466 |
+
timeout = DEVICE_START_TIMEOUT
|
467 |
+
while not rthread.is_alive() or not rthread in weak_thread_data or not "device" in weak_thread_data[rthread]:
|
468 |
+
if rthread in weak_thread_data and "error" in weak_thread_data[rthread]:
|
469 |
+
log.error(f"{rthread}, {device}, error: {weak_thread_data[rthread]['error']}")
|
470 |
+
return False
|
471 |
+
if timeout <= 0:
|
472 |
+
return False
|
473 |
+
timeout -= 1
|
474 |
+
time.sleep(1)
|
475 |
+
return True
|
476 |
+
|
477 |
+
|
478 |
+
def stop_render_thread(device):
|
479 |
+
try:
|
480 |
+
device_manager.validate_device_id(device, log_prefix="stop_render_thread")
|
481 |
+
except:
|
482 |
+
log.error(traceback.format_exc())
|
483 |
+
return False
|
484 |
+
|
485 |
+
if not manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT):
|
486 |
+
raise Exception("stop_render_thread" + ERR_LOCK_FAILED)
|
487 |
+
log.info(f"Stopping Rendering Thread on device: {device}")
|
488 |
+
|
489 |
+
try:
|
490 |
+
thread_to_remove = None
|
491 |
+
for rthread in render_threads:
|
492 |
+
weak_data = weak_thread_data.get(rthread)
|
493 |
+
if weak_data is None or not "device" in weak_data or weak_data["device"] is None:
|
494 |
+
continue
|
495 |
+
thread_device = weak_data["device"]
|
496 |
+
if thread_device == device:
|
497 |
+
weak_data["alive"] = False
|
498 |
+
thread_to_remove = rthread
|
499 |
+
break
|
500 |
+
if thread_to_remove is not None:
|
501 |
+
render_threads.remove(rthread)
|
502 |
+
return True
|
503 |
+
finally:
|
504 |
+
manager_lock.release()
|
505 |
+
|
506 |
+
return False
|
507 |
+
|
508 |
+
|
509 |
+
def update_render_threads(render_devices, active_devices):
|
510 |
+
devices_to_start, devices_to_stop = device_manager.get_device_delta(render_devices, active_devices)
|
511 |
+
log.debug(f"devices_to_start: {devices_to_start}")
|
512 |
+
log.debug(f"devices_to_stop: {devices_to_stop}")
|
513 |
+
|
514 |
+
for device in devices_to_stop:
|
515 |
+
if is_alive(device) <= 0:
|
516 |
+
log.debug(f"{device} is not alive")
|
517 |
+
continue
|
518 |
+
if not stop_render_thread(device):
|
519 |
+
log.warn(f"{device} could not stop render thread")
|
520 |
+
|
521 |
+
for device in devices_to_start:
|
522 |
+
if is_alive(device) >= 1:
|
523 |
+
log.debug(f"{device} already registered.")
|
524 |
+
continue
|
525 |
+
if not start_render_thread(device):
|
526 |
+
log.warn(f"{device} failed to start.")
|
527 |
+
|
528 |
+
if is_alive() <= 0: # No running devices, probably invalid user config.
|
529 |
+
raise EnvironmentError(
|
530 |
+
'ERROR: No active render devices! Please verify the "render_devices" value in config.json'
|
531 |
+
)
|
532 |
+
|
533 |
+
log.debug(f"active devices: {get_devices()['active']}")
|
534 |
+
|
535 |
+
|
536 |
+
def shutdown_event(): # Signal render thread to close on shutdown
|
537 |
+
global current_state_error
|
538 |
+
current_state_error = SystemExit("Application shutting down.")
|
539 |
+
|
540 |
+
|
541 |
+
def render(render_req: GenerateImageRequest, task_data: TaskData):
|
542 |
+
current_thread_count = is_alive()
|
543 |
+
if current_thread_count <= 0: # Render thread is dead
|
544 |
+
raise ChildProcessError("Rendering thread has died.")
|
545 |
+
|
546 |
+
# Alive, check if task in cache
|
547 |
+
session = get_cached_session(task_data.session_id, update_ttl=True)
|
548 |
+
pending_tasks = list(filter(lambda t: t.is_pending, session.tasks))
|
549 |
+
if current_thread_count < len(pending_tasks):
|
550 |
+
raise ConnectionRefusedError(
|
551 |
+
f"Session {task_data.session_id} already has {len(pending_tasks)} pending tasks out of {current_thread_count}."
|
552 |
+
)
|
553 |
+
|
554 |
+
new_task = RenderTask(render_req, task_data)
|
555 |
+
if session.put(new_task, TASK_TTL):
|
556 |
+
# Use twice the normal timeout for adding user requests.
|
557 |
+
# Tries to force session.put to fail before tasks_queue.put would.
|
558 |
+
if manager_lock.acquire(blocking=True, timeout=LOCK_TIMEOUT * 2):
|
559 |
+
try:
|
560 |
+
tasks_queue.append(new_task)
|
561 |
+
idle_event.set()
|
562 |
+
return new_task
|
563 |
+
finally:
|
564 |
+
manager_lock.release()
|
565 |
+
raise RuntimeError("Failed to add task to cache.")
|
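Editor's note: DataCache is the small TTL cache that backs both session_cache and task_cache above; every access goes through a lock with LOCK_TIMEOUT so a stuck thread raises instead of deadlocking. A minimal usage sketch, not part of this commit, using only the methods defined in this file:

import time
from easydiffusion.task_manager import DataCache  # module added in this commit

cache = DataCache()
cache.put("task-1", {"status": "pending"}, ttl=2)  # entry expires in ~2 seconds
print(cache.tryGet("task-1"))                      # {'status': 'pending'}

cache.keep("task-1", ttl=60)                       # refresh the TTL before it lapses
time.sleep(3)
cache.clean()                                      # drops only entries whose TTL elapsed
print(cache.tryGet("task-1"))                      # still cached, because keep() extended it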
ui/easydiffusion/types.py
ADDED
@@ -0,0 +1,103 @@
1 |
+
from pydantic import BaseModel
|
2 |
+
from typing import Any
|
3 |
+
|
4 |
+
|
5 |
+
class GenerateImageRequest(BaseModel):
|
6 |
+
prompt: str = ""
|
7 |
+
negative_prompt: str = ""
|
8 |
+
|
9 |
+
seed: int = 42
|
10 |
+
width: int = 512
|
11 |
+
height: int = 512
|
12 |
+
|
13 |
+
num_outputs: int = 1
|
14 |
+
num_inference_steps: int = 50
|
15 |
+
guidance_scale: float = 7.5
|
16 |
+
|
17 |
+
init_image: Any = None
|
18 |
+
init_image_mask: Any = None
|
19 |
+
prompt_strength: float = 0.8
|
20 |
+
preserve_init_image_color_profile = False
|
21 |
+
|
22 |
+
sampler_name: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"
|
23 |
+
hypernetwork_strength: float = 0
|
24 |
+
|
25 |
+
|
26 |
+
class TaskData(BaseModel):
|
27 |
+
request_id: str = None
|
28 |
+
session_id: str = "session"
|
29 |
+
save_to_disk_path: str = None
|
30 |
+
vram_usage_level: str = "balanced" # or "low" or "medium"
|
31 |
+
|
32 |
+
use_face_correction: str = None # or "GFPGANv1.3"
|
33 |
+
use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B"
|
34 |
+
upscale_amount: int = 4 # or 2
|
35 |
+
use_stable_diffusion_model: str = "sd-v1-4"
|
36 |
+
# use_stable_diffusion_config: str = "v1-inference"
|
37 |
+
use_vae_model: str = None
|
38 |
+
use_hypernetwork_model: str = None
|
39 |
+
|
40 |
+
show_only_filtered_image: bool = False
|
41 |
+
block_nsfw: bool = False
|
42 |
+
output_format: str = "jpeg" # or "png" or "webp"
|
43 |
+
output_quality: int = 75
|
44 |
+
metadata_output_format: str = "txt" # or "json"
|
45 |
+
stream_image_progress: bool = False
|
46 |
+
stream_image_progress_interval: int = 5
|
47 |
+
|
48 |
+
|
49 |
+
class MergeRequest(BaseModel):
|
50 |
+
model0: str = None
|
51 |
+
model1: str = None
|
52 |
+
ratio: float = None
|
53 |
+
out_path: str = "mix"
|
54 |
+
use_fp16 = True
|
55 |
+
|
56 |
+
|
57 |
+
class Image:
|
58 |
+
data: str # base64
|
59 |
+
seed: int
|
60 |
+
is_nsfw: bool
|
61 |
+
path_abs: str = None
|
62 |
+
|
63 |
+
def __init__(self, data, seed):
|
64 |
+
self.data = data
|
65 |
+
self.seed = seed
|
66 |
+
|
67 |
+
def json(self):
|
68 |
+
return {
|
69 |
+
"data": self.data,
|
70 |
+
"seed": self.seed,
|
71 |
+
"path_abs": self.path_abs,
|
72 |
+
}
|
73 |
+
|
74 |
+
|
75 |
+
class Response:
|
76 |
+
render_request: GenerateImageRequest
|
77 |
+
task_data: TaskData
|
78 |
+
images: list
|
79 |
+
|
80 |
+
def __init__(self, render_request: GenerateImageRequest, task_data: TaskData, images: list):
|
81 |
+
self.render_request = render_request
|
82 |
+
self.task_data = task_data
|
83 |
+
self.images = images
|
84 |
+
|
85 |
+
def json(self):
|
86 |
+
del self.render_request.init_image
|
87 |
+
del self.render_request.init_image_mask
|
88 |
+
|
89 |
+
res = {
|
90 |
+
"status": "succeeded",
|
91 |
+
"render_request": self.render_request.dict(),
|
92 |
+
"task_data": self.task_data.dict(),
|
93 |
+
"output": [],
|
94 |
+
}
|
95 |
+
|
96 |
+
for image in self.images:
|
97 |
+
res["output"].append(image.json())
|
98 |
+
|
99 |
+
return res
|
100 |
+
|
101 |
+
|
102 |
+
class UserInitiatedStop(Exception):
|
103 |
+
pass
|
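Editor's note: the server splits a single incoming JSON body into the two pydantic models above (GenerateImageRequest for sampling parameters, TaskData for bookkeeping), exactly as render_internal does with parse_obj. A short sketch, not part of this commit:

from easydiffusion.types import GenerateImageRequest, TaskData

req = {
    "prompt": "a watercolor painting of a lighthouse",
    "seed": 1234,
    "num_outputs": 2,
    "session_id": "demo-session",
    "use_stable_diffusion_model": "sd-v1-4",
    "output_format": "png",
}

render_req = GenerateImageRequest.parse_obj(req)  # keys the model doesn't declare are ignored
task_data = TaskData.parse_obj(req)

print(render_req.prompt, render_req.seed, render_req.num_outputs)
print(task_data.session_id, task_data.use_stable_diffusion_model, task_data.output_format)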
ui/easydiffusion/utils/__init__.py
ADDED
@@ -0,0 +1,8 @@
1 |
+
import logging
|
2 |
+
|
3 |
+
log = logging.getLogger("easydiffusion")
|
4 |
+
|
5 |
+
from .save_utils import (
|
6 |
+
save_images_to_disk,
|
7 |
+
get_printable_request,
|
8 |
+
)
|
ui/easydiffusion/utils/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (311 Bytes).
|
|
ui/easydiffusion/utils/__pycache__/save_utils.cpython-38.pyc
ADDED
Binary file (3.52 kB).
|
|
ui/easydiffusion/utils/save_utils.py
ADDED
@@ -0,0 +1,131 @@
1 |
+
import os
|
2 |
+
import time
|
3 |
+
import re
|
4 |
+
|
5 |
+
from easydiffusion.types import TaskData, GenerateImageRequest
|
6 |
+
|
7 |
+
from sdkit.utils import save_images, save_dicts
|
8 |
+
from numpy import base_repr
|
9 |
+
|
10 |
+
filename_regex = re.compile("[^a-zA-Z0-9._-]")
|
11 |
+
|
12 |
+
# keep in sync with `ui/media/js/dnd.js`
|
13 |
+
TASK_TEXT_MAPPING = {
|
14 |
+
"prompt": "Prompt",
|
15 |
+
"width": "Width",
|
16 |
+
"height": "Height",
|
17 |
+
"seed": "Seed",
|
18 |
+
"num_inference_steps": "Steps",
|
19 |
+
"guidance_scale": "Guidance Scale",
|
20 |
+
"prompt_strength": "Prompt Strength",
|
21 |
+
"use_face_correction": "Use Face Correction",
|
22 |
+
"use_upscale": "Use Upscaling",
|
23 |
+
"upscale_amount": "Upscale By",
|
24 |
+
"sampler_name": "Sampler",
|
25 |
+
"negative_prompt": "Negative Prompt",
|
26 |
+
"use_stable_diffusion_model": "Stable Diffusion model",
|
27 |
+
"use_vae_model": "VAE model",
|
28 |
+
"use_hypernetwork_model": "Hypernetwork model",
|
29 |
+
"hypernetwork_strength": "Hypernetwork Strength",
|
30 |
+
}
|
31 |
+
|
32 |
+
|
33 |
+
def save_images_to_disk(images: list, filtered_images: list, req: GenerateImageRequest, task_data: TaskData):
|
34 |
+
now = time.time()
|
35 |
+
save_dir_path = os.path.join(task_data.save_to_disk_path, filename_regex.sub("_", task_data.session_id))
|
36 |
+
metadata_entries = get_metadata_entries_for_request(req, task_data)
|
37 |
+
make_filename = make_filename_callback(req, now=now)
|
38 |
+
|
39 |
+
if task_data.show_only_filtered_image or filtered_images is images:
|
40 |
+
save_images(
|
41 |
+
filtered_images,
|
42 |
+
save_dir_path,
|
43 |
+
file_name=make_filename,
|
44 |
+
output_format=task_data.output_format,
|
45 |
+
output_quality=task_data.output_quality,
|
46 |
+
)
|
47 |
+
if task_data.metadata_output_format.lower() in ["json", "txt", "embed"]:
|
48 |
+
save_dicts(
|
49 |
+
metadata_entries,
|
50 |
+
save_dir_path,
|
51 |
+
file_name=make_filename,
|
52 |
+
output_format=task_data.metadata_output_format,
|
53 |
+
file_format=task_data.output_format,
|
54 |
+
)
|
55 |
+
else:
|
56 |
+
make_filter_filename = make_filename_callback(req, now=now, suffix="filtered")
|
57 |
+
|
58 |
+
save_images(
|
59 |
+
images,
|
60 |
+
save_dir_path,
|
61 |
+
file_name=make_filename,
|
62 |
+
output_format=task_data.output_format,
|
63 |
+
output_quality=task_data.output_quality,
|
64 |
+
)
|
65 |
+
save_images(
|
66 |
+
filtered_images,
|
67 |
+
save_dir_path,
|
68 |
+
file_name=make_filter_filename,
|
69 |
+
output_format=task_data.output_format,
|
70 |
+
output_quality=task_data.output_quality,
|
71 |
+
)
|
72 |
+
if task_data.metadata_output_format.lower() in ["json", "txt", "embed"]:
|
73 |
+
save_dicts(
|
74 |
+
metadata_entries,
|
75 |
+
save_dir_path,
|
76 |
+
file_name=make_filter_filename,
|
77 |
+
output_format=task_data.metadata_output_format,
|
78 |
+
file_format=task_data.output_format,
|
79 |
+
)
|
80 |
+
|
81 |
+
|
82 |
+
def get_metadata_entries_for_request(req: GenerateImageRequest, task_data: TaskData):
|
83 |
+
metadata = get_printable_request(req)
|
84 |
+
metadata.update(
|
85 |
+
{
|
86 |
+
"use_stable_diffusion_model": task_data.use_stable_diffusion_model,
|
87 |
+
"use_vae_model": task_data.use_vae_model,
|
88 |
+
"use_hypernetwork_model": task_data.use_hypernetwork_model,
|
89 |
+
"use_face_correction": task_data.use_face_correction,
|
90 |
+
"use_upscale": task_data.use_upscale,
|
91 |
+
}
|
92 |
+
)
|
93 |
+
if metadata["use_upscale"] is not None:
|
94 |
+
metadata["upscale_amount"] = task_data.upscale_amount
|
95 |
+
if task_data.use_hypernetwork_model is None:
|
96 |
+
del metadata["hypernetwork_strength"]
|
97 |
+
|
98 |
+
# if text, format it in the text format expected by the UI
|
99 |
+
is_txt_format = task_data.metadata_output_format.lower() == "txt"
|
100 |
+
if is_txt_format:
|
101 |
+
metadata = {TASK_TEXT_MAPPING[key]: val for key, val in metadata.items() if key in TASK_TEXT_MAPPING}
|
102 |
+
|
103 |
+
entries = [metadata.copy() for _ in range(req.num_outputs)]
|
104 |
+
for i, entry in enumerate(entries):
|
105 |
+
entry["Seed" if is_txt_format else "seed"] = req.seed + i
|
106 |
+
|
107 |
+
return entries
|
108 |
+
|
109 |
+
|
110 |
+
def get_printable_request(req: GenerateImageRequest):
|
111 |
+
metadata = req.dict()
|
112 |
+
del metadata["init_image"]
|
113 |
+
del metadata["init_image_mask"]
|
114 |
+
if req.init_image is None:
|
115 |
+
del metadata["prompt_strength"]
|
116 |
+
return metadata
|
117 |
+
|
118 |
+
|
119 |
+
def make_filename_callback(req: GenerateImageRequest, suffix=None, now=None):
|
120 |
+
if now is None:
|
121 |
+
now = time.time()
|
122 |
+
|
123 |
+
def make_filename(i):
|
124 |
+
img_id = base_repr(int(now * 10000), 36)[-7:] + base_repr(int(i), 36) # Base 36 conversion, 0-9, A-Z
|
125 |
+
|
126 |
+
prompt_flattened = filename_regex.sub("_", req.prompt)[:50]
|
127 |
+
name = f"{prompt_flattened}_{img_id}"
|
128 |
+
name = name if suffix is None else f"{name}_{suffix}"
|
129 |
+
return name
|
130 |
+
|
131 |
+
return make_filename
|
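Editor's note: make_filename_callback builds each image's file name from the prompt plus a base-36 id derived from the request timestamp and the image index. A small sketch of the same scheme, not part of this commit (the exact id depends on the clock):

import time
from numpy import base_repr

now = time.time()
prompt = "a photograph of an astronaut riding a horse"

for i in range(2):
    # Same id scheme as make_filename above: the last 7 base-36 digits of the
    # timestamp, followed by the image index in base 36.
    img_id = base_repr(int(now * 10000), 36)[-7:] + base_repr(int(i), 36)
    prompt_flattened = prompt.replace(" ", "_")[:50]  # filename_regex is stricter than this
    print(f"{prompt_flattened}_{img_id}")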
ui/hotfix/9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3.26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142
ADDED
@@ -0,0 +1,171 @@
1 |
+
{
|
2 |
+
"_name_or_path": "clip-vit-large-patch14/",
|
3 |
+
"architectures": [
|
4 |
+
"CLIPModel"
|
5 |
+
],
|
6 |
+
"initializer_factor": 1.0,
|
7 |
+
"logit_scale_init_value": 2.6592,
|
8 |
+
"model_type": "clip",
|
9 |
+
"projection_dim": 768,
|
10 |
+
"text_config": {
|
11 |
+
"_name_or_path": "",
|
12 |
+
"add_cross_attention": false,
|
13 |
+
"architectures": null,
|
14 |
+
"attention_dropout": 0.0,
|
15 |
+
"bad_words_ids": null,
|
16 |
+
"bos_token_id": 0,
|
17 |
+
"chunk_size_feed_forward": 0,
|
18 |
+
"cross_attention_hidden_size": null,
|
19 |
+
"decoder_start_token_id": null,
|
20 |
+
"diversity_penalty": 0.0,
|
21 |
+
"do_sample": false,
|
22 |
+
"dropout": 0.0,
|
23 |
+
"early_stopping": false,
|
24 |
+
"encoder_no_repeat_ngram_size": 0,
|
25 |
+
"eos_token_id": 2,
|
26 |
+
"finetuning_task": null,
|
27 |
+
"forced_bos_token_id": null,
|
28 |
+
"forced_eos_token_id": null,
|
29 |
+
"hidden_act": "quick_gelu",
|
30 |
+
"hidden_size": 768,
|
31 |
+
"id2label": {
|
32 |
+
"0": "LABEL_0",
|
33 |
+
"1": "LABEL_1"
|
34 |
+
},
|
35 |
+
"initializer_factor": 1.0,
|
36 |
+
"initializer_range": 0.02,
|
37 |
+
"intermediate_size": 3072,
|
38 |
+
"is_decoder": false,
|
39 |
+
"is_encoder_decoder": false,
|
40 |
+
"label2id": {
|
41 |
+
"LABEL_0": 0,
|
42 |
+
"LABEL_1": 1
|
43 |
+
},
|
44 |
+
"layer_norm_eps": 1e-05,
|
45 |
+
"length_penalty": 1.0,
|
46 |
+
"max_length": 20,
|
47 |
+
"max_position_embeddings": 77,
|
48 |
+
"min_length": 0,
|
49 |
+
"model_type": "clip_text_model",
|
50 |
+
"no_repeat_ngram_size": 0,
|
51 |
+
"num_attention_heads": 12,
|
52 |
+
"num_beam_groups": 1,
|
53 |
+
"num_beams": 1,
|
54 |
+
"num_hidden_layers": 12,
|
55 |
+
"num_return_sequences": 1,
|
56 |
+
"output_attentions": false,
|
57 |
+
"output_hidden_states": false,
|
58 |
+
"output_scores": false,
|
59 |
+
"pad_token_id": 1,
|
60 |
+
"prefix": null,
|
61 |
+
"problem_type": null,
|
62 |
+
"projection_dim" : 768,
|
63 |
+
"pruned_heads": {},
|
64 |
+
"remove_invalid_values": false,
|
65 |
+
"repetition_penalty": 1.0,
|
66 |
+
"return_dict": true,
|
67 |
+
"return_dict_in_generate": false,
|
68 |
+
"sep_token_id": null,
|
69 |
+
"task_specific_params": null,
|
70 |
+
"temperature": 1.0,
|
71 |
+
"tie_encoder_decoder": false,
|
72 |
+
"tie_word_embeddings": true,
|
73 |
+
"tokenizer_class": null,
|
74 |
+
"top_k": 50,
|
75 |
+
"top_p": 1.0,
|
76 |
+
"torch_dtype": null,
|
77 |
+
"torchscript": false,
|
78 |
+
"transformers_version": "4.16.0.dev0",
|
79 |
+
"use_bfloat16": false,
|
80 |
+
"vocab_size": 49408
|
81 |
+
},
|
82 |
+
"text_config_dict": {
|
83 |
+
"hidden_size": 768,
|
84 |
+
"intermediate_size": 3072,
|
85 |
+
"num_attention_heads": 12,
|
86 |
+
"num_hidden_layers": 12,
|
87 |
+
"projection_dim": 768
|
88 |
+
},
|
89 |
+
"torch_dtype": "float32",
|
90 |
+
"transformers_version": null,
|
91 |
+
"vision_config": {
|
92 |
+
"_name_or_path": "",
|
93 |
+
"add_cross_attention": false,
|
94 |
+
"architectures": null,
|
95 |
+
"attention_dropout": 0.0,
|
96 |
+
"bad_words_ids": null,
|
97 |
+
"bos_token_id": null,
|
98 |
+
"chunk_size_feed_forward": 0,
|
99 |
+
"cross_attention_hidden_size": null,
|
100 |
+
"decoder_start_token_id": null,
|
101 |
+
"diversity_penalty": 0.0,
|
102 |
+
"do_sample": false,
|
103 |
+
"dropout": 0.0,
|
104 |
+
"early_stopping": false,
|
105 |
+
"encoder_no_repeat_ngram_size": 0,
|
106 |
+
"eos_token_id": null,
|
107 |
+
"finetuning_task": null,
|
108 |
+
"forced_bos_token_id": null,
|
109 |
+
"forced_eos_token_id": null,
|
110 |
+
"hidden_act": "quick_gelu",
|
111 |
+
"hidden_size": 1024,
|
112 |
+
"id2label": {
|
113 |
+
"0": "LABEL_0",
|
114 |
+
"1": "LABEL_1"
|
115 |
+
},
|
116 |
+
"image_size": 224,
|
117 |
+
"initializer_factor": 1.0,
|
118 |
+
"initializer_range": 0.02,
|
119 |
+
"intermediate_size": 4096,
|
120 |
+
"is_decoder": false,
|
121 |
+
"is_encoder_decoder": false,
|
122 |
+
"label2id": {
|
123 |
+
"LABEL_0": 0,
|
124 |
+
"LABEL_1": 1
|
125 |
+
},
|
126 |
+
"layer_norm_eps": 1e-05,
|
127 |
+
"length_penalty": 1.0,
|
128 |
+
"max_length": 20,
|
129 |
+
"min_length": 0,
|
130 |
+
"model_type": "clip_vision_model",
|
131 |
+
"no_repeat_ngram_size": 0,
|
132 |
+
"num_attention_heads": 16,
|
133 |
+
"num_beam_groups": 1,
|
134 |
+
"num_beams": 1,
|
135 |
+
"num_hidden_layers": 24,
|
136 |
+
"num_return_sequences": 1,
|
137 |
+
"output_attentions": false,
|
138 |
+
"output_hidden_states": false,
|
139 |
+
"output_scores": false,
|
140 |
+
"pad_token_id": null,
|
141 |
+
"patch_size": 14,
|
142 |
+
"prefix": null,
|
143 |
+
"problem_type": null,
|
144 |
+
"projection_dim" : 768,
|
145 |
+
"pruned_heads": {},
|
146 |
+
"remove_invalid_values": false,
|
147 |
+
"repetition_penalty": 1.0,
|
148 |
+
"return_dict": true,
|
149 |
+
"return_dict_in_generate": false,
|
150 |
+
"sep_token_id": null,
|
151 |
+
"task_specific_params": null,
|
152 |
+
"temperature": 1.0,
|
153 |
+
"tie_encoder_decoder": false,
|
154 |
+
"tie_word_embeddings": true,
|
155 |
+
"tokenizer_class": null,
|
156 |
+
"top_k": 50,
|
157 |
+
"top_p": 1.0,
|
158 |
+
"torch_dtype": null,
|
159 |
+
"torchscript": false,
|
160 |
+
"transformers_version": "4.16.0.dev0",
|
161 |
+
"use_bfloat16": false
|
162 |
+
},
|
163 |
+
"vision_config_dict": {
|
164 |
+
"hidden_size": 1024,
|
165 |
+
"intermediate_size": 4096,
|
166 |
+
"num_attention_heads": 16,
|
167 |
+
"num_hidden_layers": 24,
|
168 |
+
"patch_size": 14,
|
169 |
+
"projection_dim": 768
|
170 |
+
}
|
171 |
+
}
|
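Editor's note: the hotfix file above is a plain JSON config describing the CLIP ViT-L/14 text and vision towers (hidden sizes 768 and 1024); its double-hash filename matches the naming scheme of the transformers download cache, so it appears to be a pre-seeded cache entry that lets the server skip downloading this config at startup. A quick way to inspect it, not part of this commit:

import json

path = ("ui/hotfix/9c24e6cd9f499d02c4f21a033736dabd365962dc80fe3aeb57a8f85ea45a20a3"
        ".26fead7ea4f0f843f6eb4055dfd25693f1a71f3c6871b184042d4b126244e142")
with open(path) as f:
    cfg = json.load(f)

print(cfg["model_type"])                    # "clip"
print(cfg["text_config"]["hidden_size"])    # 768
print(cfg["vision_config"]["hidden_size"])  # 1024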
ui/index.html
ADDED
@@ -0,0 +1,514 @@
1 |
+
<!DOCTYPE html>
|
2 |
+
<html>
|
3 |
+
<head>
|
4 |
+
<title>Easy Diffusion</title>
|
5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
6 |
+
<meta name="theme-color" content="#673AB6">
|
7 |
+
<link rel="icon" type="image/png" href="/media/images/favicon-16x16.png" sizes="16x16">
|
8 |
+
<link rel="icon" type="image/png" href="/media/images/favicon-32x32.png" sizes="32x32">
|
9 |
+
<link rel="stylesheet" href="/media/css/jquery-confirm.min.css">
|
10 |
+
<link rel="stylesheet" href="/media/css/fonts.css">
|
11 |
+
<link rel="stylesheet" href="/media/css/themes.css">
|
12 |
+
<link rel="stylesheet" href="/media/css/main.css">
|
13 |
+
<link rel="stylesheet" href="/media/css/auto-save.css">
|
14 |
+
<link rel="stylesheet" href="/media/css/modifier-thumbnails.css">
|
15 |
+
<link rel="stylesheet" href="/media/css/fontawesome-all.min.css">
|
16 |
+
<link rel="stylesheet" href="/media/css/image-editor.css">
|
17 |
+
<link rel="stylesheet" href="/media/css/searchable-models.css">
|
18 |
+
<link rel="stylesheet" href="/media/css/image-modal.css">
|
19 |
+
<link rel="manifest" href="/media/manifest.webmanifest">
|
20 |
+
<script src="/media/js/jquery-3.6.1.min.js"></script>
|
21 |
+
<script src="/media/js/jquery-confirm.min.js"></script>
|
22 |
+
<script src="/media/js/jszip.min.js"></script>
|
23 |
+
<script src="/media/js/FileSaver.min.js"></script>
|
24 |
+
<script src="/media/js/marked.min.js"></script>
|
25 |
+
</head>
|
26 |
+
<body>
|
27 |
+
<div id="container">
|
28 |
+
<div id="top-nav">
|
29 |
+
<div id="logo">
|
30 |
+
<h1>
|
31 |
+
<img id="logo_img" src="/media/images/icon-512x512.png" >
|
32 |
+
Easy Diffusion
|
33 |
+
<small>v2.5.26 <span id="updateBranchLabel"></span></small>
|
34 |
+
</h1>
|
35 |
+
</div>
|
36 |
+
<div id="server-status">
|
37 |
+
<div id="server-status-color">●</div>
|
38 |
+
<span id="server-status-msg">Stable Diffusion is starting..</span>
|
39 |
+
</div>
|
40 |
+
<div id="tab-container" class="tab-container">
|
41 |
+
<span id="tab-main" class="tab active">
|
42 |
+
<span><i class="fa fa-image icon"></i> Generate</span>
|
43 |
+
</span>
|
44 |
+
<span id="tab-settings" class="tab">
|
45 |
+
<span><i class="fa fa-gear icon"></i> Settings</span>
|
46 |
+
</span>
|
47 |
+
<span id="tab-about" class="tab">
|
48 |
+
<span><i class="fa fa-comments icon"></i> Help & Community</span>
|
49 |
+
</span>
|
50 |
+
</div>
|
51 |
+
</div>
|
52 |
+
|
53 |
+
<div id="tab-content-wrapper">
|
54 |
+
<div id="tab-content-main" class="tab-content active flex-container">
|
55 |
+
<div id="editor">
|
56 |
+
<div id="editor-inputs">
|
57 |
+
<div id="editor-inputs-prompt" class="row">
|
58 |
+
<label for="prompt"><b>Enter Prompt</b></label> <small>or</small> <button id="promptsFromFileBtn" class="tertiaryButton">Load from a file</button>
|
59 |
+
<textarea id="prompt" class="col-free">a photograph of an astronaut riding a horse</textarea>
|
60 |
+
<input id="prompt_from_file" name="prompt_from_file" type="file" /> <!-- hidden -->
|
61 |
+
<label for="negative_prompt" class="collapsible" id="negative_prompt_handle">
|
62 |
+
Negative Prompt
|
63 |
+
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Writing-prompts#negative-prompts" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top">Click to learn more about Negative Prompts</span></i></a>
|
64 |
+
<small>(optional)</small>
|
65 |
+
</label>
|
66 |
+
<div class="collapsible-content">
|
67 |
+
<textarea id="negative_prompt" name="negative_prompt" placeholder="list the things to remove from the image (e.g. fog, green)"></textarea>
|
68 |
+
</div>
|
69 |
+
</div>
|
70 |
+
|
71 |
+
<div id="editor-inputs-init-image" class="row">
|
72 |
+
<label for="init_image">Initial Image (img2img) <small>(optional)</small> </label>
|
73 |
+
|
74 |
+
<div id="init_image_preview_container" class="image_preview_container">
|
75 |
+
<div id="init_image_wrapper">
|
76 |
+
<img id="init_image_preview" src="" />
|
77 |
+
<span id="init_image_size_box" class="img_bottom_label"></span>
|
78 |
+
<button class="init_image_clear image_clear_btn"><i class="fa-solid fa-xmark"></i></button>
|
79 |
+
</div>
|
80 |
+
<div id="init_image_buttons">
|
81 |
+
<div class="button">
|
82 |
+
<i class="fa-regular fa-folder-open"></i>
|
83 |
+
Browse
|
84 |
+
<input id="init_image" name="init_image" type="file" />
|
85 |
+
</div>
|
86 |
+
<div id="init_image_button_draw" class="button">
|
87 |
+
<i class="fa-solid fa-pencil"></i>
|
88 |
+
Draw
|
89 |
+
</div>
|
90 |
+
<div id="inpaint_button_container">
|
91 |
+
<div id="init_image_button_inpaint" class="button">
|
92 |
+
<i class="fa-solid fa-paintbrush"></i>
|
93 |
+
Inpaint
|
94 |
+
</div>
|
95 |
+
<input id="enable_mask" name="enable_mask" type="checkbox">
|
96 |
+
</div>
|
97 |
+
</div>
|
98 |
+
</div>
|
99 |
+
|
100 |
+
<div id="apply_color_correction_setting" class="pl-5"><input id="apply_color_correction" name="apply_color_correction" type="checkbox"> <label for="apply_color_correction">Preserve color profile <small>(helps during inpainting)</small></label></div>
|
101 |
+
|
102 |
+
</div>
|
103 |
+
|
104 |
+
<div id="editor-inputs-tags-container" class="row">
|
105 |
+
<label>Image Modifiers <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip right">click an Image Modifier to remove it, right-click to temporarily disable it, use Ctrl+Mouse Wheel to adjust its weight</span></i></label>
|
106 |
+
<div id="editor-inputs-tags-list"></div>
|
107 |
+
</div>
|
108 |
+
|
109 |
+
<button id="makeImage" class="primaryButton">Make Image</button>
|
110 |
+
<div id="render-buttons">
|
111 |
+
<button id="stopImage" class="secondaryButton">Stop All</button>
|
112 |
+
<button id="pause"><i class="fa-solid fa-pause"></i> Pause All</button>
|
113 |
+
<button id="resume"><i class="fa-solid fa-play"></i> Resume</button>
|
114 |
+
</div>
|
115 |
+
</div>
|
116 |
+
|
117 |
+
<span class="line-separator"></span>
|
118 |
+
|
119 |
+
<div id="editor-settings" class="settings-box panel-box">
|
120 |
+
<h4 class="collapsible">
|
121 |
+
Image Settings
|
122 |
+
<i id="reset-image-settings" class="fa-solid fa-arrow-rotate-left section-button">
|
123 |
+
<span class="simple-tooltip top-left">
|
124 |
+
Reset Image Settings
|
125 |
+
</span>
|
126 |
+
</i>
|
127 |
+
</h4>
|
128 |
+
<div id="editor-settings-entries" class="collapsible-content">
|
129 |
+
<div><table>
|
130 |
+
<tr><b class="settings-subheader">Image Settings</b></tr>
|
131 |
+
<tr class="pl-5"><td><label for="seed">Seed:</label></td><td><input id="seed" name="seed" size="10" value="0" onkeypress="preventNonNumericalInput(event)"> <input id="random_seed" name="random_seed" type="checkbox" checked><label for="random_seed">Random</label></td></tr>
|
132 |
+
<tr class="pl-5"><td><label for="num_outputs_total">Number of Images:</label></td><td><input id="num_outputs_total" name="num_outputs_total" value="1" size="1" onkeypress="preventNonNumericalInput(event)"> <label><small>(total)</small></label> <input id="num_outputs_parallel" name="num_outputs_parallel" value="1" size="1" onkeypress="preventNonNumericalInput(event)"> <label for="num_outputs_parallel"><small>(in parallel)</small></label></td></tr>
|
133 |
+
<tr class="pl-5"><td><label for="stable_diffusion_model">Model:</label></td><td class="model-input">
|
134 |
+
<input id="stable_diffusion_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
|
135 |
+
<button id="reload-models" class="secondaryButton reloadModels"><i class='fa-solid fa-rotate'></i></button>
|
136 |
+
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Custom-Models" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about custom models</span></i></a>
|
137 |
+
</td></tr>
|
138 |
+
<!-- <tr id="modelConfigSelection" class="pl-5"><td><label for="model_config">Model Config:</i></label></td><td>
|
139 |
+
<select id="model_config" name="model_config">
|
140 |
+
</select>
|
141 |
+
</td></tr> -->
|
142 |
+
<tr class="pl-5"><td><label for="vae_model">Custom VAE:</i></label></td><td>
|
143 |
+
<input id="vae_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
|
144 |
+
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/VAE-Variational-Auto-Encoder" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about VAEs</span></i></a>
|
145 |
+
</td></tr>
|
146 |
+
<tr id="samplerSelection" class="pl-5"><td><label for="sampler_name">Sampler:</label></td><td>
|
147 |
+
<select id="sampler_name" name="sampler_name">
|
148 |
+
<option value="plms">PLMS</option>
|
149 |
+
<option value="ddim">DDIM</option>
|
150 |
+
<option value="heun">Heun</option>
|
151 |
+
<option value="euler">Euler</option>
|
152 |
+
<option value="euler_a" selected>Euler Ancestral</option>
|
153 |
+
<option value="dpm2">DPM2</option>
|
154 |
+
<option value="dpm2_a">DPM2 Ancestral</option>
|
155 |
+
<option value="lms">LMS</option>
|
156 |
+
<option value="dpm_solver_stability">DPM Solver (Stability AI)</option>
|
157 |
+
<option value="dpmpp_2s_a">DPM++ 2s Ancestral (Karras)</option>
|
158 |
+
<option value="dpmpp_2m">DPM++ 2m (Karras)</option>
|
159 |
+
<option value="dpmpp_sde">DPM++ SDE (Karras)</option>
|
160 |
+
<option value="dpm_fast">DPM Fast (Karras)</option>
|
161 |
+
<option value="dpm_adaptive">DPM Adaptive (Karras)</option>
|
162 |
+
<option value="unipc_snr">UniPC SNR</option>
|
163 |
+
<option value="unipc_tu">UniPC TU</option>
|
164 |
+
<option value="unipc_snr_2">UniPC SNR 2</option>
|
165 |
+
<option value="unipc_tu_2">UniPC TC 2</option>
|
166 |
+
<option value="unipc_tq">UniPC TQ</option>
|
167 |
+
</select>
|
168 |
+
<a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/How-to-Use#samplers" target="_blank"><i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">Click to learn more about samplers</span></i></a>
|
169 |
+
</td></tr>
|
170 |
+
<tr class="pl-5"><td><label>Image Size: </label></td><td>
|
171 |
+
<select id="width" name="width" value="512">
|
172 |
+
<option value="128">128 (*)</option>
|
173 |
+
<option value="192">192</option>
|
174 |
+
<option value="256">256 (*)</option>
|
175 |
+
<option value="320">320</option>
|
176 |
+
<option value="384">384</option>
|
177 |
+
<option value="448">448</option>
|
178 |
+
<option value="512" selected>512 (*)</option>
|
179 |
+
<option value="576">576</option>
|
180 |
+
<option value="640">640</option>
|
181 |
+
<option value="704">704</option>
|
182 |
+
<option value="768">768 (*)</option>
|
183 |
+
<option value="832">832</option>
|
184 |
+
<option value="896">896</option>
|
185 |
+
<option value="960">960</option>
|
186 |
+
<option value="1024">1024 (*)</option>
|
187 |
+
<option value="1280">1280</option>
|
188 |
+
<option value="1536">1536</option>
|
189 |
+
<option value="1792">1792</option>
|
190 |
+
<option value="2048">2048</option>
|
191 |
+
</select>
|
192 |
+
<label for="width"><small>(width)</small></label>
|
193 |
+
<select id="height" name="height" value="512">
|
194 |
+
<option value="128">128 (*)</option>
|
195 |
+
<option value="192">192</option>
|
196 |
+
<option value="256">256 (*)</option>
|
197 |
+
<option value="320">320</option>
|
198 |
+
<option value="384">384</option>
|
199 |
+
<option value="448">448</option>
|
200 |
+
<option value="512" selected>512 (*)</option>
|
201 |
+
<option value="576">576</option>
|
202 |
+
<option value="640">640</option>
|
203 |
+
<option value="704">704</option>
|
204 |
+
<option value="768">768 (*)</option>
|
205 |
+
<option value="832">832</option>
|
206 |
+
<option value="896">896</option>
|
207 |
+
<option value="960">960</option>
|
208 |
+
<option value="1024">1024 (*)</option>
|
209 |
+
<option value="1280">1280</option>
|
210 |
+
<option value="1536">1536</option>
|
211 |
+
<option value="1792">1792</option>
|
212 |
+
<option value="2048">2048</option>
|
213 |
+
</select>
|
214 |
+
<label for="height"><small>(height)</small></label>
|
215 |
+
<div id="small_image_warning" class="displayNone">Small image sizes can cause bad image quality</div>
|
216 |
+
</td></tr>
|
217 |
+
<tr class="pl-5"><td><label for="num_inference_steps">Inference Steps:</label></td><td> <input id="num_inference_steps" name="num_inference_steps" size="4" value="25" onkeypress="preventNonNumericalInput(event)"></td></tr>
|
218 |
+
<tr class="pl-5"><td><label for="guidance_scale_slider">Guidance Scale:</label></td><td> <input id="guidance_scale_slider" name="guidance_scale_slider" class="editor-slider" value="75" type="range" min="11" max="500"> <input id="guidance_scale" name="guidance_scale" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"></td></tr>
|
219 |
+
<tr id="prompt_strength_container" class="pl-5"><td><label for="prompt_strength_slider">Prompt Strength:</label></td><td> <input id="prompt_strength_slider" name="prompt_strength_slider" class="editor-slider" value="80" type="range" min="0" max="99"> <input id="prompt_strength" name="prompt_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td></tr>
|
220 |
+
<tr class="pl-5"><td><label for="hypernetwork_model">Hypernetwork:</i></label></td><td>
|
221 |
+
<input id="hypernetwork_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" />
|
222 |
+
</td></tr>
|
223 |
+
<tr id="hypernetwork_strength_container" class="pl-5">
|
224 |
+
<td><label for="hypernetwork_strength_slider">Hypernetwork Strength:</label></td>
|
225 |
+
<td> <input id="hypernetwork_strength_slider" name="hypernetwork_strength_slider" class="editor-slider" value="100" type="range" min="0" max="100"> <input id="hypernetwork_strength" name="hypernetwork_strength" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)"><br/></td>
|
226 |
+
</tr>
|
227 |
+
<tr class="pl-5"><td><label for="output_format">Output Format:</label></td><td>
|
228 |
+
<select id="output_format" name="output_format">
|
229 |
+
<option value="jpeg" selected>jpeg</option>
|
230 |
+
<option value="png">png</option>
|
231 |
+
<option value="webp">webp</option>
|
232 |
+
</select>
|
233 |
+
</td></tr>
|
234 |
+
<tr class="pl-5" id="output_quality_row"><td><label for="output_quality">Image Quality:</label></td><td>
|
235 |
+
<input id="output_quality_slider" name="output_quality" class="editor-slider" value="75" type="range" min="10" max="95"> <input id="output_quality" name="output_quality" size="4" pattern="^[0-9\.]+$" onkeypress="preventNonNumericalInput(event)">
|
236 |
+
</td></tr>
|
237 |
+
</table></div>
|
238 |
+
|
239 |
+
<div><ul>
|
240 |
+
<li><b class="settings-subheader">Render Settings</b></li>
|
241 |
+
<li class="pl-5"><input id="stream_image_progress" name="stream_image_progress" type="checkbox"> <label for="stream_image_progress">Show a live preview <small>(uses more VRAM, slower images)</small></label></li>
|
242 |
+
<li class="pl-5"><input id="use_face_correction" name="use_face_correction" type="checkbox"> <label for="use_face_correction">Fix incorrect faces and eyes</label> <div style="display:inline-block;"><input id="gfpgan_model" type="text" spellcheck="false" autocomplete="off" class="model-filter" data-path="" /></div></li>
|
243 |
+
<li class="pl-5">
|
244 |
+
<input id="use_upscale" name="use_upscale" type="checkbox"> <label for="use_upscale">Scale up by</label>
|
245 |
+
<select id="upscale_amount" name="upscale_amount">
|
246 |
+
<option value="2">2x</option>
|
247 |
+
<option value="4" selected>4x</option>
|
248 |
+
</select>
|
249 |
+
with
|
250 |
+
<select id="upscale_model" name="upscale_model">
|
251 |
+
<option value="RealESRGAN_x4plus" selected>RealESRGAN_x4plus</option>
|
252 |
+
<option value="RealESRGAN_x4plus_anime_6B">RealESRGAN_x4plus_anime_6B</option>
|
253 |
+
</select>
|
254 |
+
</li>
|
255 |
+
<li class="pl-5"><input id="show_only_filtered_image" name="show_only_filtered_image" type="checkbox" checked> <label for="show_only_filtered_image">Show only the corrected/upscaled image</label></li>
|
256 |
+
</ul></div>
|
257 |
+
</div>
|
258 |
+
</div>
|
259 |
+
|
260 |
+
<div id="editor-modifiers" class="panel-box">
|
261 |
+
<h4 class="collapsible">
|
262 |
+
Image Modifiers (art styles, tags etc)
|
263 |
+
<i id="modifier-settings-btn" class="fa-solid fa-gear section-button">
|
264 |
+
<span class="simple-tooltip left">
|
265 |
+
Add Custom Modifiers
|
266 |
+
</span>
|
267 |
+
</i>
|
268 |
+
</h4>
|
269 |
+
<div id="editor-modifiers-entries" class="collapsible-content">
|
270 |
+
<div id="editor-modifiers-entries-toolbar">
|
271 |
+
<label for="preview-image">Image Style:</label>
|
272 |
+
<select id="preview-image" name="preview-image" value="portrait">
|
273 |
+
<option value="portrait" selected="">Face</option>
|
274 |
+
<option value="landscape">Landscape</option>
|
275 |
+
</select>
|
276 |
+
|
277 |
+
<label for="modifier-card-size-slider">Thumbnail Size:</label>
|
278 |
+
<input id="modifier-card-size-slider" name="modifier-card-size-slider" value="0" type="range" min="-3" max="5">
|
279 |
+
</div>
|
280 |
+
</div>
|
281 |
+
</div>
|
282 |
+
</div>
|
283 |
+
|
284 |
+
<div id="preview" class="col-free">
|
285 |
+
<div id="initial-text">
|
286 |
+
Type a prompt and press the "Make Image" button.<br/><br/>You can set an "Initial Image" if you want to guide the AI.<br/><br/>
|
287 |
+
You can also add modifiers like "Realistic", "Pencil Sketch", "ArtStation" etc by browsing through the "Image Modifiers" section
|
288 |
+
and selecting the desired modifiers.<br/><br/>
|
289 |
+
Click "Image Settings" for additional settings like seed, image size, number of images to generate etc.<br/><br/>Enjoy! :)
|
290 |
+
</div>
|
291 |
+
|
292 |
+
<div id="preview-content">
|
293 |
+
<div id="preview-tools">
|
294 |
+
<button id="clear-all-previews" class="secondaryButton"><i class="fa-solid fa-trash-can icon"></i> Clear All</button>
|
295 |
+
<button class="tertiaryButton" id="show-download-popup"><i class="fa-solid fa-download"></i> Download images</button>
|
296 |
+
<div class="display-settings">
|
297 |
+
<span class="auto-scroll"></span> <!-- hack for Rabbit Hole update -->
|
298 |
+
<button id="auto_scroll_btn" class="tertiaryButton">
|
299 |
+
<i class="fa-solid fa-arrows-up-to-line icon"></i>
|
300 |
+
<input id="auto_scroll" name="auto_scroll" type="checkbox" style="display: none">
|
301 |
+
<span class="simple-tooltip left">
|
302 |
+
Scroll to generated image (<span class="state">OFF</span>)
|
303 |
+
</span>
|
304 |
+
</button>
|
305 |
+
<button class="dropdown tertiaryButton">
|
306 |
+
<i class="fa-solid fa-magnifying-glass-plus icon dropbtn"></i>
|
307 |
+
<span class="simple-tooltip left">
|
308 |
+
Image Size
|
309 |
+
</span>
|
310 |
+
</button>
|
311 |
+
<div class="dropdown-content">
|
312 |
+
<div class="dropdown-item">
|
313 |
+
<input id="thumbnail_size" name="thumbnail_size" class="editor-slider" type="range" value="70" min="5" max="200" oninput="sliderUpdate(event)">
|
314 |
+
<input id="thumbnail_size-input" name="thumbnail_size-input" size="3" value="70" pattern="^[0-9.]+$" onkeypress="preventNonNumericalInput(event)" oninput="sliderUpdate(event)"> %
|
315 |
+
</div>
|
316 |
+
</div>
|
317 |
+
</div>
|
318 |
+
<div class="clearfix" style="clear: both;"></div>
|
319 |
+
</div>
|
320 |
+
</div>
|
321 |
+
</div>
|
322 |
+
</div>
|
323 |
+
|
324 |
+
<div id="tab-content-settings" class="tab-content">
|
325 |
+
<div id="system-settings" class="tab-content-inner">
|
326 |
+
<h1>System Settings</h1>
|
327 |
+
<div class="parameters-table"></div>
|
328 |
+
<br/>
|
329 |
+
<button id="save-system-settings-btn" class="primaryButton">Save</button>
|
330 |
+
<br/><br/>
|
331 |
+
<div>
|
332 |
+
<h3><i class="fa fa-microchip icon"></i> System Info</h3>
|
333 |
+
<div id="system-info">
|
334 |
+
<table>
|
335 |
+
<tr><td><label>Processor:</label></td><td id="system-info-cpu" class="value"></td></tr>
|
336 |
+
<tr><td><label>Compatible Graphics Cards (all):</label></td><td id="system-info-gpus-all" class="value"></td></tr>
|
337 |
+
<tr><td></td><td> </td></tr>
|
338 |
+
<tr><td><label>Used for rendering 🔥:</label></td><td id="system-info-rendering-devices" class="value"></td></tr>
|
339 |
+
<tr><td><label>Server Addresses <i class="fa-solid fa-circle-question help-btn"><span class="simple-tooltip top-left">You can access Stable Diffusion UI from other devices using these addresses</span></i> :</label></td><td id="system-info-server-hosts" class="value"></td></tr>
|
340 |
+
</table>
|
341 |
+
</div>
|
342 |
+
</div>
|
343 |
+
|
344 |
+
</div>
|
345 |
+
</div>
|
346 |
+
<div id="tab-content-about" class="tab-content">
|
347 |
+
<div class="tab-content-inner">
|
348 |
+
<div class="float-container">
|
349 |
+
<div class="float-child">
|
350 |
+
<h1>Help</h1>
|
351 |
+
<ul id="help-links">
|
352 |
+
<li><span class="help-section">Using the software</span>
|
353 |
+
<ul>
|
354 |
+
<li> <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/How-To-Use" target="_blank"><i class="fa-solid fa-book fa-fw"></i> How to use</a>
|
355 |
+
<li> <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/UI-Overview" target="_blank"><i class="fa-solid fa-list fa-fw"></i> UI Overview</a>
|
356 |
+
<li> <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Writing-Prompts" target="_blank"><i class="fa-solid fa-pen-to-square fa-fw"></i> Writing prompts</a>
|
357 |
+
<li> <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Inpainting" target="_blank"><i class="fa-solid fa-paintbrush fa-fw"></i> Inpainting</a>
|
358 |
+
<li> <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Run-on-Multiple-GPUs" target="_blank"><i class="fa-solid fa-paintbrush fa-fw"></i> Run on Multiple GPUs</a>
|
359 |
+
</ul>
|
360 |
+
|
361 |
+
<li><span class="help-section">Installation</span>
|
362 |
+
<ul>
|
363 |
+
<li> <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Troubleshooting" target="_blank"><i class="fa-solid fa-circle-question fa-fw"></i> Troubleshooting</a>
|
364 |
+
</ul>
|
365 |
+
|
366 |
+
<li><span class="help-section">Downloadable Content</span>
|
367 |
+
<ul>
|
368 |
+
<li> <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/Custom-Models" target="_blank"><i class="fa-solid fa-images fa-fw"></i> Custom Models</a>
|
369 |
+
<li> <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/UI-Plugins" target="_blank"><i class="fa-solid fa-puzzle-piece fa-fw"></i> UI Plugins</a>
|
370 |
+
<li> <a href="https://github.com/cmdr2/stable-diffusion-ui/wiki/VAE-Variational-Auto-Encoder" target="_blank"><i class="fa-solid fa-hand-sparkles fa-fw"></i> VAE Variational Auto Encoder</a>
|
371 |
+
</ul>
|
372 |
+
</ul>
|
373 |
+
</div>
|
374 |
+
|
375 |
+
<div class="float-child">
|
376 |
+
<h1>Community</h1>
|
377 |
+
<ul id="community-links">
|
378 |
+
<li><a href="https://discord.com/invite/u9yhsFmEkB" target="_blank"><i class="fa-brands fa-discord fa-fw"></i> Discord user community</a></li>
|
379 |
+
<li><a href="https://www.reddit.com/r/StableDiffusionUI/" target="_blank"><i class="fa-brands fa-reddit fa-fw"></i> Reddit community</a></li>
|
380 |
+
<li><a href="https://github.com/cmdr2/stable-diffusion-ui" target="_blank"><i class="fa-brands fa-github fa-fw"></i> Source code on GitHub</a></li>
|
381 |
+
</ul>
|
382 |
+
</div>
|
383 |
+
</div>
|
384 |
+
</div>
|
385 |
+
</div>
|
386 |
+
</div>
|
387 |
+
|
388 |
+
|
389 |
+
<div class="popup" id="download-images-popup">
|
390 |
+
<div>
|
391 |
+
<i class="close-button fa-solid fa-xmark"></i>
|
392 |
+
<h1>Download all images</h1>
|
393 |
+
<div class="parameters-table">
|
394 |
+
<div>
|
395 |
+
<div><i class="fa fa-file-zipper"></i></div>
|
396 |
+
<div><label for="theme">Download as a ZIP file</label><small>Instead of downloading individual files, generate one zip file with all images</small></div>
|
397 |
+
<div><div class="input-toggle"><input id="zip_toggle" name="zip_toggle" checked="" type="checkbox"><label for="zip_toggle"></label></div></div>
|
398 |
+
</div>
|
399 |
+
<div id="download-add-folders">
|
400 |
+
<div><i class="fa fa-folder-tree"></i></div>
|
401 |
+
<div><label for="theme">Add per-job folders</label><small>Place images into job folders</small></div>
|
402 |
+
<div><div class="input-toggle"><input id="tree_toggle" name="tree_toggle" checked="" type="checkbox"><label for="tree_toggle"></label></div></div>
|
403 |
+
</div>
|
404 |
+
<div>
|
405 |
+
<div><i class="fa fa-sliders"></i></div>
|
406 |
+
<div><label for="theme">Add metadata files</label><small>For each image, also download a JSON file with all the settings used to generate the image</small></div>
|
407 |
+
<div><div class="input-toggle"><input id="json_toggle" name="json_toggle" checked="" type="checkbox"><label for="json_toggle"></label></div></div>
|
408 |
+
</div>
|
409 |
+
</div>
|
410 |
+
<br/>
|
411 |
+
<button id="save-all-images" class="primaryButton"><i class="fa-solid fa-images"></i> Start download</button>
|
412 |
+
</div>
|
413 |
+
</div>
|
414 |
+
<div id="save-settings-config" class="popup">
|
415 |
+
<div>
|
416 |
+
<i class="close-button fa-solid fa-xmark"></i>
|
417 |
+
<h1>Save Settings Configuration</h1>
|
418 |
+
<p>Select which settings should be remembered when restarting the browser</p>
|
419 |
+
<table id="save-settings-config-table" class="form-table">
|
420 |
+
</table>
|
421 |
+
</div>
|
422 |
+
</div>
|
423 |
+
|
424 |
+
<div id="modifier-settings-config" class="popup" tabindex="0">
|
425 |
+
<div>
|
426 |
+
<i class="close-button fa-solid fa-xmark"></i>
|
427 |
+
<h1>Modifier Settings</h1>
|
428 |
+
<p>Set your custom modifiers (one per line)</p>
|
429 |
+
<textarea id="custom-modifiers-input" placeholder="Enter your custom modifiers, one-per-line" spellcheck="false"></textarea>
|
430 |
+
<p><small><b>Tip:</b> You can include special characters like {} () [] and |. You can also put multiple comma-separated phrases in a single line, to make a single modifier that combines all of those.</small></p>
|
431 |
+
</div>
|
432 |
+
</div>
|
433 |
+
|
434 |
+
<div id="image-editor" class="popup image-editor-popup">
|
435 |
+
<div>
|
436 |
+
<i class="close-button fa-solid fa-xmark"></i>
|
437 |
+
<h1>Image Editor</h1>
|
438 |
+
<div class="flex-container">
|
439 |
+
<div class="editor-controls-left"></div>
|
440 |
+
<div class="editor-controls-center">
|
441 |
+
<div></div>
|
442 |
+
</div>
|
443 |
+
<div class="editor-controls-right">
|
444 |
+
<div></div>
|
445 |
+
</div>
|
446 |
+
</div>
|
447 |
+
</div>
|
448 |
+
</div>
|
449 |
+
|
450 |
+
<div id="image-inpainter" class="popup image-editor-popup">
|
451 |
+
<div>
|
452 |
+
<i class="close-button fa-solid fa-xmark"></i>
|
453 |
+
<h1>Inpainter</h1>
|
454 |
+
<div class="flex-container">
|
455 |
+
<div class="editor-controls-left"></div>
|
456 |
+
<div class="editor-controls-center">
|
457 |
+
<div></div>
|
458 |
+
</div>
|
459 |
+
<div class="editor-controls-right">
|
460 |
+
<div></div>
|
461 |
+
</div>
|
462 |
+
</div>
|
463 |
+
</div>
|
464 |
+
</div>
|
465 |
+
|
466 |
+
<div id="footer-spacer"></div>
|
467 |
+
<div id="footer">
|
468 |
+
<div class="line-separator"> </div>
|
469 |
+
<p>If you found this project useful and want to help keep it alive, please <a href="https://ko-fi.com/cmdr2_stablediffusion_ui" target="_blank"><img src="/media/images/kofi.png" id="coffeeButton"></a> to help cover the cost of development and maintenance! Thank you for your support!</p>
|
470 |
+
<p>Please feel free to join the <a href="https://discord.com/invite/u9yhsFmEkB" target="_blank">discord community</a> or <a href="https://github.com/cmdr2/stable-diffusion-ui/issues" target="_blank">file an issue</a> if you have any problems or suggestions in using this interface.</p>
|
471 |
+
<div id="footer-legal">
|
472 |
+
<p><b>Disclaimer:</b> The authors of this project are not responsible for any content generated using this interface.</p>
|
473 |
+
<p>The license of this software forbids you from sharing any content that violates any laws, produces harm to a person, disseminates personal information meant for harm, <br/>spreads misinformation, or targets vulnerable groups. For the full list of restrictions, please read <a href="https://github.com/cmdr2/stable-diffusion-ui/blob/main/LICENSE" target="_blank">the license</a>.</p>
<p>By using this software, you consent to the terms and conditions of the license.</p>
</div>
</div>
</div>
</body>
<script src="media/js/utils.js"></script>
<script src="media/js/engine.js"></script>
<script src="media/js/parameters.js"></script>
<script src="media/js/plugins.js"></script>

<script src="media/js/image-modifiers.js"></script>
<script src="media/js/auto-save.js"></script>

<script src="media/js/searchable-models.js"></script>
<script src="media/js/main.js"></script>
<script src="media/js/themes.js"></script>
<script src="media/js/dnd.js"></script>
<script src="media/js/image-editor.js"></script>
<script src="media/js/image-modal.js"></script>
<script>
async function init() {
    await initSettings()
    await getModels()
    await getAppConfig()
    await loadUIPlugins()
    await loadModifiers()
    await getSystemInfo()

    SD.init({
        events: {
            statusChange: setServerStatus,
            idle: onIdle
        }
    })

    playSound()
}

init()
</script>
</html>
ui/main.py
ADDED
@@ -0,0 +1,10 @@
from easydiffusion import model_manager, app, server
from easydiffusion.server import server_api # required for uvicorn

# Init the app
model_manager.init()
app.init()
server.init()

# start the browser ui
app.open_browser()
ui/media/css/auto-save.css
ADDED
@@ -0,0 +1,81 @@
/* Auto-Settings Styling */
#auto_save_settings ~ button {
    margin: 5px;
}
#auto_save_settings:not(:checked) ~ button {
    display: none;
}

.form-table {
    margin: auto;
}

.form-table th {
    padding-top: 15px;
    padding-bottom: 5px;
}

.form-table td:first-child > *,
.form-table th:first-child > * {
    float: right;
    white-space: nowrap;
}

.form-table td:last-child > *,
.form-table th:last-child > * {
    float: left;
}


.parameters-table {
    display: flex;
    flex-direction: column;
    gap: 1px;
}

.parameters-table > div {
    background: var(--background-color2);
    display: flex;
    padding: 0px 4px;
}

.parameters-table > div > div {
    padding: 10px;
    display: flex;
    align-items: center;
    justify-content: center;
}

.parameters-table small {
    color: rgb(153, 153, 153);
}

.parameters-table > div > div:nth-child(1) {
    font-size: 20px;
    width: 45px;
}

.parameters-table > div > div:nth-child(2) {
    flex: 1;
    flex-direction: column;
    text-align: left;
    justify-content: center;
    align-items: start;
    gap: 4px;
}

.parameters-table > div > div:nth-child(3) {
    text-align: right;
}

.parameters-table > div:first-child {
    border-radius: 12px 12px 0px 0px;
}

.parameters-table > div:last-child {
    border-radius: 0px 0px 12px 12px;
}

.parameters-table .fa-fire {
    color: #F7630C;
}
ui/media/css/fontawesome-all.min.css
ADDED
The diff for this file is too large to render.
See raw diff
ui/media/css/fonts.css
ADDED
@@ -0,0 +1,40 @@
/* work-sans-regular - latin */
@font-face {
    font-family: 'Work Sans';
    font-style: normal;
    font-weight: 400;
    src: local(''),
         url('/media/fonts/work-sans-v18-latin-regular.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */
         url('/media/fonts/work-sans-v18-latin-regular.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */
}

/* work-sans-600 - latin */
@font-face {
    font-family: 'Work Sans';
    font-style: normal;
    font-weight: 600;
    src: local(''),
         url('/media/fonts/work-sans-v18-latin-600.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */
         url('/media/fonts/work-sans-v18-latin-600.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */
}

/* work-sans-700 - latin */
@font-face {
    font-family: 'Work Sans';
    font-style: normal;
    font-weight: 700;
    src: local(''),
         url('/media/fonts/work-sans-v18-latin-700.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */
         url('/media/fonts/work-sans-v18-latin-700.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */
}

/* work-sans-800 - latin */
@font-face {
    font-family: 'Work Sans';
    font-style: normal;
    font-weight: 800;
    src: local(''),
         url('/media/fonts/work-sans-v18-latin-800.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */
         url('/media/fonts/work-sans-v18-latin-800.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */
}