hysts HF staff committed on
Commit
928d246
1 Parent(s): 18033d1
Files changed (3) hide show
  1. .pre-commit-config.yaml +10 -0
  2. app.py +18 -8
  3. notebook.ipynb +119 -0
.pre-commit-config.yaml CHANGED
@@ -34,3 +34,13 @@ repos:
34
  hooks:
35
  - id: yapf
36
  args: ['--parallel', '--in-place']
 
 
 
 
 
 
 
 
 
 
 
34
  hooks:
35
  - id: yapf
36
  args: ['--parallel', '--in-place']
37
+ - repo: https://github.com/kynan/nbstripout
38
+ rev: 0.6.0
39
+ hooks:
40
+ - id: nbstripout
41
+ args: ['--extra-keys', 'metadata.interpreter metadata.kernelspec cell.metadata.pycharm']
42
+ - repo: https://github.com/nbQA-dev/nbQA
43
+ rev: 1.7.0
44
+ hooks:
45
+ - id: nbqa-isort
46
+ - id: nbqa-yapf
app.py CHANGED
@@ -21,6 +21,7 @@ CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv(
21
  MAX_IMAGE_SIZE = int(os.getenv('MAX_IMAGE_SIZE', '1024'))
22
  USE_TORCH_COMPILE = os.getenv('USE_TORCH_COMPILE') == '1'
23
  ENABLE_CPU_OFFLOAD = os.getenv('ENABLE_CPU_OFFLOAD') == '1'
 
24
 
25
  device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
26
  if torch.cuda.is_available():
@@ -29,23 +30,30 @@ if torch.cuda.is_available():
29
  torch_dtype=torch.float16,
30
  use_safetensors=True,
31
  variant='fp16')
32
- refiner = DiffusionPipeline.from_pretrained(
33
- 'stabilityai/stable-diffusion-xl-refiner-1.0',
34
- torch_dtype=torch.float16,
35
- use_safetensors=True,
36
- variant='fp16')
 
37
 
38
  if ENABLE_CPU_OFFLOAD:
39
  pipe.enable_model_cpu_offload()
40
- refiner.enable_model_cpu_offload()
 
41
  else:
42
  pipe.to(device)
43
- refiner.to(device)
 
44
 
45
  if USE_TORCH_COMPILE:
46
  pipe.unet = torch.compile(pipe.unet,
47
  mode='reduce-overhead',
48
  fullgraph=True)
 
 
 
 
49
  else:
50
  pipe = None
51
  refiner = None
@@ -182,7 +190,9 @@ with gr.Blocks(css='style.css') as demo:
182
  step=32,
183
  value=1024,
184
  )
185
- apply_refiner = gr.Checkbox(label='Apply refiner', value=False)
 
 
186
  with gr.Row():
187
  guidance_scale_base = gr.Slider(label='Guidance scale for base',
188
  minimum=1,
 
21
  MAX_IMAGE_SIZE = int(os.getenv('MAX_IMAGE_SIZE', '1024'))
22
  USE_TORCH_COMPILE = os.getenv('USE_TORCH_COMPILE') == '1'
23
  ENABLE_CPU_OFFLOAD = os.getenv('ENABLE_CPU_OFFLOAD') == '1'
24
+ ENABLE_REFINER = os.getenv('ENABLE_REFINER', '1') == '1'
25
 
26
  device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
27
  if torch.cuda.is_available():
 
30
  torch_dtype=torch.float16,
31
  use_safetensors=True,
32
  variant='fp16')
33
+ if ENABLE_REFINER:
34
+ refiner = DiffusionPipeline.from_pretrained(
35
+ 'stabilityai/stable-diffusion-xl-refiner-1.0',
36
+ torch_dtype=torch.float16,
37
+ use_safetensors=True,
38
+ variant='fp16')
39
 
40
  if ENABLE_CPU_OFFLOAD:
41
  pipe.enable_model_cpu_offload()
42
+ if ENABLE_REFINER:
43
+ refiner.enable_model_cpu_offload()
44
  else:
45
  pipe.to(device)
46
+ if ENABLE_REFINER:
47
+ refiner.to(device)
48
 
49
  if USE_TORCH_COMPILE:
50
  pipe.unet = torch.compile(pipe.unet,
51
  mode='reduce-overhead',
52
  fullgraph=True)
53
+ if ENABLE_REFINER:
54
+ refiner.unet = torch.compile(refiner.unet,
55
+ mode='reduce-overhead',
56
+ fullgraph=True)
57
  else:
58
  pipe = None
59
  refiner = None
 
190
  step=32,
191
  value=1024,
192
  )
193
+ apply_refiner = gr.Checkbox(label='Apply refiner',
194
+ value=False,
195
+ visible=ENABLE_REFINER)
196
  with gr.Row():
197
  guidance_scale_base = gr.Slider(label='Guidance scale for base',
198
  minimum=1,
notebook.ipynb ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {
7
+ "colab": {
8
+ "base_uri": "https://localhost:8080/"
9
+ },
10
+ "id": "-ayiROzqJeXB",
11
+ "outputId": "c7710012-c311-4203-9dc8-6333f0eb0e66"
12
+ },
13
+ "outputs": [],
14
+ "source": [
15
+ "!git clone -q https://huggingface.co/spaces/hysts/SD-XL"
16
+ ]
17
+ },
18
+ {
19
+ "cell_type": "code",
20
+ "execution_count": null,
21
+ "metadata": {
22
+ "colab": {
23
+ "base_uri": "https://localhost:8080/"
24
+ },
25
+ "id": "d-GcdYVAJmt0",
26
+ "outputId": "d7489bf9-df54-4e96-ed1e-cf392f34b890"
27
+ },
28
+ "outputs": [],
29
+ "source": [
30
+ "%cd SD-XL"
31
+ ]
32
+ },
33
+ {
34
+ "cell_type": "code",
35
+ "execution_count": null,
36
+ "metadata": {
37
+ "id": "vTWR-Xr6JoBJ"
38
+ },
39
+ "outputs": [],
40
+ "source": [
41
+ "!pip install -q -r requirements.txt"
42
+ ]
43
+ },
44
+ {
45
+ "cell_type": "code",
46
+ "execution_count": null,
47
+ "metadata": {
48
+ "id": "QSF_GqTKJsN5"
49
+ },
50
+ "outputs": [],
51
+ "source": [
52
+ "import os\n",
53
+ "\n",
54
+ "os.environ['ENABLE_REFINER'] = '0'"
55
+ ]
56
+ },
57
+ {
58
+ "cell_type": "code",
59
+ "execution_count": null,
60
+ "metadata": {
61
+ "colab": {
62
+ "base_uri": "https://localhost:8080/",
63
+ "height": 710,
64
+ "referenced_widgets": [
65
+ "68c1e33d84b94f009db258e278fe7068",
66
+ "b1b1ca6d1cc44a738c3b4b6de17f3a5b",
67
+ "104833166be14046873bfea2c1a2a887",
68
+ "32f25821a48d4c9589f58c134e3b56d7",
69
+ "3ed7cc7759074df58a91fd7fb28a4933",
70
+ "c8885bd4a35d4cdcbb6acce5c52e15e2",
71
+ "5d1d83dfd090460d9f948b71f95aaed8",
72
+ "773e06ed1d734e53a7def5305cd35131",
73
+ "753b336dbeb147349e4520715035d8da",
74
+ "c5215236213242b89a971a1095afcea5",
75
+ "bd0a6a0e16944533b59eaa3f5188e99f",
76
+ "96b1de32a367400bba75babd39bc7308",
77
+ "65291f8203964f4499a1b422af91f75e",
78
+ "0c3fad2a850b4320b47586ff4d0ac73e",
79
+ "69a6be1033c5424988a702c5d69590ee",
80
+ "b22729413d9b449a94892b91d95cf1e4",
81
+ "6c8f51c69f394eeea67eb515831f60b2",
82
+ "bb779e8367e44a939d607ace70493d94",
83
+ "4d3862b22c3245d8b3d8b6442e149c8d",
84
+ "16ef5a40c9d441aea180d1732442df97",
85
+ "db54ca7070cf43adbda196d44967464c",
86
+ "cadddb2624804c308710a219bf8cf4f3"
87
+ ]
88
+ },
89
+ "id": "4FTmJkt_J8j_",
90
+ "outputId": "850aba86-acb4-4452-bac2-28b5c815ec0f"
91
+ },
92
+ "outputs": [],
93
+ "source": [
94
+ "import app"
95
+ ]
96
+ },
97
+ {
98
+ "cell_type": "code",
99
+ "execution_count": null,
100
+ "metadata": {
101
+ "id": "LJbHj7yLJ9p0"
102
+ },
103
+ "outputs": [],
104
+ "source": []
105
+ }
106
+ ],
107
+ "metadata": {
108
+ "accelerator": "GPU",
109
+ "colab": {
110
+ "gpuType": "T4",
111
+ "provenance": []
112
+ },
113
+ "language_info": {
114
+ "name": "python"
115
+ }
116
+ },
117
+ "nbformat": 4,
118
+ "nbformat_minor": 0
119
+ }