Commit 89539f7, committed by hysts (HF staff)
Parent: 928d246

Migrate from yapf to black

Files changed (5):
  1. .pre-commit-config.yaml +19 -15
  2. .style.yapf +0 -5
  3. .vscode/settings.json +11 -8
  4. app.py +127 -113
  5. notebook.ipynb +3 -41
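A typical way to apply the migrated hooks across the whole repository (standard pre-commit usage, not part of this commit; the sketch below assumes pre-commit is installed and wraps its CLI from Python for illustration):

# Run every configured hook once over all files; `pre-commit run --all-files`
# is the standard pre-commit CLI invocation.
import subprocess

subprocess.run(["pre-commit", "run", "--all-files"], check=True)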
.pre-commit-config.yaml CHANGED
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.2.0
+    rev: v4.4.0
     hooks:
       - id: check-executables-have-shebangs
       - id: check-json
@@ -8,39 +8,43 @@ repos:
       - id: check-shebang-scripts-are-executable
       - id: check-toml
       - id: check-yaml
-      - id: double-quote-string-fixer
       - id: end-of-file-fixer
       - id: mixed-line-ending
-        args: ['--fix=lf']
+        args: ["--fix=lf"]
       - id: requirements-txt-fixer
       - id: trailing-whitespace
   - repo: https://github.com/myint/docformatter
-    rev: v1.4
+    rev: v1.7.5
     hooks:
       - id: docformatter
-        args: ['--in-place']
+        args: ["--in-place"]
   - repo: https://github.com/pycqa/isort
     rev: 5.12.0
     hooks:
       - id: isort
+        args: ["--profile", "black"]
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v0.991
+    rev: v1.5.1
     hooks:
       - id: mypy
-        args: ['--ignore-missing-imports']
-        additional_dependencies: ['types-python-slugify']
-  - repo: https://github.com/google/yapf
-    rev: v0.32.0
+        args: ["--ignore-missing-imports"]
+        additional_dependencies: ["types-python-slugify", "types-requests", "types-PyYAML"]
+  - repo: https://github.com/psf/black
+    rev: 23.7.0
     hooks:
-      - id: yapf
-        args: ['--parallel', '--in-place']
+      - id: black
+        language_version: python3.10
+        args: ["--line-length", "119"]
   - repo: https://github.com/kynan/nbstripout
-    rev: 0.6.0
+    rev: 0.6.1
     hooks:
       - id: nbstripout
-        args: ['--extra-keys', 'metadata.interpreter metadata.kernelspec cell.metadata.pycharm']
+        args: ["--extra-keys", "metadata.interpreter metadata.kernelspec cell.metadata.pycharm"]
   - repo: https://github.com/nbQA-dev/nbQA
     rev: 1.7.0
     hooks:
+      - id: nbqa-black
+      - id: nbqa-pyupgrade
+        args: ["--py37-plus"]
       - id: nbqa-isort
-      - id: nbqa-yapf
+        args: ["--float-to-top"]
.style.yapf DELETED
@@ -1,5 +0,0 @@
-[style]
-based_on_style = pep8
-blank_line_before_nested_class_or_def = false
-spaces_before_comment = 2
-split_before_logical_operator = true
.vscode/settings.json CHANGED
@@ -1,18 +1,21 @@
 {
-    "python.linting.enabled": true,
-    "python.linting.flake8Enabled": true,
-    "python.linting.pylintEnabled": false,
-    "python.linting.lintOnSave": true,
-    "python.formatting.provider": "yapf",
-    "python.formatting.yapfArgs": [
-        "--style={based_on_style: pep8, indent_width: 4, blank_line_before_nested_class_or_def: false, spaces_before_comment: 2, split_before_logical_operator: true}"
-    ],
     "[python]": {
+        "editor.defaultFormatter": "ms-python.black-formatter",
         "editor.formatOnType": true,
         "editor.codeActionsOnSave": {
             "source.organizeImports": true
         }
     },
+    "black-formatter.args": [
+        "--line-length=119"
+    ],
+    "isort.args": ["--profile", "black"],
+    "flake8.args": [
+        "--max-line-length=119"
+    ],
+    "ruff.args": [
+        "--line-length=119"
+    ],
     "editor.formatOnSave": true,
     "files.insertFinalNewline": true
 }
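The shared 119-character limit across black, flake8, and ruff keeps the formatter and linters in agreement, and isort's black profile does the same for import wrapping. A minimal sketch of that profile's effect (the import list is illustrative, not from this commit): past the line limit, isort emits black's parenthesized, trailing-comma form instead of a hanging indent.

# Illustrative: what isort --profile black produces for an over-long import.
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    StableDiffusionXLPipeline,
)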
app.py CHANGED
@@ -11,31 +11,32 @@ import PIL.Image
 import torch
 from diffusers import DiffusionPipeline
 
-DESCRIPTION = '# SD-XL'
+DESCRIPTION = "# SD-XL"
 if not torch.cuda.is_available():
-    DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
 
 MAX_SEED = np.iinfo(np.int32).max
-CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv(
-    'CACHE_EXAMPLES') == '1'
-MAX_IMAGE_SIZE = int(os.getenv('MAX_IMAGE_SIZE', '1024'))
-USE_TORCH_COMPILE = os.getenv('USE_TORCH_COMPILE') == '1'
-ENABLE_CPU_OFFLOAD = os.getenv('ENABLE_CPU_OFFLOAD') == '1'
-ENABLE_REFINER = os.getenv('ENABLE_REFINER', '1') == '1'
+CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
+MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1024"))
+USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
+ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
+ENABLE_REFINER = os.getenv("ENABLE_REFINER", "1") == "1"
 
-device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 if torch.cuda.is_available():
     pipe = DiffusionPipeline.from_pretrained(
-        'stabilityai/stable-diffusion-xl-base-1.0',
+        "stabilityai/stable-diffusion-xl-base-1.0",
         torch_dtype=torch.float16,
         use_safetensors=True,
-        variant='fp16')
+        variant="fp16",
+    )
     if ENABLE_REFINER:
         refiner = DiffusionPipeline.from_pretrained(
-            'stabilityai/stable-diffusion-xl-refiner-1.0',
+            "stabilityai/stable-diffusion-xl-refiner-1.0",
             torch_dtype=torch.float16,
             use_safetensors=True,
-            variant='fp16')
+            variant="fp16",
+        )
 
     if ENABLE_CPU_OFFLOAD:
         pipe.enable_model_cpu_offload()
@@ -47,13 +48,9 @@ if torch.cuda.is_available():
         refiner.to(device)
 
     if USE_TORCH_COMPILE:
-        pipe.unet = torch.compile(pipe.unet,
-                                  mode='reduce-overhead',
-                                  fullgraph=True)
+        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
         if ENABLE_REFINER:
-            refiner.unet = torch.compile(refiner.unet,
-                                         mode='reduce-overhead',
-                                         fullgraph=True)
+            refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
 else:
     pipe = None
     refiner = None
@@ -65,21 +62,23 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     return seed
 
 
-def generate(prompt: str,
-             negative_prompt: str = '',
-             prompt_2: str = '',
-             negative_prompt_2: str = '',
-             use_negative_prompt: bool = False,
-             use_prompt_2: bool = False,
-             use_negative_prompt_2: bool = False,
-             seed: int = 0,
-             width: int = 1024,
-             height: int = 1024,
-             guidance_scale_base: float = 5.0,
-             guidance_scale_refiner: float = 5.0,
-             num_inference_steps_base: int = 50,
-             num_inference_steps_refiner: int = 50,
-             apply_refiner: bool = False) -> PIL.Image.Image:
+def generate(
+    prompt: str,
+    negative_prompt: str = "",
+    prompt_2: str = "",
+    negative_prompt_2: str = "",
+    use_negative_prompt: bool = False,
+    use_prompt_2: bool = False,
+    use_negative_prompt_2: bool = False,
+    seed: int = 0,
+    width: int = 1024,
+    height: int = 1024,
+    guidance_scale_base: float = 5.0,
+    guidance_scale_refiner: float = 5.0,
+    num_inference_steps_base: int = 50,
+    num_inference_steps_refiner: int = 50,
+    apply_refiner: bool = False,
+) -> PIL.Image.Image:
     generator = torch.Generator().manual_seed(seed)
 
     if not use_negative_prompt:
@@ -90,140 +89,153 @@ def generate(prompt: str,
         negative_prompt_2 = None  # type: ignore
 
     if not apply_refiner:
-        return pipe(prompt=prompt,
-                    negative_prompt=negative_prompt,
-                    prompt_2=prompt_2,
-                    negative_prompt_2=negative_prompt_2,
-                    width=width,
-                    height=height,
-                    guidance_scale=guidance_scale_base,
-                    num_inference_steps=num_inference_steps_base,
-                    generator=generator,
-                    output_type='pil').images[0]
+        return pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            prompt_2=prompt_2,
+            negative_prompt_2=negative_prompt_2,
+            width=width,
+            height=height,
+            guidance_scale=guidance_scale_base,
+            num_inference_steps=num_inference_steps_base,
+            generator=generator,
+            output_type="pil",
+        ).images[0]
     else:
-        latents = pipe(prompt=prompt,
-                       negative_prompt=negative_prompt,
-                       prompt_2=prompt_2,
-                       negative_prompt_2=negative_prompt_2,
-                       width=width,
-                       height=height,
-                       guidance_scale=guidance_scale_base,
-                       num_inference_steps=num_inference_steps_base,
-                       generator=generator,
-                       output_type='latent').images
-        image = refiner(prompt=prompt,
-                        negative_prompt=negative_prompt,
-                        prompt_2=prompt_2,
-                        negative_prompt_2=negative_prompt_2,
-                        guidance_scale=guidance_scale_refiner,
-                        num_inference_steps=num_inference_steps_refiner,
-                        image=latents,
-                        generator=generator).images[0]
+        latents = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            prompt_2=prompt_2,
+            negative_prompt_2=negative_prompt_2,
+            width=width,
+            height=height,
+            guidance_scale=guidance_scale_base,
+            num_inference_steps=num_inference_steps_base,
+            generator=generator,
+            output_type="latent",
+        ).images
+        image = refiner(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            prompt_2=prompt_2,
+            negative_prompt_2=negative_prompt_2,
+            guidance_scale=guidance_scale_refiner,
+            num_inference_steps=num_inference_steps_refiner,
+            image=latents,
+            generator=generator,
+        ).images[0]
     return image
 
 
 examples = [
-    'Astronaut in a jungle, cold color palette, muted colors, detailed, 8k',
-    'An astronaut riding a green horse',
+    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
+    "An astronaut riding a green horse",
 ]
 
-with gr.Blocks(css='style.css') as demo:
+with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(value='Duplicate Space for private use',
-                       elem_id='duplicate-button',
-                       visible=os.getenv('SHOW_DUPLICATE_BUTTON') == '1')
+    gr.DuplicateButton(
+        value="Duplicate Space for private use",
+        elem_id="duplicate-button",
+        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
+    )
     with gr.Group():
         with gr.Row():
             prompt = gr.Text(
-                label='Prompt',
+                label="Prompt",
                 show_label=False,
                 max_lines=1,
-                placeholder='Enter your prompt',
+                placeholder="Enter your prompt",
                 container=False,
             )
-            run_button = gr.Button('Run', scale=0)
-        result = gr.Image(label='Result', show_label=False)
-    with gr.Accordion('Advanced options', open=False):
+            run_button = gr.Button("Run", scale=0)
+        result = gr.Image(label="Result", show_label=False)
+    with gr.Accordion("Advanced options", open=False):
         with gr.Row():
-            use_negative_prompt = gr.Checkbox(label='Use negative prompt',
-                                              value=False)
-            use_prompt_2 = gr.Checkbox(label='Use prompt 2', value=False)
-            use_negative_prompt_2 = gr.Checkbox(label='Use negative prompt 2',
-                                                value=False)
+            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
+            use_prompt_2 = gr.Checkbox(label="Use prompt 2", value=False)
+            use_negative_prompt_2 = gr.Checkbox(label="Use negative prompt 2", value=False)
         negative_prompt = gr.Text(
-            label='Negative prompt',
+            label="Negative prompt",
             max_lines=1,
-            placeholder='Enter a negative prompt',
+            placeholder="Enter a negative prompt",
             visible=False,
         )
         prompt_2 = gr.Text(
-            label='Prompt 2',
+            label="Prompt 2",
             max_lines=1,
-            placeholder='Enter your prompt',
+            placeholder="Enter your prompt",
             visible=False,
         )
         negative_prompt_2 = gr.Text(
-            label='Negative prompt 2',
+            label="Negative prompt 2",
             max_lines=1,
-            placeholder='Enter a negative prompt',
+            placeholder="Enter a negative prompt",
             visible=False,
         )
 
-        seed = gr.Slider(label='Seed',
-                         minimum=0,
-                         maximum=MAX_SEED,
-                         step=1,
-                         value=0)
-        randomize_seed = gr.Checkbox(label='Randomize seed', value=True)
+        seed = gr.Slider(
+            label="Seed",
+            minimum=0,
+            maximum=MAX_SEED,
+            step=1,
+            value=0,
+        )
+        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
         with gr.Row():
             width = gr.Slider(
-                label='Width',
+                label="Width",
                 minimum=256,
                 maximum=MAX_IMAGE_SIZE,
                 step=32,
                 value=1024,
             )
             height = gr.Slider(
-                label='Height',
+                label="Height",
                 minimum=256,
                 maximum=MAX_IMAGE_SIZE,
                 step=32,
                 value=1024,
             )
-        apply_refiner = gr.Checkbox(label='Apply refiner',
-                                    value=False,
-                                    visible=ENABLE_REFINER)
+        apply_refiner = gr.Checkbox(label="Apply refiner", value=False, visible=ENABLE_REFINER)
         with gr.Row():
-            guidance_scale_base = gr.Slider(label='Guidance scale for base',
-                                            minimum=1,
-                                            maximum=20,
-                                            step=0.1,
-                                            value=5.0)
+            guidance_scale_base = gr.Slider(
+                label="Guidance scale for base",
+                minimum=1,
+                maximum=20,
+                step=0.1,
+                value=5.0,
+            )
             num_inference_steps_base = gr.Slider(
-                label='Number of inference steps for base',
+                label="Number of inference steps for base",
                 minimum=10,
                 maximum=100,
                 step=1,
-                value=50)
+                value=50,
+            )
         with gr.Row(visible=False) as refiner_params:
             guidance_scale_refiner = gr.Slider(
-                label='Guidance scale for refiner',
+                label="Guidance scale for refiner",
                 minimum=1,
                 maximum=20,
                 step=0.1,
-                value=5.0)
+                value=5.0,
+            )
             num_inference_steps_refiner = gr.Slider(
-                label='Number of inference steps for refiner',
+                label="Number of inference steps for refiner",
                 minimum=10,
                 maximum=100,
                 step=1,
-                value=50)
+                value=50,
+            )
 
-    gr.Examples(examples=examples,
-                inputs=prompt,
-                outputs=result,
-                fn=generate,
-                cache_examples=CACHE_EXAMPLES)
+    gr.Examples(
+        examples=examples,
+        inputs=prompt,
+        outputs=result,
+        fn=generate,
+        cache_examples=CACHE_EXAMPLES,
+    )
 
     use_negative_prompt.change(
         fn=lambda x: gr.update(visible=x),
@@ -281,7 +293,7 @@ with gr.Blocks(css='style.css') as demo:
         fn=generate,
         inputs=inputs,
         outputs=result,
-        api_name='run',
+        api_name="run",
     )
     negative_prompt.submit(
         fn=randomize_seed_fn,
@@ -331,4 +343,6 @@ with gr.Blocks(css='style.css') as demo:
         outputs=result,
         api_name=False,
     )
-demo.queue(max_size=20).launch()
+
+if __name__ == "__main__":
+    demo.queue(max_size=20).launch()
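Beyond formatting, the last hunk adds an `if __name__ == "__main__":` guard, so importing app (as notebook.ipynb does below) now defines the Blocks app without launching it; the server starts only when app.py is executed directly. A sketch of what an importer can do under the new layout (assumes the module shown in this diff and standard gradio Blocks methods):

# Env vars are read at import time by app.py, so set them before importing.
import os

os.environ["ENABLE_REFINER"] = "0"

import app  # with the new guard, this no longer launches the demo

app.demo.queue(max_size=20).launch()  # launch explicitly when desired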
notebook.ipynb CHANGED
@@ -51,47 +51,9 @@
    "source": [
     "import os\n",
     "\n",
-    "os.environ['ENABLE_REFINER'] = '0'"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "colab": {
-     "base_uri": "https://localhost:8080/",
-     "height": 710,
-     "referenced_widgets": [
-      "68c1e33d84b94f009db258e278fe7068",
-      "b1b1ca6d1cc44a738c3b4b6de17f3a5b",
-      "104833166be14046873bfea2c1a2a887",
-      "32f25821a48d4c9589f58c134e3b56d7",
-      "3ed7cc7759074df58a91fd7fb28a4933",
-      "c8885bd4a35d4cdcbb6acce5c52e15e2",
-      "5d1d83dfd090460d9f948b71f95aaed8",
-      "773e06ed1d734e53a7def5305cd35131",
-      "753b336dbeb147349e4520715035d8da",
-      "c5215236213242b89a971a1095afcea5",
-      "bd0a6a0e16944533b59eaa3f5188e99f",
-      "96b1de32a367400bba75babd39bc7308",
-      "65291f8203964f4499a1b422af91f75e",
-      "0c3fad2a850b4320b47586ff4d0ac73e",
-      "69a6be1033c5424988a702c5d69590ee",
-      "b22729413d9b449a94892b91d95cf1e4",
-      "6c8f51c69f394eeea67eb515831f60b2",
-      "bb779e8367e44a939d607ace70493d94",
-      "4d3862b22c3245d8b3d8b6442e149c8d",
-      "16ef5a40c9d441aea180d1732442df97",
-      "db54ca7070cf43adbda196d44967464c",
-      "cadddb2624804c308710a219bf8cf4f3"
-     ]
-    },
-    "id": "4FTmJkt_J8j_",
-    "outputId": "850aba86-acb4-4452-bac2-28b5c815ec0f"
-   },
-   "outputs": [],
-   "source": [
-    "import app"
+    "import app\n",
+    "\n",
+    "os.environ[\"ENABLE_REFINER\"] = \"0\""
    ]
   },
  {