hysts (HF staff) committed
Commit 5086e39
1 Parent(s): 46ab592
Files changed (3)
  1. app_stylization.py +1 -2
  2. app_zero_shot.py +1 -4
  3. settings.py +0 -3
app_stylization.py CHANGED
@@ -7,7 +7,7 @@ import torch
  from controlnet_aux import CannyDetector
  from diffusers.pipelines import BlipDiffusionControlNetPipeline

- from settings import CACHE_EXAMPLES, DEFAULT_NEGATIVE_PROMPT, MAX_INFERENCE_STEPS
+ from settings import DEFAULT_NEGATIVE_PROMPT, MAX_INFERENCE_STEPS
  from utils import MAX_SEED, randomize_seed_fn

  canny_detector = CannyDetector()
@@ -105,7 +105,6 @@ with gr.Blocks() as demo:
  ],
  outputs=result,
  fn=run,
- cache_examples=CACHE_EXAMPLES,
  )

  inputs = [
app_zero_shot.py CHANGED
@@ -6,14 +6,12 @@ import spaces
  import torch
  from diffusers.pipelines import BlipDiffusionPipeline

- from settings import CACHE_EXAMPLES, DEFAULT_NEGATIVE_PROMPT, MAX_INFERENCE_STEPS
+ from settings import DEFAULT_NEGATIVE_PROMPT, MAX_INFERENCE_STEPS
  from utils import MAX_SEED, randomize_seed_fn

  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
  if torch.cuda.is_available():
      pipe = BlipDiffusionPipeline.from_pretrained("Salesforce/blipdiffusion", torch_dtype=torch.float16).to(device)
- else:
-     pipe = None


  @spaces.GPU
@@ -95,7 +93,6 @@ with gr.Blocks() as demo:
  ],
  outputs=result,
  fn=run,
- cache_examples=CACHE_EXAMPLES,
  )

  inputs = [
settings.py CHANGED
@@ -1,5 +1,2 @@
- import os
-
  MAX_INFERENCE_STEPS = 50
  DEFAULT_NEGATIVE_PROMPT = "over-exposure, under-exposure, saturated, duplicate, out of frame, lowres, cropped, worst quality, low quality, jpeg artifacts, morbid, mutilated, out of frame, ugly, bad anatomy, bad proportions, deformed, blurry, duplicate"
- CACHE_EXAMPLES = os.getenv("CACHE_EXAMPLES") == "1"
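For reference, a minimal sketch of the example-caching pattern this commit removes. It is not part of the commit: only CACHE_EXAMPLES, run, outputs=result, and cache_examples come from the diff above; the demo layout, example values, and run body are placeholder assumptions around a standard gr.Examples call.

# Illustrative sketch only; not code from this commit.
import os

import gradio as gr

# Before this commit, example caching was opt-in via an environment variable.
CACHE_EXAMPLES = os.getenv("CACHE_EXAMPLES") == "1"

def run(prompt: str) -> str:
    # Placeholder for the Space's actual inference function.
    return prompt

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    result = gr.Textbox(label="Result")
    gr.Examples(
        examples=[["a cute cat"]],
        inputs=prompt,
        outputs=result,
        fn=run,
        cache_examples=CACHE_EXAMPLES,  # the argument dropped by commit 5086e39
    )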