abreza committed on
Commit
d9aed5a
β€’
1 Parent(s): 1c6c14b

remove some unnecessary code

Browse files
Files changed (1) hide show
  1. app.py +1 -21
app.py CHANGED
@@ -25,27 +25,9 @@ from src.utils.infer_util import (remove_background, resize_foreground)
25
  from src.utils.mesh_util import save_glb, save_obj
26
  from src.utils.train_util import instantiate_from_config
27
 
28
- cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
29
- os.environ["TRANSFORMERS_CACHE"] = cache_path
30
- os.environ["HF_HUB_CACHE"] = cache_path
31
- os.environ["HF_HOME"] = cache_path
32
-
33
  torch.backends.cuda.matmul.allow_tf32 = True
34
 
35
 
36
- class timer:
37
- def __init__(self, method_name="timed process"):
38
- self.method = method_name
39
-
40
- def __enter__(self):
41
- self.start = time.time()
42
- print(f"{self.method} starts")
43
-
44
- def __exit__(self, exc_type, exc_val, exc_tb):
45
- end = time.time()
46
- print(f"{self.method} took {str(round(end - self.start, 2))}s")
47
-
48
-
49
  def find_cuda():
50
  cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')
51
  if cuda_home and os.path.exists(cuda_home):
@@ -151,7 +133,7 @@ def make3d(images):
151
  @spaces.GPU
152
  def process_image(num_images, prompt):
153
  global pipe
154
- with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
155
  return pipe(
156
  prompt=[prompt]*num_images,
157
  generator=torch.Generator().manual_seed(123),
@@ -206,8 +188,6 @@ model = model.to(device)
206
 
207
  # Load text-to-image model
208
  print('Loading text-to-image model ...')
209
- if not path.exists(cache_path):
210
- os.makedirs(cache_path, exist_ok=True)
211
 
212
  pipe = StableDiffusionXLPipeline.from_pretrained(
213
  "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.bfloat16)
 
25
  from src.utils.mesh_util import save_glb, save_obj
26
  from src.utils.train_util import instantiate_from_config
27
 
 
 
 
 
 
28
  torch.backends.cuda.matmul.allow_tf32 = True
29
 
30
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  def find_cuda():
32
  cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')
33
  if cuda_home and os.path.exists(cuda_home):
 
133
  @spaces.GPU
134
  def process_image(num_images, prompt):
135
  global pipe
136
+ with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
137
  return pipe(
138
  prompt=[prompt]*num_images,
139
  generator=torch.Generator().manual_seed(123),
 
188
 
189
  # Load text-to-image model
190
  print('Loading text-to-image model ...')
 
 
191
 
192
  pipe = StableDiffusionXLPipeline.from_pretrained(
193
  "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.bfloat16)