huy-ha committed on
Commit
5cbb0f4
1 Parent(s): 83700f0
Files changed (3) hide show
  1. CLIP/clip/__init__.py +1 -1
  2. app.py +3 -1
  3. requirements.txt +2 -1
CLIP/clip/__init__.py CHANGED
@@ -148,7 +148,7 @@ class ClipWrapper:
148
  text_labels,
149
  horizontal_flipping=False,
150
  positive_attn_only: bool = False,
151
- tile_batch_size=16,
152
  prompt_batch_size=32,
153
  tile_interpolate_batch_size=16,
154
  **kwargs
 
148
  text_labels,
149
  horizontal_flipping=False,
150
  positive_attn_only: bool = False,
151
+ tile_batch_size=32,
152
  prompt_batch_size=32,
153
  tile_interpolate_batch_size=16,
154
  **kwargs
app.py CHANGED
@@ -43,12 +43,14 @@ def generate_relevancy(
43
  img = np.asarray(Image.fromarray(img).resize((244 * 4, 244 * 4)))
44
  assert img.dtype == np.uint8
45
  h, w, c = img.shape
 
46
  grads = ClipWrapper.get_clip_saliency(
47
  img=img,
48
  text_labels=np.array(labels),
49
  prompts=prompts,
50
  **saliency_configs[saliency_config](h),
51
  )[0]
 
52
  if subtract_mean:
53
  grads -= grads.mean(axis=0)
54
  grads = grads.cpu().numpy()
@@ -78,7 +80,7 @@ def generate_relevancy(
78
 
79
  iface = gr.Interface(
80
  title="Semantic Abstraction Multi-scale Relevancy Extractor",
81
- description="""A CPU-only demo of [Semantic Abstraction](https://semantic-abstraction.cs.columbia.edu/)'s Multi-Scale Relevancy Extractor. To run GPU inference locally, use the [official codebase release](https://github.com/columbia-ai-robotics/semantic-abstraction).
82
 
83
  This relevancy extractor builds heavily on [Chefer et al.'s codebase](https://github.com/hila-chefer/Transformer-MM-Explainability) and [CLIP on Wheels' codebase](https://cow.cs.columbia.edu/).""",
84
  fn=generate_relevancy,
 
43
  img = np.asarray(Image.fromarray(img).resize((244 * 4, 244 * 4)))
44
  assert img.dtype == np.uint8
45
  h, w, c = img.shape
46
+ start = time()
47
  grads = ClipWrapper.get_clip_saliency(
48
  img=img,
49
  text_labels=np.array(labels),
50
  prompts=prompts,
51
  **saliency_configs[saliency_config](h),
52
  )[0]
53
+ print("inference took", float(time() - start))
54
  if subtract_mean:
55
  grads -= grads.mean(axis=0)
56
  grads = grads.cpu().numpy()
 
80
 
81
  iface = gr.Interface(
82
  title="Semantic Abstraction Multi-scale Relevancy Extractor",
83
+ description="""A demo of [Semantic Abstraction](https://semantic-abstraction.cs.columbia.edu/)'s Multi-Scale Relevancy Extractor. To run GPU inference locally, use the [official codebase release](https://github.com/columbia-ai-robotics/semantic-abstraction).
84
 
85
  This relevancy extractor builds heavily on [Chefer et al.'s codebase](https://github.com/hila-chefer/Transformer-MM-Explainability) and [CLIP on Wheels' codebase](https://cow.cs.columbia.edu/).""",
86
  fn=generate_relevancy,
requirements.txt CHANGED
@@ -1,8 +1,9 @@
1
  ftfy
2
  matplotlib
 
3
  torch
4
- tqdm
5
  torchvision
 
6
  regex
7
  numpy
8
  Pillow
 
1
  ftfy
2
  matplotlib
3
+ --extra-index-url https://download.pytorch.org/whl/cu113
4
  torch
 
5
  torchvision
6
+ tqdm
7
  regex
8
  numpy
9
  Pillow