justram committed
Commit b558191
1 Parent(s): 0e18558

Create README.md

Files changed (1): README.md (+50 -0)
# AToMiC Prebuilt Indexes

## Example Usage

### Reproduction

Toolkit:
https://github.com/TREC-AToMiC/AToMiC/tree/main/examples/dense_retriever_baselines

```bash
# Skip the encode and index steps; search with the prebuilt indexes and topics directly

python search.py \
  --topics topics/openai.clip-vit-base-patch32.text.validation \
  --index indexes/openai.clip-vit-base-patch32.image.faiss.flat \
  --hits 1000 \
  --output runs/run.openai.clip-vit-base-patch32.validation.t2i.large.trec

python search.py \
  --topics topics/openai.clip-vit-base-patch32.image.validation \
  --index indexes/openai.clip-vit-base-patch32.text.faiss.flat \
  --hits 1000 \
  --output runs/run.openai.clip-vit-base-patch32.validation.i2t.large.trec
```
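
The `--output` files are plain-text run files. Here is a minimal sketch for eyeballing them, assuming `search.py` writes the standard six-column TREC run format (`query_id Q0 doc_id rank score tag`):

```python
# Print the top-3 hits per query from a run file (standard TREC format assumed).
with open('runs/run.openai.clip-vit-base-patch32.validation.t2i.large.trec') as f:
    for line in f:
        query_id, _, doc_id, rank, score, tag = line.split()
        if int(rank) <= 3:
            print(query_id, doc_id, rank, score)
```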

### Explore AToMiC datasets

```python
import torch
from pathlib import Path
from datasets import load_dataset
from transformers import AutoModel, AutoProcessor

INDEX_DIR = 'indexes'
INDEX_NAME = 'openai.clip-vit-base-patch32.image.faiss.flat'
QUERY = 'Elizabeth II'

images = load_dataset('TREC-AToMiC/AToMiC-Images-v0.2', split='train')
images.load_faiss_index(index_name=INDEX_NAME, file=Path(INDEX_DIR, INDEX_NAME, 'index'))

model = AutoModel.from_pretrained('openai/clip-vit-base-patch32')
processor = AutoProcessor.from_pretrained('openai/clip-vit-base-patch32')

# The prebuilt indexes contain L2-normalized vectors, so normalize the query
# embedding as well; inner-product search then ranks by cosine similarity.
with torch.no_grad():
    q_embedding = model.get_text_features(**processor(text=QUERY, return_tensors="pt"))
    q_embedding = torch.nn.functional.normalize(q_embedding, dim=-1).detach().numpy()

scores, retrieved = images.get_nearest_examples(INDEX_NAME, q_embedding, k=10)
```
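
The same pattern works in the image-to-text direction: encode an image query with `get_image_features` and search the prebuilt text index. The sketch below reuses `model`, `processor`, and `INDEX_DIR` from the block above; the text dataset name is an assumption mirrored from the image collection, and `query.jpg` is a hypothetical local file, while the text index name comes from the reproduction example:

```python
from PIL import Image

# Assumed dataset name for the text side of the collection.
texts = load_dataset('TREC-AToMiC/AToMiC-Texts-v0.2', split='train')
TEXT_INDEX_NAME = 'openai.clip-vit-base-patch32.text.faiss.flat'
texts.load_faiss_index(index_name=TEXT_INDEX_NAME, file=Path(INDEX_DIR, TEXT_INDEX_NAME, 'index'))

# Encode a local image (hypothetical file) and L2-normalize to match the index.
query_image = Image.open('query.jpg')
with torch.no_grad():
    q_embedding = model.get_image_features(**processor(images=query_image, return_tensors="pt"))
    q_embedding = torch.nn.functional.normalize(q_embedding, dim=-1).detach().numpy()

scores, retrieved = texts.get_nearest_examples(TEXT_INDEX_NAME, q_embedding, k=10)
```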