import torch
from PIL import Image
from transformers import CLIPProcessor, CLIPModel
import numpy as np
import os
from datasets import load_dataset, Dataset

# Work around the "duplicate OpenMP runtime" crash that can occur when faiss
# and torch each ship their own libomp (common on macOS / conda installs)
os.environ['KMP_DUPLICATE_LIB_OK']='True'

# Drop the raw image bytes after loading. Keep this True unless you have
# plenty of RAM: the 'jpg' column holds a full image binary for every record!
remove_images = True
verbose = True

# Save the merged dataset and FAISS index locally for faster startup next time
cache_on_disk = True
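# When cache_on_disk is True, two artifacts are written to the working
# directory: the merged dataset ('metmuseum_merged/') and the FAISS index
# ('faiss_index_file.index') -- both paths are set under __main__ below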

# Load datasets and remove images (or not)
def load_and_prepare_data():
    if verbose:
        print("Loading")
    # Load from HF
    embeddings_data = load_dataset("metmuseum/openaccess_embeddings", split='train')
    collection_data = load_dataset("metmuseum/openaccess", split='train')

    # Strip out the raw image bytes (or not), then convert the collection to a
    # pandas DataFrame
    if remove_images:
        collection_df = collection_data.remove_columns(['jpg']).to_pandas()
    else:
        collection_df = collection_data.to_pandas()
        
    # Convert the embeddings to a pandas DataFrame
    embedding_df = embeddings_data.to_pandas()

    # Merge the datasets on "Object ID"
    if verbose:
        print("Merging")

    merged_df = collection_df.merge(embedding_df, on="Object ID", how="left")

    if verbose:
        print("Merged")

    # Convert back to Huggingface dataset
    first_dataset = Dataset.from_pandas(merged_df)

    # Drop rows with no embedding. Note: this throws away roughly half of the
    # samples, but it keeps things simple when the data is handed to FAISS
    merged_dataset = first_dataset.filter(lambda example: example['Embedding'] is not None)
    
    if cache_on_disk:
        merged_dataset.save_to_disk('metmuseum_merged')
    
    return merged_dataset

# Build a FAISS index over the embedding column and (optionally) save it to disk
def build_faiss_index(dataset, index_file):
    dataset.add_faiss_index('Embedding')
    if cache_on_disk:
        dataset.save_faiss_index('Embedding', index_file)
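
# Note: with no extra arguments, datasets builds an exact (flat) FAISS index,
# which scans every vector per query. If that becomes slow, an approximate
# index can be requested through add_faiss_index's string_factory parameter
# (e.g. string_factory="IVF256,Flat" -- a hypothetical choice; tune for your data)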

# Load a previously saved FAISS index from disk
def load_faiss_index(dataset, index_file):
    dataset.load_faiss_index('Embedding', index_file)

def search_embeddings(dataset, query_embedding, k=5):
    # """Search for the top k closest embeddings in the index."""
    scores, samples = dataset.get_nearest_examples(
        "Embedding", query_embedding, k
    )
    return scores, samples
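
# Optional sanity check (hypothetical usage, not part of the original script):
# querying with an embedding taken straight from the dataset should return
# that same object as the top hit.
#   probe = np.asarray(dataset[0]['Embedding'], dtype=np.float32)
#   scores, samples = search_embeddings(dataset, probe, k=1)
#   print(samples['Object ID'][0])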

def query_text(processor, model, text):
    """Convert a text query into an embedding."""
    inputs = processor(text=text, return_tensors="pt")
    with torch.no_grad():
        text_embedding = model.get_text_features(**inputs).numpy()
    # get_text_features returns a (1, dim) batch; return the single vector to
    # match query_image below
    return text_embedding[0]

def query_image(processor, model, image_path):
    """Convert an image query into an embedding."""
    image = Image.open(image_path)
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        image_embedding = model.get_image_features(**inputs).numpy()
    if verbose:
        print("Image embedding shape:", image_embedding.shape)
    return image_embedding[0]

if __name__ == "__main__":
    index_file = "faiss_index_file.index"
    dataset_path = "metmuseum_merged"

    # Try to load the cached dataset & cached FAISS index
    if os.path.exists(dataset_path):
        dataset = Dataset.load_from_disk(dataset_path)
    else:
        dataset = load_and_prepare_data()

    if not os.path.exists(index_file):
        if verbose:
            print("Building index")
        build_faiss_index(dataset, index_file)
    else:
        load_faiss_index(dataset, index_file)

    # Load CLIP to embed text / images to search
    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

    # Example usage for a text query
    # Note: text queries don't seem to return convincing matches right now...
    text_query = "A painting of a sunflower"
    text_embedding = query_text(processor, model, text_query)
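    # Untested guess at why text search underperforms: if the stored image
    # embeddings are L2-normalized, normalizing the text query the same way
    # might help:
    #   text_embedding = text_embedding / np.linalg.norm(text_embedding)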

    # k = number of nearest neighbors to return
    scores, samples = search_embeddings(dataset, text_embedding, k=5)

    print("\Text Query Results:")
    print(scores)
    # Each entry in `samples` is a dataset column -- you could loop through
    # every field, or just build an object URL as below
    for result in samples["Object ID"]:
        print("https://metmuseum.org/art/collection/search/" + str(result))

    # Example usage for an image query
    image_path = "DP355692.jpg"  # Replace with the path to your image file
    image_embedding = query_image(processor, model, image_path)

    # k = number of nearest neighbors to return
    scores, samples = search_embeddings(dataset, image_embedding, k=5)

    print("\nImage Query Results:")
    print(scores)
    for result in samples["Object ID"]:
        print("https://metmuseum.org/art/collection/search/" + str(result))