File size: 1,219 Bytes
f3791c9
 
52477e5
f3791c9
b373ea6
 
 
 
52477e5
f3791c9
 
2caf5ff
f3791c9
52477e5
f3791c9
 
52477e5
ff1492f
 
78db2a1
f3791c9
 
 
52477e5
 
f3791c9
54bd402
8e90b77
52477e5
f3791c9
 
54bd402
777a836
f3791c9
 
 
 
 
603ee20
f3791c9
777a836
f3791c9
603ee20
f3791c9
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import gradio as gr
from datasets import load_dataset
from sentence_transformers import SentenceTransformer

import os
import requests
# Bypass any configured HTTP proxy when talking to the Hugging Face Hub,
# so the model/dataset downloads below connect directly.
os.environ['NO_PROXY'] = 'huggingface.co'

# CLIP image/text encoder; embeds the query image into the same vector
# space as the pre-computed embeddings stored in the dataset.
model = SentenceTransformer('clip-ViT-B-32')

# Candidate images: dataset whose 'embeddings' column holds pre-computed
# CLIP vectors for each image.
dataset = load_dataset("sasha/pedro-embeddings-new")
ds = dataset["train"]
# Build an in-memory FAISS index over the embeddings column so
# get_nearest_examples() can do fast nearest-neighbour lookups.
ds.add_faiss_index(column='embeddings')


def query(image, number_to_retrieve=1):
    """Return the candidate image most similar to *image*.

    Args:
        image: PIL image from the Gradio widget. May be ``None`` when the
            user clicks the button without uploading anything.
        number_to_retrieve: how many nearest neighbours to ask FAISS for.
            Only the closest match is returned, since the UI shows a
            single output image.

    Returns:
        The nearest dataset image, or ``None`` when no input was given.
    """
    if image is None:
        # No input image — return None so Gradio shows an empty output
        # instead of raising an opaque encoding error.
        return None
    input_embedding = model.encode(image)
    # Scores (FAISS distances) are unused: only the top hit is displayed.
    _scores, retrieved_examples = ds.get_nearest_examples(
        'embeddings', input_embedding, k=number_to_retrieve
    )
    return retrieved_examples['image'][0]


# Gradio UI: upload an image on the left, click the button, and the most
# similar Pedro Pascal image from the dataset appears on the right.
with gr.Blocks() as demo:
	gr.Markdown("# Find my Pedro Pascal")
	gr.Markdown("## Use this Space to find the Pedro Pascal most similar to your input image!")
	with gr.Row():
		# Left column: input image, trigger button, and a (currently
		# unpopulated) Markdown slot for descriptive text.
		with gr.Column(scale=1, min_width=600):
			inputs = gr.Image(type='pil')  # type='pil' so query() gets a PIL image
			btn = gr.Button("Find my Pedro!")
			description = gr.Markdown()
			
		# Right column: the retrieved nearest-neighbour image.
		with gr.Column(scale=1, min_width=600):
			outputs=gr.Image()
			
	gr.Markdown("### Image Examples")
	# Clickable example images; cache_examples=True precomputes query()
	# on each example at startup so clicks return instantly.
	gr.Examples(
	examples=["elton.jpg", "ken.jpg", "gaga.jpg", "taylor.jpg"],
	inputs=inputs,
	outputs=[outputs],
	fn=query,
	cache_examples=True,
	)
	# Wire the button: run query(inputs) and display the result.
	btn.click(query, inputs, [outputs])
	
demo.launch()