import gradio as gr
from torchvision.transforms import Resize
import torch
from upstash_vector import Index
import os
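
# The event handlers below use `extractor`, `model`, and `dataset`, which this file
# never defines. The lines that follow are a minimal sketch of that missing setup;
# the ViT checkpoint and the dataset name are assumptions, not taken from the original Space.
from transformers import ViTImageProcessor, ViTModel
from datasets import load_dataset

extractor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")  # assumed checkpoint
model = ViTModel.from_pretrained("google/vit-base-patch16-224-in21k")  # assumed checkpoint
dataset = load_dataset("X")  # placeholder: the app text only calls this "the X dataset"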

# Index.from_env() reads the Upstash Vector REST URL and token from environment variables.
index = Index.from_env()
resize_transform = Resize((250, 250))

with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Find Your Twins
        Upload your face and find the most similar people from the X dataset. Powered by [Upstash Vector](https://upstash.com) 🚀
        """
    )
    with gr.Tab("Basic"):
        with gr.Row():
            with gr.Column(scale=1):
                input_image = gr.Image(type="pil")
            with gr.Column(scale=3):
                output_image = gr.Gallery()

        @input_image.upload(inputs=input_image, outputs=output_image)
        def find_similar_faces(image):
            # Resize the uploaded face, embed it with the ViT model, and use the
            # [CLS] token as the query vector.
            resized_image = resize_transform(image)
            inputs = extractor(images=resized_image, return_tensors="pt")
            outputs = model(**inputs)
            embed = outputs.last_hidden_state[0][0]
            # Fetch the 3 nearest faces; ids are assumed to be "<3-char prefix><row index>".
            result = index.query(vector=embed.tolist(), top_k=3)
            return [dataset["train"][int(vector.id[3:])]["image"] for vector in result]
    with gr.Tab("Advanced"):
        with gr.Row():
            with gr.Column(scale=1):
                adv_input_image = gr.Image(type="pil")
                adv_image_count = gr.Number(9, label="Image Count")
            with gr.Column(scale=3):
                adv_output_image = gr.Gallery(height=1000)

        @adv_input_image.upload(inputs=[adv_input_image, adv_image_count], outputs=[adv_output_image])
        def find_similar_faces_advanced(image, count):
            resized_image = resize_transform(image)
            inputs = extractor(images=resized_image, return_tensors="pt")
            outputs = model(**inputs)
            embed = outputs.last_hidden_state[0][0]
            # gr.Number returns a float, so cast before capping the neighbour count at 9.
            result = index.query(vector=embed.tolist(), top_k=min(int(count), 9))
            return [dataset["train"][int(vector.id[3:])]["image"] for vector in result]
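
# Not part of the original file: a rough sketch (never called here) of how the index
# could have been populated offline so that vector ids match the
# "<3-char prefix><dataset row index>" pattern the handlers above rely on.
# The "img" prefix and the per-row upsert are assumptions.
def build_index():
    for i, row in enumerate(dataset["train"]):
        inputs = extractor(images=row["image"], return_tensors="pt")
        vec = model(**inputs).last_hidden_state[0][0]
        index.upsert(vectors=[(f"img{i}", vec.tolist())])
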
if __name__ == "__main__":
    demo.launch(debug=True)