import gradio as gr
from upstash_vector import AsyncIndex
from transformers import AutoFeatureExtractor, AutoModel
from datasets import load_dataset

# Upstash Vector index, configured from environment variables.
# It is expected to already contain one embedding per dataset row,
# keyed by the row index.
index = AsyncIndex.from_env()

# ViT model used to embed face images; the [CLS] token of the last
# hidden state serves as the image embedding.
model_ckpt = "google/vit-base-patch16-224-in21k"
extractor = AutoFeatureExtractor.from_pretrained(model_ckpt)
model = AutoModel.from_pretrained(model_ckpt)
hidden_dim = model.config.hidden_size

dataset = load_dataset("BounharAbdelaziz/Face-Aging-Dataset")

with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Find Your Twins
        Upload your face and find the most similar faces from the
        [Face Aging Dataset](https://huggingface.co/datasets/BounharAbdelaziz/Face-Aging-Dataset)
        using Google's [ViT](https://huggingface.co/google/vit-base-patch16-224-in21k) model.
        For best results, please use face images with a 1:1 aspect ratio; take a look at the examples.
        Vector similarity search is powered by [Upstash Vector](https://upstash.com) 🚀.
        You can check our blog post to learn more.
        """
    )
    with gr.Tab("Basic"):
        with gr.Row():
            with gr.Column(scale=1):
                input_image = gr.Image(type="pil")
            with gr.Column(scale=2):
                output_images = gr.Gallery()

        @input_image.change(inputs=input_image, outputs=output_images)
        async def find_similar_faces(image):
            if image is None:
                return None
            # Embed the uploaded image and use the [CLS] token as the query vector.
            inputs = extractor(images=image, return_tensors="pt")
            outputs = model(**inputs)
            embed = outputs.last_hidden_state[0][0]
            result = await index.query(vector=embed.tolist(), top_k=4)
            # Vector ids are dataset row indices, so map them back to images.
            return [dataset["train"][int(vector.id)]["image"] for vector in result]

        gr.Examples(
            examples=[
                dataset["train"][6]["image"],
                dataset["train"][7]["image"],
                dataset["train"][8]["image"],
            ],
            inputs=input_image,
            outputs=output_images,
            fn=find_similar_faces,
            cache_examples=False,
        )

    with gr.Tab("Advanced"):
        with gr.Row():
            with gr.Column(scale=1):
                adv_input_image = gr.Image(type="pil")
                adv_image_count = gr.Slider(1, 30, 10, label="Image Count")
                adv_button = gr.Button("Submit")
            with gr.Column(scale=2):
                adv_output_image = gr.Gallery()

        async def find_similar_faces_advanced(image, count):
            if image is None:
                return None
            inputs = extractor(images=image, return_tensors="pt")
            outputs = model(**inputs)
            embed = outputs.last_hidden_state[0][0]
            # Clamp the requested count to the slider's range before querying.
            result = await index.query(
                vector=embed.tolist(), top_k=max(1, min(30, int(count)))
            )
            return [dataset["train"][int(vector.id)]["image"] for vector in result]

        adv_button.click(
            fn=find_similar_faces_advanced,
            inputs=[adv_input_image, adv_image_count],
            outputs=[adv_output_image],
        )
        adv_input_image.upload(
            fn=find_similar_faces_advanced,
            inputs=[adv_input_image, adv_image_count],
            outputs=[adv_output_image],
        )

if __name__ == "__main__":
    demo.launch(debug=True, share=True)
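
# ---------------------------------------------------------------------------
# Indexing sketch (an assumption, not shown in the app above).
# The query handlers map result ids back to dataset rows via
# dataset["train"][int(vector.id)], which implies the Upstash Vector index was
# populated beforehand with one embedding per row, using the row index as the
# vector id. A rough one-off ingestion helper could look like the sketch below;
# the function name `populate_index`, the use of the synchronous client, and
# the batch size are illustrative choices, not part of the original code.
def populate_index(batch_size: int = 100):
    from upstash_vector import Index  # sync client keeps a one-off job simple

    sync_index = Index.from_env()
    batch = []
    for i, row in enumerate(dataset["train"]):
        inputs = extractor(images=row["image"], return_tensors="pt")
        # Same [CLS]-token embedding as the query path, so distances are comparable.
        embed = model(**inputs).last_hidden_state[0][0]
        batch.append((str(i), embed.tolist()))
        if len(batch) >= batch_size:
            sync_index.upsert(vectors=batch)
            batch = []
    if batch:
        sync_index.upsert(vectors=batch)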