# NOTE(review): the lines below are residue scraped from the HF file-viewer page
# (status badges, file size, commit-hash gutter, line numbers) — not program code.
# They are preserved here as comments so the module remains valid Python.
# Spaces: Running / Running
# File size: 3,672 Bytes
# commits: a5661ba 9ef0a4b 0b59336 b9b8aa5 b81e987 fb9f80e 7a7e1da cc47f85 0f9cbe1
import os
import sys
import json
import requests
import gradio as gr
from huggingface_hub import HfFileSystem, hf_hub_download
from PIL import Image
# Environment Variables
HF_TOKEN = os.getenv("HF_TOKEN")  # bearer token for the private dataset and backend API; None if unset
HF_DATASET = os.getenv('HF_DATASET')  # repo id of the dataset holding the face images; None if unset
SPACE_SUBDOMAIN = os.environ['SPACE_SUBDOMAIN']  # required: raises KeyError at import time if missing
def get_image_with_auth(file_name):
    """Download *file_name* from the HF dataset (authenticated) and open it as a PIL image."""
    local_path = hf_hub_download(
        repo_id=HF_DATASET,
        repo_type="dataset",
        filename=file_name,
        token=HF_TOKEN,
    )
    return Image.open(local_path)
def recognize_face(image):
    """
    Send the uploaded image's public URL to the face-match backend and
    return the best-matching face plus a Markdown summary of the response.

    Parameters:
        image (str): local file path of the uploaded image
            (Gradio ``type="filepath"``).

    Returns:
        tuple: ``(PIL.Image.Image | None, str)`` — the matched image (or
        ``None`` on any failure) and an info/error string for the UI.
    """
    # FastAPI endpoint that performs the recognition
    url = 'https://dwancin-face-match-api.hf.space/recognize/'
    # The backend fetches the image by URL, so expose the uploaded file
    # through this Space's public /file= route.
    payload = {
        "image": f"https://{SPACE_SUBDOMAIN}.hf.space/file={image}",
        "type": "url"
    }
    headers = {
        "Authorization": f"Bearer {HF_TOKEN}"
    }
    # timeout= keeps the UI from hanging forever if the backend stalls;
    # a network failure is reported like the other error paths instead of crashing.
    try:
        response = requests.post(url, json=payload, headers=headers, timeout=60)
    except requests.RequestException as exc:
        info = f"Error: request failed - {exc}"
        print(info)
        return None, info
    if response.status_code == 200:
        response_data = response.json()
        image_path = response_data.get('image')
        if image_path:
            # Fetch the matched image from the private dataset.
            image_file = get_image_with_auth(image_path)
            formatted_json = json.dumps(response_data, indent=4)
            info = f"```json\n{formatted_json}\n```"
            print(formatted_json)
            return image_file, info
        info = "No image path found in response."
        print(info)
        return None, info
    info = f"Error: {response.status_code} - {response.text}"
    print(info)
    # Reuse the already-built message instead of rebuilding the f-string.
    return None, info
def update(output_info):
    """Make the results Markdown component visible (value is ignored)."""
    make_visible = gr.update(visible=True)
    return make_visible
# Gradio setup
# Two-image layout: upload on the left, matched result on the right,
# with a hidden Markdown panel that is revealed once a result arrives.
with gr.Blocks(
    analytics_enabled=False,
    title="Face Match",
    css='''
    .gradio-container { max-width: 700px !important; }
    .source-selection { display: none !important; }
    #clear { max-width: 140px; }
    #submit { max-width: 240px; }
    .svelte-1pijsyv { border-radius: 0 !important; }
    .svelte-s6ybro { display: none !important; }
    '''
) as demo:
    title = gr.HTML("<h1><center>Face Match</center></h1>")
    subtitle = gr.HTML("<h3><center>Upload an image, and the system will find the most similar face in our dataset.</center></h3>")
    with gr.Row():
        with gr.Column():
            with gr.Group():
                with gr.Row(equal_height=True):
                    # type="filepath" so recognize_face receives a local path it can expose via /file=
                    input_image = gr.Image(type="filepath", show_label=False, interactive=True)
                    output_image = gr.Image(type="filepath", show_label=False, interactive=False, show_share_button=False, show_download_button=False)
            with gr.Row():
                # Hidden until output_image changes (see the .change handler below)
                output_info = gr.Markdown(visible=False)
            with gr.Row():
                clear = gr.ClearButton([input_image, output_image, output_info], elem_id="clear", elem_classes="button")
                submit = gr.Button("Submit", variant="primary", elem_id="submit", elem_classes="button")
            with gr.Row():
                examples = gr.Examples(["examples/0001.png", "examples/0002.png", "examples/0003.png", "examples/0004.png"], input_image)
    # Reveal the info panel whenever a result image lands in output_image.
    output_image.change(fn=update, inputs=output_info, outputs=output_info)
    submit.click(fn=recognize_face, inputs=input_image, outputs=[output_image, output_info])
# Launch
# (trailing " |" scrape residue removed — it was a syntax error)
demo.launch(show_api=False)