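"""Gradio object detection demo.

Loads a TensorFlow Object Detection API SavedModel (assumed format, based on the
object_detection imports below) from the Hugging Face Hub repo
wongshennan/iti107_model, runs detection on an uploaded image, and returns the
image with predicted boxes and labels drawn on it.
"""
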
import os
import tarfile
import wget
import numpy as np
import tensorflow as tf
from huggingface_hub import snapshot_download
import gradio as gr

from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
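
# This script assumes the following packages are installed: tensorflow, numpy,
# gradio, wget, huggingface_hub, and the TensorFlow Object Detection API
# ('object_detection') from the TensorFlow Models research directory.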

# Constants
REPO_ID = "wongshennan/iti107_model"
PATH_TO_LABELS = 'data/label_map.pbtxt'

# Load category index
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)

def pil_image_as_numpy_array(pil_img):
    """Convert a PIL image to a batched numpy array of shape (1, H, W, 3)."""
    # Keep uint8 pixel values: exported Object Detection API models typically
    # expect a uint8 image tensor rather than img_to_array's float32 default.
    img_array = tf.keras.utils.img_to_array(pil_img, dtype=np.uint8)
    img_array = np.expand_dims(img_array, axis=0)
    return img_array

def load_model():
    """Load model from Hugging Face Hub."""
    download_dir = snapshot_download(REPO_ID)
    saved_model_dir = os.path.join(download_dir, "saved_model")
    return tf.saved_model.load(saved_model_dir)

def load_model2():
    """Alternative loader (unused): download and extract a pretrained balloon model."""
    wget.download("https://nyp-aicourse.s3-ap-southeast-1.amazonaws.com/pretrained-models/balloon_model.tar.gz")
    with tarfile.open("balloon_model.tar.gz") as tar:
        tar.extractall()
    return tf.saved_model.load("saved_model")

def predict(pil_img):
    """Predict method for Gradio interface."""
    image_np = pil_image_as_numpy_array(pil_img)
    return detect_and_visualize(image_np)

def detect_and_visualize(image_np):
    """Helper function to run object detection and visualize results."""
    results = detection_model(image_np)
    result = {key: value.numpy() for key, value in results.items()}

    label_id_offset = 0
    image_np_with_detections = image_np.copy()

    viz_utils.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections[0],
        result['detection_boxes'][0],
        (result['detection_classes'][0] + label_id_offset).astype(int),
        result['detection_scores'][0],
        category_index,
        use_normalized_coordinates=True,
        max_boxes_to_draw=200,
        min_score_thresh=.60,
        agnostic_mode=False,
        line_thickness=2)

    return tf.keras.utils.array_to_img(image_np_with_detections[0])

# Load the model
detection_model = load_model()

# Launch Gradio Interface
gr.Interface(fn=predict,
             inputs=gr.Image(type="pil"),
             outputs=gr.Image(type="pil")
             ).launch(share=True)