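# Gradio demo for a TensorFlow Object Detection API SavedModel hosted on the
# Hugging Face Hub (sokonana/it107model): an uploaded image is run through the
# model and returned with the detected boxes and labels drawn on it.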
import numpy as np
from io import BytesIO
from PIL import Image
import tensorflow as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
from huggingface_hub import snapshot_download
import os
import gradio as gr

# Hugging Face Hub repo that hosts the exported SavedModel, and the label map
# that maps the model's integer class IDs to human-readable display names.
MODEL_REPO = 'sokonana/it107model'
PATH_TO_LABELS = 'data/label_map.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)

def pil_image_as_numpy_array(pilimg):
    # Convert a PIL image to a numpy array and add a batch dimension,
    # giving the (1, height, width, 3) shape the detection model expects.
    img_array = tf.keras.utils.img_to_array(pilimg)
    img_array = np.expand_dims(img_array, axis=0)
    return img_array
    
def load_image_into_numpy_array(path):
    # Read an image file (local or remote path supported by tf.io.gfile)
    # and return it as a batched numpy array.
    image_data = tf.io.gfile.GFile(path, 'rb').read()
    image = Image.open(BytesIO(image_data))
    return pil_image_as_numpy_array(image)

def load_model():
    # Download the model repo from the Hugging Face Hub (cached after the
    # first call) and load the exported SavedModel inside it.
    model_path = snapshot_download(MODEL_REPO)
    model_dir = os.path.join(model_path, 'saved_model')
    detection_model = tf.saved_model.load(model_dir)
    return detection_model


def predict(pilimg):
    # Gradio entry point: convert the uploaded PIL image and run detection.
    image_np = pil_image_as_numpy_array(pilimg)
    return predict2(image_np)

def predict2(image_np):
    # Run the detection model on the batched image array.
    results = detection_model(image_np)

    # Convert the output tensors to numpy arrays; different object detection
    # models may return additional keys beyond boxes, classes and scores.
    result = {key: value.numpy() for key, value in results.items()}

    label_id_offset = 0
    image_np_with_detections = image_np.copy()

    # Draw boxes with scores >= 0.60 onto a copy of the input image.
    viz_utils.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections[0],
        result['detection_boxes'][0],
        (result['detection_classes'][0] + label_id_offset).astype(int),
        result['detection_scores'][0],
        category_index,
        use_normalized_coordinates=True,
        max_boxes_to_draw=200,
        min_score_thresh=.60,
        agnostic_mode=False,
        line_thickness=2)

    result_pil_img = tf.keras.utils.array_to_img(image_np_with_detections[0])
    return result_pil_img

# Load the model once at startup so every prediction reuses it.
detection_model = load_model()

gr.Interface(fn=predict,
             inputs=[gr.Image(type="pil")],
             outputs=gr.Image(type="pil")
             ).launch(share=True)
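# Note: share=True requests a temporary public gradio.live link when the script
# is run locally; when hosted on Hugging Face Spaces the app is already served
# publicly and the flag is not needed.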