import os
import tarfile

import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import wget
import gradio as gr
from io import BytesIO
from PIL import Image
from tqdm import tqdm
from huggingface_hub import snapshot_download
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
from object_detection.utils import ops as utils_op
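
# The label map (expected at data/label_map.pbtxt in this repo) maps the model's
# numeric class ids to human-readable display names used when drawing boxes.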
PATH_TO_LABELS = 'data/label_map.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)

def pil_image_as_numpy_array(pilimg):
    # Convert a PIL image to a batched numpy array of shape (1, H, W, 3).
    img_array = tf.keras.utils.img_to_array(pilimg)
    img_array = np.expand_dims(img_array, axis=0)
    return img_array

def load_image_into_numpy_array(path):
    # Read an image file from disk and convert it to a batched numpy array.
    image_data = tf.io.gfile.GFile(path, 'rb').read()
    image = Image.open(BytesIO(image_data))
    return pil_image_as_numpy_array(image)

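# Download the fine-tuned detection model from the Hugging Face Hub repo
# (REPO_ID, defined below) and load its SavedModel directory.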
def load_model():
    download_dir = snapshot_download(REPO_ID)
    saved_model_dir = os.path.join(download_dir, "saved_model")
    detection_model = tf.saved_model.load(saved_model_dir)
    return detection_model

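# Alternative loader kept for reference: fetches a pretrained balloon model
# from S3 instead of the Hugging Face Hub. It is not used by the app below.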
def load_model2():
    wget.download("https://nyp-aicourse.s3-ap-southeast-1.amazonaws.com/pretrained-models/balloon_model.tar.gz")
    tarfile.open("balloon_model.tar.gz").extractall()
    model_dir = 'saved_model'
    detection_model = tf.saved_model.load(model_dir)
    return detection_model

# samples_folder = 'test_samples'
# image_path = 'test_samples/sample_balloon.jpeg'

def predict(pilimg):
    # Gradio passes a PIL image here; video input is handled by predict_video below.
    image_np = pil_image_as_numpy_array(pilimg)
    return predict2(image_np)

def predict2(image_np):
    results = detection_model(image_np)

    # Different object detection models return different extra outputs;
    # convert every tensor in the result dict to numpy.
    result = {key: value.numpy() for key, value in results.items()}

    label_id_offset = 0
    image_np_with_detections = image_np.copy()

    viz_utils.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections[0],
        result['detection_boxes'][0],
        (result['detection_classes'][0] + label_id_offset).astype(int),
        result['detection_scores'][0],
        category_index,
        use_normalized_coordinates=True,
        max_boxes_to_draw=200,
        min_score_thresh=.60,
        agnostic_mode=False,
        line_thickness=2)

    result_pil_img = tf.keras.utils.array_to_img(image_np_with_detections[0])
    return result_pil_img

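# Run detection frame-by-frame over a video file with OpenCV, draw the boxes
# on each frame, and write the annotated frames to a new mp4 file.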
def write_video(video_in_filepath, video_out_filepath, detection_model):
    video_reader = cv2.VideoCapture(video_in_filepath)

    nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))
    fps = video_reader.get(cv2.CAP_PROP_FPS)

    video_writer = cv2.VideoWriter(video_out_filepath,
                                   cv2.VideoWriter_fourcc(*'mp4v'),
                                   fps,
                                   (frame_w, frame_h))

    label_id_offset = 0
    for i in tqdm(range(nb_frames)):
        ret, image_np = video_reader.read()
        if not ret:
            break

        input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.uint8)
        results = detection_model(input_tensor)

        viz_utils.visualize_boxes_and_labels_on_image_array(
            image_np,
            results['detection_boxes'][0].numpy(),
            (results['detection_classes'][0].numpy() + label_id_offset).astype(int),
            results['detection_scores'][0].numpy(),
            category_index,
            use_normalized_coordinates=True,
            max_boxes_to_draw=200,
            min_score_thresh=.50,
            agnostic_mode=False,
            line_thickness=2)

        video_writer.write(np.uint8(image_np))

    # Release the reader/writer and close any OpenCV windows
    video_reader.release()
    video_writer.release()
    cv2.destroyAllWindows()
    cv2.waitKey(1)

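# Gradio handler for the Video tab: writes the annotated video next to the app
# and returns its path for playback.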
def predict_video(video_file_name):
    detected_video_file = "detected_video.mp4"
    write_video(video_file_name, detected_video_file, detection_model)
    return detected_video_file

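# Load the detection model once at startup so both tabs can reuse it.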
REPO_ID = "YEHTUT/tfodmodel"
detection_model = load_model()
# pil_image = Image.open(image_path)
# image_arr = pil_image_as_numpy_array(pil_image)
# predicted_img = predict(image_arr)
# predicted_img.save('predicted.jpg')
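
# Two-tab Gradio UI: one tab for single-image detection, one for videos.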
Image_tab = gr.Interface(fn=predict,
                         inputs=gr.Image(type="pil"),
                         outputs=gr.Image(type="pil"))

Video_tab = gr.Interface(fn=predict_video,
                         inputs=gr.Video(),
                         outputs=gr.Video())

gr.TabbedInterface([Image_tab, Video_tab], ["Image", "Video"]).launch(share=True)
#gr.Interface(fn=predict,
# inputs=gr.Image(type="pil"),
# outputs=gr.Image(type="pil")
# ).launch(share=True)
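
# Optional local check for the video path (hypothetical file name, not included
# in the repo):
# predict_video('test_samples/sample_video.mp4')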