import os
from io import BytesIO

import cv2
import gradio as gr
import numpy as np
import tensorflow as tf
from huggingface_hub import snapshot_download
from PIL import Image
from tqdm import tqdm

from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils

PATH_TO_LABELS = 'data/label_map.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(
    PATH_TO_LABELS, use_display_name=True)


def pil_image_as_numpy_array(pilimg):
    # Convert a PIL image to a batched uint8 array of shape (1, H, W, 3),
    # the input format TF Object Detection saved models expect.
    img_array = tf.keras.utils.img_to_array(pilimg).astype(np.uint8)
    img_array = np.expand_dims(img_array, axis=0)
    return img_array


def load_image_into_numpy_array(path):
    image_data = tf.io.gfile.GFile(path, 'rb').read()
    image = Image.open(BytesIO(image_data))
    return pil_image_as_numpy_array(image)


def load_model(repo_id):
    # Download the exported SavedModel from the Hugging Face Hub and load it.
    download_dir = snapshot_download(repo_id)
    saved_model_dir = os.path.join(download_dir, "saved_model")
    return tf.saved_model.load(saved_model_dir)


def predict(pilimg):
    image_np = pil_image_as_numpy_array(pilimg)
    return predict2(image_np)


def predict2(image_np):
    results = detection_model(image_np)

    # Different object detection models return additional outputs;
    # convert every tensor in the result dict to a NumPy array.
    result = {key: value.numpy() for key, value in results.items()}

    label_id_offset = 0
    image_np_with_detections = image_np.copy()

    viz_utils.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections[0],
        result['detection_boxes'][0],
        (result['detection_classes'][0] + label_id_offset).astype(int),
        result['detection_scores'][0],
        category_index,
        use_normalized_coordinates=True,
        max_boxes_to_draw=200,
        min_score_thresh=.60,
        agnostic_mode=False,
        line_thickness=2)

    return tf.keras.utils.array_to_img(image_np_with_detections[0])


def detect_video(video):
    label_id_offset = 0

    # Create a video capture object and read the input video's properties.
    cap = cv2.VideoCapture(video)
    nb_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    fps = cap.get(cv2.CAP_PROP_FPS)

    # Write annotated frames to a new file so the gr.Video output receives a
    # playable file path. Note the "mp4v" codec may not play in every browser;
    # re-encode to H.264 if playback fails.
    output_path = os.path.join(os.path.dirname(video), "detected.mp4")
    writer = cv2.VideoWriter(
        output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (frame_w, frame_h))

    # Process frames in a loop.
    for _ in tqdm(range(nb_frames)):
        ret, frame = cap.read()
        if not ret:
            break

        # OpenCV decodes frames as BGR; the model and the visualization
        # utilities expect RGB.
        image_np = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        input_tensor = tf.convert_to_tensor(
            np.expand_dims(image_np, 0), dtype=tf.uint8)
        results = detection_model(input_tensor)

        viz_utils.visualize_boxes_and_labels_on_image_array(
            image_np,
            results['detection_boxes'][0].numpy(),
            (results['detection_classes'][0].numpy() + label_id_offset).astype(int),
            results['detection_scores'][0].numpy(),
            category_index,
            use_normalized_coordinates=True,
            max_boxes_to_draw=200,
            min_score_thresh=.50,
            agnostic_mode=False,
            line_thickness=2)

        writer.write(cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR))

    # Release resources.
    cap.release()
    writer.release()
    return output_path


REPO_ID = "apailang/mytfodmodel"
detection_model = load_model(REPO_ID)

# Quick local sanity check:
# pil_image = Image.open(image_path)
# predicted_img = predict(pil_image)
# predicted_img.save('predicted.jpg')
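# Optional warm-up inference (a sketch, not required for correctness): the
# first call into a loaded SavedModel is typically the slowest, so one
# throwaway prediction at startup keeps the first user request fast. The
# 320x320 all-zeros frame is an arbitrary assumption; TF Object Detection
# exports generally accept variable-sized uint8 batches of shape
# [1, None, None, 3].
_ = detection_model(tf.zeros((1, 320, 320, 3), dtype=tf.uint8))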
base_dir = os.path.dirname(__file__)
test_images = [os.path.join(base_dir, f"data/test{i}.jpeg") for i in range(1, 13)]

image_demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
    title="Image Prediction Interface",
    description="Upload an image for prediction",
    examples=[[path] for path in test_images],
    cache_examples=True,
)

# Example videos.
a = os.path.join(base_dir, "data/a.mp4")
b = os.path.join(base_dir, "data/b.mp4")
c = os.path.join(base_dir, "data/c.mp4")

video_demo = gr.Interface(
    fn=detect_video,
    inputs=gr.Video(),
    outputs=gr.Video(),
    examples=[[a], [b], [c]],
    cache_examples=True,
)

demo = gr.TabbedInterface([image_demo, video_demo], ["Image", "Video"])

if __name__ == "__main__":
    demo.launch()