#!/usr/bin/env python
# coding: utf-8
# In[30]:
# Core dependencies: OpenCV for video/frame handling, TensorFlow/Keras for the
# classifier, Gradio for the (currently disabled) web interface.
import os
import warnings

import cv2
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing import image
import gradio as gr

warnings.filterwarnings('ignore')
print(tf.__version__)
# In[46]:
labels = {0: 'Normal', 1: 'RoadAccidents', 2: 'Violent'}
# In[47]:
# Pre-trained Keras classifier; frames are resized to 128x128 before being passed to it below.
model = keras.models.load_model("AniketModel.h5", compile=False)
# In[48]:
def videoToFrames(video):
    """Count the frames in the video at `video` by reading it to the end."""
    # Read the video from the specified path
    cam = cv2.VideoCapture(video)
    currentframe = 0
    while True:
        # Keep reading until the stream is exhausted
        ret, frame = cam.read()
        if ret:
            currentframe += 1
        else:
            break
    # Release the capture and any OpenCV windows once done
    cam.release()
    cv2.destroyAllWindows()
    return currentframe
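# Usage sketch (hypothetical file name): the return value is simply the number
# of readable frames, which later doubles as the per-frame prediction budget.
#   n_frames = videoToFrames("some_clip.mp4")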
# In[49]:
def hconcat_resize(img_list, interpolation=cv2.INTER_CUBIC):
    """Horizontally concatenate images after resizing them all to the smallest height."""
    # Take the minimum height across the list
    h_min = min(img.shape[0] for img in img_list)
    # Resize each image to that height, preserving its aspect ratio
    im_list_resize = [
        cv2.resize(img,
                   (int(img.shape[1] * h_min / img.shape[0]), h_min),
                   interpolation=interpolation)
        for img in img_list
    ]
    return cv2.hconcat(im_list_resize)
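# Illustration (synthetic shapes, not part of the pipeline): two dummy frames of
# different heights are scaled down to the smaller height before concatenation,
# so the result is as tall as the shortest input.
_demo_a = np.zeros((120, 160, 3), dtype=np.uint8)
_demo_b = np.zeros((90, 160, 3), dtype=np.uint8)
assert hconcat_resize([_demo_a, _demo_b]).shape[0] == 90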
# In[55]:
def make_average_predictions(video_file_path, predictions_frames_count):
    """Classify every 16th frame of the video and collect the model's predictions.

    Returns a dict of per-class averaged probabilities and the list of
    per-sample argmax predictions (class indices).
    """
    confidences = {}
    number_of_classes = 3

    # Reading the video file using a VideoCapture object
    video_reader = cv2.VideoCapture(video_file_path)

    # Total frames in the video, and the stride that would spread
    # `predictions_frames_count` samples evenly over the clip
    # (not used when reading sequentially, as below)
    video_frames_count = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
    skip_frames_window = max(video_frames_count // max(predictions_frames_count, 1), 1)

    image_height, image_width = 128, 128
    count = 0
    features = []
    probability_sums = np.zeros(number_of_classes)

    for frame_counter in range(predictions_frames_count):
        # Reading the next frame; stop at end of stream
        ret, frame = video_reader.read()
        if not ret or frame is None:
            break

        # Resize the frame to the fixed input size and scale pixels to [0, 1]
        resized_frame = cv2.resize(frame, (image_width, image_height))
        normalized_frame = resized_frame / 255

        # Only every 16th frame is actually passed to the model
        if frame_counter % 16 == 0:
            X = np.expand_dims(normalized_frame, axis=0)

            # Predicted probabilities for the three classes
            predicted_labels_probabilities = np.squeeze(model.predict(X))
            print(predicted_labels_probabilities)

            prediction = int(np.argmax(predicted_labels_probabilities))
            print(prediction)
            print(labels[prediction])

            probability_sums += predicted_labels_probabilities
            features.append(prediction)

            # Stop after roughly ten sampled frames to keep inference cheap
            if count < 10:
                count += 1
            else:
                break

    # Averaging the collected probabilities column-wise per class
    if features:
        averaged = probability_sums / len(features)
        for class_index in range(number_of_classes):
            confidences[labels[class_index]] = float(averaged[class_index])

    # Closing the VideoCapture object and releasing all resources held by it
    video_reader.release()

    return confidences, features
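# Usage sketch (hypothetical file name; requires the model loaded above):
#   conf, preds = make_average_predictions("some_clip.mp4", videoToFrames("some_clip.mp4"))
# `conf` maps each class name to its averaged probability and `preds` holds one
# predicted class index per sampled frame, ready for the majority vote below.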
# In[56]:
def most_frequent(List):
    """Return the value that appears most often in `List` (majority vote)."""
    counter = 0
    num = List[0]
    for i in List:
        curr_frequency = List.count(i)
        if curr_frequency > counter:
            counter = curr_frequency
            num = i
    return num
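# Illustration (synthetic values, not part of the pipeline): the majority vote
# over a list of class indices picks the most common one, 1 in this case.
assert most_frequent([0, 1, 1, 2, 1, 0]) == 1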
# In[64]:
video = "/home/shubham/__New-D__/VITA/Project/redundant/production ID_4959443.mp4"
#labels = {0: 'RoadAccidents', 1: 'Normal', 2: 'Violent'}
framecount = videoToFrames(video)
confidences, features = make_average_predictions(video, framecount)
List = most_frequent(features)
print("The Video You Have Entered is of",labels.get(List))
#print(confidences)
# In[53]:
"""def classify_video(video):
labels = {0: 'RoadAccidents', 1: 'Normal', 2: 'Violent'}
framecount = videoToFrames(video)
confidences, features = make_average_predictions(video, framecount)
List = most_frequent(features)
#print("The Video You Have Entered is of",labels.get(List))
return labels.get(List)
demo = gr.Interface(classify_video,
inputs=gr.Video(),
outputs=gr.outputs.Label(),
cache_examples=True)
if __name__ == "__main__":
demo.launch(share=False)"""
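# A sketch of how the disabled interface above could be wired up with the
# current Gradio API (gr.Label in place of the deprecated gr.outputs.Label);
# the class ordering is assumed to follow the global `labels` dict:
#
#     def classify_video(video):
#         framecount = videoToFrames(video)
#         confidences, features = make_average_predictions(video, framecount)
#         return labels.get(most_frequent(features)) if features else "Unreadable video"
#
#     demo = gr.Interface(classify_video, inputs=gr.Video(), outputs=gr.Label())
#
#     if __name__ == "__main__":
#         demo.launch()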