Kousik Kumar Siddavaram committed · Commit 852a5d6 · 1 Parent(s): a42a3d3

Added backup files again
Files changed:
- app/Hackathon_setup/exp_recognition_bkp.py +71 -0
- app/Hackathon_setup/exp_recognition_model_bkp.py +31 -0
- app/Hackathon_setup/face_recognition_bkp.py +115 -0
- app/Hackathon_setup/face_recognition_model_bkp.py +80 -0
- app/Hackathon_setup/team_classifier_bkp.joblib +3 -0
- app/main_bkp.py +148 -0
- app/templates/expr_recognition_bkp.html +32 -0
- app/templates/face_recognition_bkp.html +32 -0
- app/templates/index_bkp.html +29 -0
- app/templates/predict_expr_recognition_bkp.html +37 -0
- app/templates/predict_face_recognition_bkp.html +37 -0
- app/templates/predict_similarity_bkp.html +38 -0
- app/templates/similarity_bkp.html +35 -0
app/Hackathon_setup/exp_recognition_bkp.py
ADDED
@@ -0,0 +1,71 @@
+import numpy as np
+import cv2
+from matplotlib import pyplot as plt
+import torch
+# In the below line, remove '.' while working on your local system. However, make sure
+# that '.' is present before exp_recognition_model while uploading to the server; do not remove it.
+from .exp_recognition_model import *
+from PIL import Image
+import base64
+import io
+import os
+## Add more imports if required
+
+#############################################################################################################################
+# Caution: Don't change any of the filenames, function names and definitions                                                #
+# Always use current_path + file_name for referring to any files; without it we cannot access files on the server           #
+#############################################################################################################################
+
+# current_path stores the absolute path of the directory containing this file.
+current_path = os.path.dirname(os.path.abspath(__file__))
+
+
+# 1) The below function is used to detect faces in the given image.
+# 2) It returns only one image, the one with the maximum area out of all the faces detected in the photo.
+# 3) If no face is detected, it returns zero (0).
+def detected_face(image):
+    eye_haar = current_path + '/haarcascade_eye.xml'
+    face_haar = current_path + '/haarcascade_frontalface_default.xml'
+    face_cascade = cv2.CascadeClassifier(face_haar)
+    eye_cascade = cv2.CascadeClassifier(eye_haar)
+    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
+    face_areas = []
+    images = []
+    required_image = 0
+    for i, (x, y, w, h) in enumerate(faces):
+        face_cropped = gray[y:y+h, x:x+w]
+        face_areas.append(w*h)
+        images.append(face_cropped)
+        required_image = images[np.argmax(face_areas)]
+        required_image = Image.fromarray(required_image)
+    return required_image
+
+
+# 1) The image captured from the mobile is passed to the below function in the API call; it returns the expression detected by your network.
+# 2) The image is passed to the function in base64 encoding; code for decoding the image is provided within the function.
+# 3) Define an object of your network here in the function, load the weights from the trained network, and set it to evaluation mode.
+# 4) Perform the necessary transformations on the input (the face detected by the above function); this should return the expression as a string, e.g. "Anger".
+# 5) For loading your model use current_path + '<your model file name>'; a detailed example is given in the comments below.
+## Caution: Don't change the definition or function name; for loading the model use current_path as shown in the example below.
+def get_expression(img):
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+    ##########################################################################################
+    ## Example for loading a model using a weight state dictionary:                         ##
+    ##     face_det_net = facExpRec()  # Example network                                    ##
+    ##     model = torch.load(current_path + '/exp_recognition_net.t7',                     ##
+    ##                        map_location=device)                                          ##
+    ##     face_det_net.load_state_dict(model['net_dict'])                                  ##
+    ##                                                                                      ##
+    ## current_path + '/<network_definition>' is the path of the saved model if present in  ##
+    ## the same directory as this file; we recommend keeping it in the same directory.      ##
+    ##########################################################################################
+
+    face = detected_face(img)
+    if face == 0:
+        face = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
+
+    # YOUR CODE HERE: return the expression predicted by your model
+
+    return "YET TO BE CODED"
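Note: the commented loading example above maps directly onto a working body for get_expression. A minimal sketch, assuming facExpRec has been implemented and a checkpoint exp_recognition_net.t7 with a 'net_dict' key has been trained and placed next to this file (neither is part of this commit):

    # Hypothetical completion of the placeholder above; facExpRec, trnscm and
    # classes come from exp_recognition_model, the checkpoint name is assumed.
    face_det_net = facExpRec()
    model = torch.load(current_path + '/exp_recognition_net.t7', map_location=device)
    face_det_net.load_state_dict(model['net_dict'])
    face_det_net.to(device)
    face_det_net.eval()
    tensor = trnscm(face).unsqueeze(0).to(device)   # 1 x 1 x 48 x 48 input batch
    with torch.no_grad():
        logits = face_det_net(tensor)
    return classes[logits.argmax(dim=1).item()]     # e.g. "HAPPINESS"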
app/Hackathon_setup/exp_recognition_model_bkp.py
ADDED
@@ -0,0 +1,31 @@
+import torch
+import torchvision
+import torch.nn as nn
+from torchvision import transforms
+## Add more imports if required
+
+####################################################################################################################
+# Define your model, transform and all necessary helper functions here                                             #
+# They will be imported into the exp_recognition.py file                                                           #
+####################################################################################################################
+
+# Definition of classes as a dictionary
+classes = {0: 'ANGER', 1: 'DISGUST', 2: 'FEAR', 3: 'HAPPINESS', 4: 'NEUTRAL', 5: 'SADNESS', 6: 'SURPRISE'}
+
+# Example network
+class facExpRec(torch.nn.Module):
+    def __init__(self):
+        pass  # remove 'pass' once you have written your code
+        # YOUR CODE HERE
+
+    def forward(self, x):
+        pass  # remove 'pass' once you have written your code
+        # YOUR CODE HERE
+
+# Sample helper function
+def rgb2gray(image):
+    return image.convert('L')
+
+# Sample transformation function
+# YOUR CODE HERE for changing the transformation values.
+trnscm = transforms.Compose([rgb2gray, transforms.Resize((48, 48)), transforms.ToTensor()])
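Note: trnscm chains a plain function with torchvision transforms, which transforms.Compose allows since it only requires callables. A quick shape check, using a throwaway RGB image as a stand-in input:

    from PIL import Image
    import torch

    img = Image.new('RGB', (320, 240))         # stand-in for a captured photo
    x = trnscm(img)                            # rgb2gray -> Resize -> ToTensor
    assert x.shape == torch.Size([1, 48, 48])  # one channel, values in [0, 1]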
app/Hackathon_setup/face_recognition_bkp.py
ADDED
@@ -0,0 +1,115 @@
+import numpy as np
+import cv2
+from matplotlib import pyplot as plt
+import torch
+# In the below line, remove '.' while working on your local system.
+# Keep '.' before face_recognition_model while uploading to the server.
+from .face_recognition_model import *
+from PIL import Image
+import base64
+import io
+import os
+import joblib
+import pickle
+# Add more imports if required
+
+###########################################################################################################################################
+# Caution: Don't change any of the filenames, function names and definitions                                                              #
+# Always use current_path + file_name for referring to any files; without it we cannot access files on the server                         #
+###########################################################################################################################################
+
+# current_path stores the absolute path of the directory containing this file.
+current_path = os.path.dirname(os.path.abspath(__file__))
+
+# -------------------------
+# Face Detection
+# -------------------------
+def detected_face(image):
+    eye_haar = current_path + '/haarcascade_eye.xml'
+    face_haar = current_path + '/haarcascade_frontalface_default.xml'
+    face_cascade = cv2.CascadeClassifier(face_haar)
+    eye_cascade = cv2.CascadeClassifier(eye_haar)
+    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
+    face_areas = []
+    images = []
+    required_image = 0
+    for i, (x, y, w, h) in enumerate(faces):
+        face_cropped = gray[y:y+h, x:x+w]
+        face_areas.append(w*h)
+        images.append(face_cropped)
+        required_image = images[np.argmax(face_areas)]
+        required_image = Image.fromarray(required_image)
+    return required_image
+
+# -------------------------
+# Compute Similarity
+# -------------------------
+def get_similarity(img1, img2):
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+    # Detect faces
+    det_img1 = detected_face(img1)
+    det_img2 = detected_face(img2)
+    if det_img1 == 0 or det_img2 == 0:
+        det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
+        det_img2 = Image.fromarray(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY))
+
+    # Transform images
+    face1 = trnscm(det_img1).unsqueeze(0).to(device)
+    face2 = trnscm(det_img2).unsqueeze(0).to(device)
+
+    # -------------------------
+    # Load Siamese Model
+    # -------------------------
+    model_path = current_path + '/siamese_model.t7'
+    checkpoint = torch.load(model_path, map_location=device)
+    feature_net = Siamese().to(device)
+    feature_net.load_state_dict(checkpoint['net_dict'])
+    feature_net.eval()
+
+    # -------------------------
+    # Compute similarity (Euclidean distance)
+    # -------------------------
+    with torch.no_grad():
+        output1, output2 = feature_net(face1, face2)
+        euclidean_distance = F.pairwise_distance(output1, output2)
+        # Convert distance to similarity score (0-1)
+        similarity_score = 1 / (1 + euclidean_distance.item())
+
+    return round(similarity_score, 3)
+
+# -------------------------
+# Get Face Class
+# -------------------------
+def get_face_class(img1):
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+    det_img1 = detected_face(img1)
+    if det_img1 == 0:
+        det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
+
+    # Transform image
+    face = trnscm(det_img1).unsqueeze(0).to(device)
+
+    # Load Siamese model
+    model_path = current_path + '/siamese_model.t7'
+    checkpoint = torch.load(model_path, map_location=device)
+    feature_net = Siamese().to(device)
+    feature_net.load_state_dict(checkpoint['net_dict'])
+    feature_net.eval()
+
+    # -------------------------
+    # Use Siamese + classifier to predict class (if classifier exists)
+    # -------------------------
+    # If you have a trained classifier that takes embeddings from the Siamese network as input:
+    #     classifier = <your classifier>
+    #     with torch.no_grad():
+    #         embedding = feature_net.forward_once(face)
+    #         pred = classifier(embedding)
+    #         predicted_class = classes[pred.argmax(dim=1).item()]
+
+    # Since the classifier is not trained here, return a placeholder
+    predicted_class = "YET TO BE CODED"
+
+    return predicted_class
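Note: F (torch.nn.functional) reaches this file through the wildcard import of face_recognition_model above. The mapping s = 1 / (1 + d) turns an unbounded Euclidean distance d >= 0 into a score in (0, 1], with identical embeddings (d = 0) giving 1.0. A small numeric illustration:

    # Distance-to-similarity mapping used in get_similarity
    for d in (0.0, 0.5, 2.0, 10.0):
        print(d, round(1 / (1 + d), 3))   # 1.0, 0.667, 0.333, 0.091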
app/Hackathon_setup/face_recognition_model_bkp.py
ADDED
@@ -0,0 +1,80 @@
+import math
+import torch
+import torchvision
+import torch.nn as nn
+import torch.nn.functional as F
+from torchvision import transforms
+# Add more imports if required
+
+
+# ---------------------------
+# Transformation Function
+# ---------------------------
+# Same transforms as used during training in Colab
+trnscm = transforms.Compose([
+    transforms.Resize((100, 100)),
+    transforms.ToTensor()
+])
+
+
+# ---------------------------
+# Siamese Network Definition
+# ---------------------------
+class Siamese(torch.nn.Module):
+    def __init__(self):
+        super(Siamese, self).__init__()
+
+        # CNN layers (same as the Colab model)
+        self.cnn1 = nn.Sequential(
+            nn.ReflectionPad2d(1),
+            nn.Conv2d(1, 4, kernel_size=3),
+            nn.ReLU(inplace=True),
+            nn.BatchNorm2d(4),
+
+            nn.ReflectionPad2d(1),
+            nn.Conv2d(4, 8, kernel_size=3),
+            nn.ReLU(inplace=True),
+            nn.BatchNorm2d(8),
+
+            nn.ReflectionPad2d(1),
+            nn.Conv2d(8, 8, kernel_size=3),
+            nn.ReLU(inplace=True),
+            nn.BatchNorm2d(8)
+        )
+
+        # Fully connected layers
+        self.fc1 = nn.Sequential(
+            nn.Linear(8 * 100 * 100, 500),
+            nn.ReLU(inplace=True),
+            nn.Linear(500, 500),
+            nn.ReLU(inplace=True),
+            nn.Linear(500, 5)
+        )
+
+    def forward_once(self, x):
+        # Forward pass for one image
+        output = self.cnn1(x)
+        output = output.view(output.size()[0], -1)
+        output = self.fc1(output)
+        return output
+
+    def forward(self, x1, x2):
+        # Forward pass for both images
+        output1 = self.forward_once(x1)
+        output2 = self.forward_once(x2)
+        return output1, output2
+
+
+##########################################################################################################
+## Sample classification network (specify if you are using a PyTorch classifier during training)        ##
+##     classifier = nn.Sequential(nn.Linear(64, 64), nn.BatchNorm1d(64), nn.ReLU(), nn.Linear...)       ##
+##########################################################################################################
+
+# Not used for face similarity, so keep it as None
+classifier = None
+
+
+# ---------------------------
+# Class labels (optional)
+# ---------------------------
+classes = ['person1', 'person2', 'person3', 'person4', 'person5', 'person6', 'person7']
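Note: ReflectionPad2d(1) followed by a 3x3 convolution preserves spatial size, so a 100x100 input stays 100x100 through cnn1, and the flattened size 8 * 100 * 100 matches fc1's input exactly. A sketch verifying the shapes with random tensors (untrained weights; eval mode so BatchNorm uses its default running stats):

    import torch
    import torch.nn.functional as F

    net = Siamese()
    net.eval()
    a = torch.randn(1, 1, 100, 100)   # grayscale faces, as produced by trnscm
    b = torch.randn(1, 1, 100, 100)
    with torch.no_grad():
        e1, e2 = net(a, b)
    print(e1.shape)                           # torch.Size([1, 5])
    print(F.pairwise_distance(e1, e2).shape)  # torch.Size([1])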
app/Hackathon_setup/team_classifier_bkp.joblib
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9c07b411ab1fa2f2365146b188ea0d4438b5317b88c81b256766c4c5ada3e93
+size 1761
app/main_bkp.py
ADDED
@@ -0,0 +1,148 @@
+import sys
+from pathlib import Path
+sys.path.append(str(Path(__file__).resolve().parent.parent))
+#print(sys.path)
+from typing import Any
+
+from fastapi import FastAPI, Request, APIRouter, File, UploadFile
+from fastapi.staticfiles import StaticFiles
+from fastapi.templating import Jinja2Templates
+from fastapi.middleware.cors import CORSMiddleware
+from app.config import settings
+from app import __version__
+from app.Hackathon_setup import face_recognition, exp_recognition
+
+import numpy as np
+from PIL import Image
+
+
+app = FastAPI(
+    title=settings.PROJECT_NAME, openapi_url=f"{settings.API_V1_STR}/openapi.json"
+)
+
+# To store files uploaded by users
+app.mount("/static", StaticFiles(directory="app/static"), name="static")
+
+# To access the templates directory
+templates = Jinja2Templates(directory="app/templates")
+
+simi_filename1 = None
+simi_filename2 = None
+face_rec_filename = None
+expr_rec_filename = None
+
+
+#################################### Home Page endpoints #################################################
+@app.get("/")
+async def root(request: Request):
+    return templates.TemplateResponse("index.html", {'request': request,})
+
+
+#################################### Face Similarity endpoints #################################################
+@app.get("/similarity/")
+async def similarity_root(request: Request):
+    return templates.TemplateResponse("similarity.html", {'request': request,})
+
+
+@app.post("/predict_similarity/")
+async def create_upload_files(request: Request, file1: UploadFile = File(...), file2: UploadFile = File(...)):
+    global simi_filename1
+    global simi_filename2
+
+    if 'image' in file1.content_type:
+        contents = await file1.read()
+        simi_filename1 = 'app/static/' + file1.filename
+        with open(simi_filename1, 'wb') as f:
+            f.write(contents)
+
+    if 'image' in file2.content_type:
+        contents = await file2.read()
+        simi_filename2 = 'app/static/' + file2.filename
+        with open(simi_filename2, 'wb') as f:
+            f.write(contents)
+
+    img1 = Image.open(simi_filename1)
+    img1 = np.array(img1).reshape(img1.size[1], img1.size[0], 3).astype(np.uint8)
+
+    img2 = Image.open(simi_filename2)
+    img2 = np.array(img2).reshape(img2.size[1], img2.size[0], 3).astype(np.uint8)
+
+    result = face_recognition.get_similarity(img1, img2)
+    #print(result)
+
+    return templates.TemplateResponse("predict_similarity.html", {"request": request,
+                                                                  "result": np.round(result, 3),
+                                                                  "simi_filename1": '../static/'+file1.filename,
+                                                                  "simi_filename2": '../static/'+file2.filename,})
+
+
+#################################### Face Recognition endpoints #################################################
+@app.get("/face_recognition/")
+async def face_recognition_root(request: Request):
+    return templates.TemplateResponse("face_recognition.html", {'request': request,})
+
+
+@app.post("/predict_face_recognition/")
+async def create_upload_files(request: Request, file3: UploadFile = File(...)):
+    global face_rec_filename
+
+    if 'image' in file3.content_type:
+        contents = await file3.read()
+        face_rec_filename = 'app/static/' + file3.filename
+        with open(face_rec_filename, 'wb') as f:
+            f.write(contents)
+
+    img1 = Image.open(face_rec_filename)
+    img1 = np.array(img1).reshape(img1.size[1], img1.size[0], 3).astype(np.uint8)
+
+    result = face_recognition.get_face_class(img1)
+    print(result)
+
+    return templates.TemplateResponse("predict_face_recognition.html", {"request": request,
+                                                                        "result": result,
+                                                                        "face_rec_filename": '../static/'+file3.filename,})
+
+
+#################################### Expression Recognition endpoints #################################################
+@app.get("/expr_recognition/")
+async def expr_recognition_root(request: Request):
+    return templates.TemplateResponse("expr_recognition.html", {'request': request,})
+
+
+@app.post("/predict_expr_recognition/")
+async def create_upload_files(request: Request, file4: UploadFile = File(...)):
+    global expr_rec_filename
+
+    if 'image' in file4.content_type:
+        contents = await file4.read()
+        expr_rec_filename = 'app/static/' + file4.filename
+        with open(expr_rec_filename, 'wb') as f:
+            f.write(contents)
+
+    img1 = Image.open(expr_rec_filename)
+    img1 = np.array(img1).reshape(img1.size[1], img1.size[0], 3).astype(np.uint8)
+
+    result = exp_recognition.get_expression(img1)
+    print(result)
+
+    return templates.TemplateResponse("predict_expr_recognition.html", {"request": request,
+                                                                        "result": result,
+                                                                        "expr_rec_filename": '../static/'+file4.filename,})
+
+
+# Set all CORS enabled origins
+if settings.BACKEND_CORS_ORIGINS:
+    app.add_middleware(
+        CORSMiddleware,
+        allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],
+        allow_credentials=True,
+        allow_methods=["*"],
+        allow_headers=["*"],
+    )
+
+
+# Start app
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=8001)
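Note: with the app running as above (uvicorn on port 8001), the similarity endpoint can be smoke-tested from a plain HTTP client. A sketch using requests, where a.jpg and b.jpg are stand-in local files, not part of this commit:

    import requests

    with open('a.jpg', 'rb') as f1, open('b.jpg', 'rb') as f2:
        r = requests.post('http://localhost:8001/predict_similarity/',
                          files={'file1': ('a.jpg', f1, 'image/jpeg'),
                                 'file2': ('b.jpg', f2, 'image/jpeg')})
    print(r.status_code)  # 200; the body is HTML rendered from predict_similarity.html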
app/templates/expr_recognition_bkp.html
ADDED
@@ -0,0 +1,32 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <title>Index</title>
+</head>
+<body>
+    <div>
+        <h1 style="background-color:LightGray;">
+            <center>Expression Recognition</center>
+        </h1>
+    </div>
+    <div>
+        <fieldset>
+            <ul>
+                <!-- <li> -->
+                <br>
+                <form action="/predict_expr_recognition/" enctype="multipart/form-data" method="post">
+                    <span style="font-weight:bold;font-family:sans-serif">Upload Image:</span> <br><br>
+                    <input name="file4" type="file" onchange="readURL(this);" />
+                    <br><br><br>
+                    <button type="submit">Recognize Expression</button>
+                </form>
+                <!-- </li> -->
+                <br><br>
+                <form action="/" method="get">
+                    <button type="submit">Home</button>
+                </form>
+            </ul>
+        </fieldset>
+    </div>
+</body>
+</html>
app/templates/face_recognition_bkp.html
ADDED
@@ -0,0 +1,32 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <title>Index</title>
+</head>
+<body>
+    <div>
+        <h1 style="background-color:LightGray;">
+            <center>Face Recognition</center>
+        </h1>
+    </div>
+    <div>
+        <fieldset>
+            <ul>
+                <!-- <li> -->
+                <br>
+                <form action="/predict_face_recognition/" enctype="multipart/form-data" method="post">
+                    <span style="font-weight:bold;font-family:sans-serif">Upload Image:</span> <br><br>
+                    <input name="file3" type="file" onchange="readURL(this);" />
+                    <br><br><br>
+                    <button type="submit">Recognize Face</button>
+                </form>
+                <!-- </li> -->
+                <br><br>
+                <form action="/" method="get">
+                    <button type="submit">Home</button>
+                </form>
+            </ul>
+        </fieldset>
+    </div>
+</body>
+</html>
app/templates/index_bkp.html
ADDED
@@ -0,0 +1,29 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <title>Index</title>
+</head>
+<body>
+    <div>
+        <h1 style="background-color:LightGray;">
+            <center>Recognition Application</center>
+        </h1>
+    </div>
+    <div>
+        <fieldset>
+            <ul>
+                <li><span style="font-weight:bold;font-family:sans-serif">Select a task:</span>
+                    <br><br><br>
+                    <form action="{{ url_for('similarity_root') }}"><button>Face Similarity</button></form>
+                    <br><br>
+                    <form action="{{ url_for('face_recognition_root') }}"><button>Face Recognition</button></form>
+                    <br><br>
+                    <form action="{{ url_for('expr_recognition_root') }}"><button>Expression Recognition</button></form>
+                    <br>
+                </li>
+                <br>
+            </ul>
+        </fieldset>
+    </div>
+</body>
+</html>
app/templates/predict_expr_recognition_bkp.html
ADDED
@@ -0,0 +1,37 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <title>Predict</title>
+</head>
+<body>
+    <div>
+        <h1 style="background-color:LightGray;">
+            <center>Expression Recognition</center>
+        </h1>
+    </div>
+    <div>
+        <fieldset>
+            <h2>
+                <center>
+                    <span style="font-weight:bold;font-family:sans-serif">Prediction: </span>
+                    <span style="font-weight:bold;color:blue"> {{result}}</span>
+                </center>
+            </h2>
+            <h3><center><span style="font-weight:bold;font-family:sans-serif">Input image:</span></center></h3>
+            <p>
+                <center>
+                    <img src="{{expr_rec_filename}}" alt="{{expr_rec_filename}}" width='150' height='150'>
+                </center>
+            </p>
+            <br>
+            <form action="/expr_recognition/" method="get">
+                <center><button type="submit">Check Another Input</button></center>
+            </form>
+            <br>
+            <form action="/" method="get">
+                <center><button type="submit">Home</button></center>
+            </form>
+        </fieldset>
+    </div>
+</body>
+</html>
app/templates/predict_face_recognition_bkp.html
ADDED
@@ -0,0 +1,37 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <title>Predict</title>
+</head>
+<body>
+    <div>
+        <h1 style="background-color:LightGray;">
+            <center>Face Recognition</center>
+        </h1>
+    </div>
+    <div>
+        <fieldset>
+            <h2>
+                <center>
+                    <span style="font-weight:bold;font-family:sans-serif">Prediction: </span>
+                    <span style="font-weight:bold;color:blue"> {{result}}</span>
+                </center>
+            </h2>
+            <h3><center><span style="font-weight:bold;font-family:sans-serif">Input image:</span></center></h3>
+            <p>
+                <center>
+                    <img src="{{face_rec_filename}}" alt="{{face_rec_filename}}" width='150' height='150'>
+                </center>
+            </p>
+            <br>
+            <form action="/face_recognition/" method="get">
+                <center><button type="submit">Check Another Input</button></center>
+            </form>
+            <br>
+            <form action="/" method="get">
+                <center><button type="submit">Home</button></center>
+            </form>
+        </fieldset>
+    </div>
+</body>
+</html>
app/templates/predict_similarity_bkp.html
ADDED
@@ -0,0 +1,38 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <title>Predict</title>
+</head>
+<body>
+    <div>
+        <h1 style="background-color:LightGray;">
+            <center>Face Similarity</center>
+        </h1>
+    </div>
+    <div>
+        <fieldset>
+            <h2>
+                <center>
+                    <span style="font-weight:bold;font-family:sans-serif">Similarity:</span>
+                    <span style="font-weight:bold;color:blue"> {{result}}</span>
+                </center>
+            </h2>
+            <h3><center><span style="font-weight:bold;font-family:sans-serif">Input images:</span></center></h3>
+            <p>
+                <center>
+                    <img src="{{simi_filename1}}" alt="{{simi_filename1}}" width='150' height='150'>
+                    <img src="{{simi_filename2}}" alt="{{simi_filename2}}" width='150' height='150'>
+                </center>
+            </p>
+            <br>
+            <form action="/similarity/" method="get">
+                <center><button type="submit">Check Another Input</button></center>
+            </form>
+            <br>
+            <form action="/" method="get">
+                <center><button type="submit">Home</button></center>
+            </form>
+        </fieldset>
+    </div>
+</body>
+</html>
app/templates/similarity_bkp.html
ADDED
@@ -0,0 +1,35 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <title>Index</title>
+</head>
+<body>
+    <div>
+        <h1 style="background-color:LightGray;">
+            <center>Face Similarity</center>
+        </h1>
+    </div>
+    <div>
+        <fieldset>
+            <ul>
+                <!-- <li> -->
+                <br>
+                <form action="/predict_similarity/" enctype="multipart/form-data" method="post">
+                    <span style="font-weight:bold;font-family:sans-serif">Upload First Image:</span> <br><br>
+                    <input name="file1" type="file" onchange="readURL(this);" />
+                    <br><br><br>
+                    <span style="font-weight:bold;font-family:sans-serif">Upload Second Image:</span> <br><br>
+                    <input name="file2" type="file" onchange="readURL(this);" />
+                    <br><br><br><br>
+                    <button type="submit">Check Similarity</button>
+                </form>
+                <!-- </li> -->
+                <br><br>
+                <form action="/" method="get">
+                    <button type="submit">Home</button>
+                </form>
+            </ul>
+        </fieldset>
+    </div>
+</body>
+</html>