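"""Streamlit app for celebrity image classification.

Detects faces with OpenCV Haar cascades, keeps crops in which at least two
eyes are visible, builds a combined raw-pixel + wavelet feature vector, and
scores it with a pre-trained model loaded from saved_model.pkl.
"""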
import streamlit as st
import joblib
import json
import numpy as np
import base64
import cv2
import pywt
import pandas as pd
from PIL import Image
import io

__class_name_to_number = {}
__class_number_to_name = {}

__model = None

def w2d(img, mode='haar', level=1):
    # Convert to grayscale and scale pixel values to [0, 1].
    imArray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    imArray = np.float32(imArray)
    imArray /= 255

    # Wavelet decomposition: zero out the approximation coefficients so that
    # only the detail (edge) information remains, then reconstruct.
    coeffs = pywt.wavedec2(imArray, mode, level=level)
    coeffs_H = list(coeffs)
    coeffs_H[0] *= 0

    imArray_H = pywt.waverec2(coeffs_H, mode)
    imArray_H *= 255
    imArray_H = np.uint8(imArray_H)

    return imArray_H

def classify_image(image_base64_data, file_path=None):
    imgs = get_cropped_image_if_2_eyes(file_path, image_base64_data)

    result = []
    for img in imgs:
        # Stack the 32x32 raw colour crop and its 32x32 wavelet-transformed
        # version into one feature vector, mirroring the training pipeline.
        scaled_raw_img = cv2.resize(img, (32, 32))
        img_har = w2d(img, 'db1', 5)
        scaled_img_har = cv2.resize(img_har, (32, 32))
        combined_img = np.vstack((scaled_raw_img.reshape(32 * 32 * 3, 1),
                                  scaled_img_har.reshape(32 * 32, 1)))

        len_image_array = 32 * 32 * 3 + 32 * 32
        final = combined_img.reshape(1, len_image_array).astype(float)
        result.append({
            'class': class_number_to_name(__model.predict(final)[0]),
            'class_probability': np.around(__model.predict_proba(final) * 100, 2).tolist()[0],
            'class_dictionary': __class_name_to_number
        })

    return result

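# Illustrative shape of classify_image()'s return value (the class names and
# probability figures below are made up; real ones come from
# class_dictionary.json and the trained model):
# [{'class': 'virat_kohli',
#   'class_probability': [1.2, 0.8, 3.1, 94.5, 0.4],
#   'class_dictionary': {'virat_kohli': 3, ...}}]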
def class_number_to_name(class_num):
    return __class_number_to_name[class_num]

def load_saved_artifacts():
    print("loading saved artifacts...start")
    global __class_name_to_number
    global __class_number_to_name

    # class_dictionary.json maps class names to the numeric labels used by the
    # model; build the reverse lookup for reporting predictions.
    with open("./class_dictionary.json", "r") as f:
        __class_name_to_number = json.load(f)
        __class_number_to_name = {v: k for k, v in __class_name_to_number.items()}

    global __model
    if __model is None:
        with open('./saved_model.pkl', 'rb') as f:
            __model = joblib.load(f)
    print("loading saved artifacts...done")

def get_cv2_image_from_base64_string(b64str):
    '''
    Decode a "data:image/...;base64,..." string into an OpenCV BGR image.
    credit: https://stackoverflow.com/questions/33754935/read-a-base-64-encoded-image-from-memory-using-opencv-python-library
    :param b64str: base64-encoded image string with a data-URI prefix
    :return: decoded image as a numpy array (BGR)
    '''
    encoded_data = b64str.split(',')[1]
    nparr = np.frombuffer(base64.b64decode(encoded_data), np.uint8)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    return img

def get_cropped_image_if_2_eyes(image_path, image_base64_data):
    face_cascade = cv2.CascadeClassifier('./opencv/haarcascades/haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('./opencv/haarcascades/haarcascade_eye.xml')

    if image_path:
        img = cv2.imread(image_path)
    else:
        img = get_cv2_image_from_base64_string(image_base64_data)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    # Keep only face crops in which at least two eyes are detected, so that
    # heavily occluded or false-positive faces are discarded.
    cropped_faces = []
    for (x, y, w, h) in faces:
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        if len(eyes) >= 2:
            cropped_faces.append(roi_color)
    return cropped_faces

def get_b64_test_image_for_virat():
    # Helper for local testing: returns a base64-encoded sample image stored in b64.txt.
    with open("b64.txt") as f:
        return f.read()


load_saved_artifacts()

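# --- Streamlit UI: upload an image, run the classifier, and show per-class probabilities ---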
st.title("Celebrity Image Classification")

uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption='Uploaded Image', use_column_width=True)

    # Re-encode the uploaded image as a base64 data URI, the format classify_image() expects.
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    img_str = base64.b64encode(buffered.getvalue()).decode()

    result = classify_image(f"data:image/png;base64,{img_str}", None)

    if result:
        st.subheader("Classification Results:")
        for r in result:
            # Map each class name to its probability, using the class's numeric
            # label as the index into the probability vector, then sort descending.
            probabilities = {name: r['class_probability'][num] for name, num in r['class_dictionary'].items()}
            sorted_probabilities = sorted(probabilities.items(), key=lambda x: x[1], reverse=True)

            df = pd.DataFrame(sorted_probabilities, columns=['Celebrity', 'Probability (%)'])
            st.table(df)

            st.write(f"Top prediction: {r['class']} with {r['class_probability'][r['class_dictionary'][r['class']]]}% probability")
    else:
        st.write("No faces detected in the image.")