import cv2
import numpy as np
from tensorflow.keras.models import load_model

# Location on Google Drive of the saved, compiled gender-classification model.
MODEL_PATH = '/content/drive/MyDrive/saved_models/gender_model/gender_compiled_model'
# Restore the trained Keras model from disk.
model = load_model(MODEL_PATH)
# Load and preprocess an image so it matches the model's expected input.
def preprocess_image(image_path, target_size=(224, 224)):
    """Read an image from disk and prepare it for model inference.

    Args:
        image_path: Path to the image file on disk.
        target_size: (width, height) to resize to. Defaults to 224x224,
            the input size the rest of this script assumes.

    Returns:
        A float32 numpy array of shape (1, height, width, 3) with pixel
        values scaled to [-1, 1].

    Raises:
        ValueError: If the image cannot be read from ``image_path``.
    """
    img = cv2.imread(image_path)
    # cv2.imread returns None (it does not raise) for missing or unreadable
    # files; fail fast with a clear message instead of crashing in resize.
    if img is None:
        raise ValueError(f"Could not read image: {image_path}")
    # Resize to the model's expected input size (224x224 by default).
    # NOTE(review): a previous comment claimed 160x160 / InceptionResNetV1,
    # which contradicted the code; the code's 224x224 is kept as-is.
    img = cv2.resize(img, target_size)
    # OpenCV loads images in BGR channel order; the model expects RGB.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Scale pixel values from [0, 255] to [-1, 1].
    img = img.astype('float32') / 127.5 - 1
    # Add a leading batch dimension: (H, W, 3) -> (1, H, W, 3).
    img = np.expand_dims(img, axis=0)
    return img
# Image to classify.
image_path = '/content/pic.jpeg'
# Prepare the input batch for the network.
input_image = preprocess_image(image_path)
# Forward pass through the gender classifier.
pred = model.predict(input_image)
# Output-unit order the classifier was trained with.
labels = ["Woman", "Man"]
# Index of the highest-scoring output unit.
predicted_label_index = np.argmax(pred)
# Map the winning index back to a human-readable label.
predicted_gender = labels[predicted_label_index]
print(f"Predicted Gender: {predicted_gender}")