Upload 4 files
- app.py +103 -0
- emotion_detection_using_face.py +148 -0
- model/emotion_model.h5 +3 -0
- model/emotion_model.json +1 -0
app.py
ADDED
@@ -0,0 +1,103 @@
import streamlit as st
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import model_from_json
import os
from PIL import Image
import tempfile

# ✅ Ensure this is the first Streamlit command
st.set_page_config(page_title="Emotion Detection App", layout="wide")

# Constants
TARGET_SIZE = (48, 48)
EMOTION_DICT = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}

# Load Model
@st.cache_resource
def load_emotion_model():
    with open('model/emotion_model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    emotion_model = model_from_json(loaded_model_json, custom_objects={'Sequential': tf.keras.models.Sequential})
    emotion_model.load_weights("model/emotion_model.h5")
    return emotion_model

emotion_model = load_emotion_model()

# Load Haar Cascade for face detection
face_detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Detect faces in a BGR frame, predict an emotion for each, and annotate the frame in place
def process_image(image):
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(gray_img, scaleFactor=1.3, minNeighbors=5)

    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y-50), (x+w, y+h+10), (0, 255, 0), 4)
        roi_gray_frame = gray_img[y:y + h, x:x + w]
        # Resize the face crop to 48x48 and add batch/channel dims -> shape (1, 48, 48, 1)
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray_frame, TARGET_SIZE), -1), 0)

        emotion_prediction = emotion_model.predict(cropped_img)
        maxindex = int(np.argmax(emotion_prediction))
        cv2.putText(image, EMOTION_DICT[maxindex], (x+5, y-20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)

    return image

# Streamlit UI
st.title("😀 Emotion Detection App")
st.write("Upload an image or video, or use the live camera to detect emotions in real time.")

# Sidebar navigation
option = st.sidebar.radio("Choose an option", ["📷 Upload Image", "🎥 Upload Video", "📡 Live Camera"])

# Image Upload
if option == "📷 Upload Image":
    uploaded_image = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])

    if uploaded_image is not None:
        file_bytes = np.asarray(bytearray(uploaded_image.read()), dtype=np.uint8)
        image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)

        # OpenCV decodes to BGR, so tell Streamlit the channel order
        st.image(image, caption="Uploaded Image", channels="BGR", use_column_width=True)

        if st.button("🔍 Detect Emotion"):
            processed_img = process_image(image)
            st.image(processed_img, caption="Processed Image", channels="BGR", use_column_width=True)

# Video Upload
elif option == "🎥 Upload Video":
    uploaded_video = st.file_uploader("Upload a video", type=["mp4", "avi", "mov", "mkv"])

    if uploaded_video is not None:
        tfile = tempfile.NamedTemporaryFile(delete=False)
        tfile.write(uploaded_video.read())

        cap = cv2.VideoCapture(tfile.name)
        stframe = st.empty()

        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            processed_frame = process_image(frame)
            stframe.image(processed_frame, channels="BGR")

        cap.release()

# Live Camera
elif option == "📡 Live Camera":
    stframe = st.empty()
    cap = cv2.VideoCapture(0)

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        processed_frame = process_image(frame)
        stframe.image(processed_frame, channels="BGR")

    cap.release()
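The load-and-predict flow used by process_image above can also be exercised outside Streamlit, which is handy for checking the model files on their own. The following is a minimal sketch, not part of this upload, assuming the same model/ directory and a hypothetical test photo face.jpg containing one face:

import cv2
import numpy as np
from tensorflow.keras.models import model_from_json

EMOTION_DICT = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}

# Rebuild the architecture from JSON and attach the trained weights
with open("model/emotion_model.json") as f:
    model = model_from_json(f.read())
model.load_weights("model/emotion_model.h5")

# "face.jpg" is a hypothetical local test image
img = cv2.imread("face.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

detector = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
faces = detector.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)

for (x, y, w, h) in faces:
    # Crop the face, resize to the 48x48 input the network expects,
    # and add batch and channel dimensions -> shape (1, 48, 48, 1)
    roi = cv2.resize(gray[y:y + h, x:x + w], (48, 48))
    batch = np.expand_dims(np.expand_dims(roi, -1), 0)
    probs = model.predict(batch)
    print(EMOTION_DICT[int(np.argmax(probs))])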
emotion_detection_using_face.py
ADDED
@@ -0,0 +1,148 @@
from flask import Flask, request, redirect, url_for, render_template, send_file, Response
import os
import cv2
import numpy as np
from tensorflow.keras.models import model_from_json
import tensorflow as tf

# Constants
TARGET_SIZE = (48, 48)
BATCH_SIZE = 64
EMOTION_DICT = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'uploads'
app.config['PROCESSED_FOLDER'] = 'processed'
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
os.makedirs(app.config['PROCESSED_FOLDER'], exist_ok=True)

def load_model():
    with open('model/emotion_model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    emotion_model = model_from_json(loaded_model_json, custom_objects={'Sequential': tf.keras.models.Sequential})
    emotion_model.load_weights("model/emotion_model.h5")
    print("Loaded model from disk")
    return emotion_model

emotion_model = load_model()

# Annotate a single image on disk with detected emotions and save the result
def process_image(file_path, output_path):
    img = cv2.imread(file_path)
    if img is None:
        print(f"Error loading image {file_path}")
        return

    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    face_detector = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
    faces = face_detector.detectMultiScale(gray_img, scaleFactor=1.3, minNeighbors=5)

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y-50), (x+w, y+h+10), (0, 255, 0), 4)
        roi_gray_frame = gray_img[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray_frame, TARGET_SIZE), -1), 0)

        emotion_prediction = emotion_model.predict(cropped_img)
        maxindex = int(np.argmax(emotion_prediction))
        cv2.putText(img, EMOTION_DICT[maxindex], (x+5, y-20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)

    cv2.imwrite(output_path, img)

# Annotate every frame of a video and write the result to a new file
def process_video(file_path, output_path):
    cap = cv2.VideoCapture(file_path)
    face_detector = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(output_path, fourcc, 20.0, (int(cap.get(3)), int(cap.get(4))))

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        num_faces = face_detector.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)

        for (x, y, w, h) in num_faces:
            cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (0, 255, 0), 4)
            roi_gray_frame = gray_frame[y:y + h, x:x + w]
            cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray_frame, TARGET_SIZE), -1), 0)

            emotion_prediction = emotion_model.predict(cropped_img)
            maxindex = int(np.argmax(emotion_prediction))
            cv2.putText(frame, EMOTION_DICT[maxindex], (x+5, y-20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)

        out.write(frame)

    cap.release()
    out.release()
    cv2.destroyAllWindows()

# Stream annotated webcam frames as a multipart JPEG (MJPEG) response body
def gen_frames():
    cap = cv2.VideoCapture(0)
    face_detector = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')

    while True:
        success, frame = cap.read()
        if not success:
            break
        else:
            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_detector.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)

            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (0, 255, 0), 4)
                roi_gray_frame = gray_frame[y:y + h, x:x + w]
                cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray_frame, TARGET_SIZE), -1), 0)

                emotion_prediction = emotion_model.predict(cropped_img)
                maxindex = int(np.argmax(emotion_prediction))
                cv2.putText(frame, EMOTION_DICT[maxindex], (x+5, y-20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)

            ret, buffer = cv2.imencode('.jpg', frame)
            frame = buffer.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

    cap.release()

@app.route('/')
def index():
    return render_template('camera.html')

@app.route('/upload', methods=['POST'])
def upload_file():
    if 'file' not in request.files:
        return redirect(request.url)
    file = request.files['file']
    if file.filename == '':
        return redirect(request.url)
    if file:
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
        file.save(file_path)
        output_path = os.path.join(app.config['PROCESSED_FOLDER'], file.filename)

        if file.filename.lower().endswith(('png', 'jpg', 'jpeg')):
            process_image(file_path, output_path)
        elif file.filename.lower().endswith(('mp4', 'avi', 'mov', 'mkv')):
            process_video(file_path, output_path)
        else:
            return 'Unsupported file type'

        return redirect(url_for('download_file', filename=file.filename))
    return 'File upload failed'

@app.route('/download/<filename>')
def download_file(filename):
    return send_file(os.path.join(app.config['PROCESSED_FOLDER'], filename), as_attachment=True)

@app.route('/video_feed')
def video_feed():
    return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/camera')
def camera():
    return render_template('camera.html')

if __name__ == "__main__":
    app.run(debug=True)
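The /upload route above accepts a multipart form field named "file" and, once processing finishes, redirects to /download/<filename>, which returns the annotated file. A small client sketch for exercising it, not part of this upload, assuming the app is running locally on Flask's default port 5000 and a hypothetical test image sample.jpg:

import requests

# Assumes the Flask app above is running locally on its default port
BASE = "http://127.0.0.1:5000"

# "sample.jpg" is a hypothetical local test file
with open("sample.jpg", "rb") as f:
    resp = requests.post(f"{BASE}/upload",
                         files={"file": ("sample.jpg", f, "image/jpeg")},
                         allow_redirects=True)

# The redirect lands on /download/<filename>, so resp.content is the processed image
with open("sample_processed.jpg", "wb") as out:
    out.write(resp.content)
print(resp.status_code)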
model/emotion_model.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b3f0b5e2305190d32260bf2cad3907d4f305bcff0600096a6cbc814476dfb697
size 9412808
model/emotion_model.json
ADDED
@@ -0,0 +1 @@
{"class_name": "Sequential", "config": {"name": "sequential", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 48, 48, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "conv2d_input"}}, {"class_name": "Conv2D", "config": {"name": "conv2d", "trainable": true, "batch_input_shape": [null, 48, 48, 1], "dtype": "float32", "filters": 32, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Conv2D", "config": {"name": "conv2d_1", "trainable": true, "dtype": "float32", "filters": 64, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}}, {"class_name": "Dropout", "config": {"name": "dropout", "trainable": true, "dtype": "float32", "rate": 0.25, "noise_shape": null, "seed": null}}, {"class_name": "Conv2D", "config": {"name": "conv2d_2", "trainable": true, "dtype": "float32", "filters": 128, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_1", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}}, {"class_name": "Conv2D", "config": {"name": "conv2d_3", "trainable": true, "dtype": "float32", "filters": 128, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_2", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}}, {"class_name": "Dropout", "config": {"name": "dropout_1", "trainable": true, "dtype": "float32", "rate": 0.25, "noise_shape": null, "seed": null}}, {"class_name": "Flatten", "config": {"name": "flatten", "trainable": true, "dtype": "float32", "data_format": "channels_last"}}, {"class_name": 
"Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 1024, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Dropout", "config": {"name": "dropout_2", "trainable": true, "dtype": "float32", "rate": 0.5, "noise_shape": null, "seed": null}}, {"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "dtype": "float32", "units": 7, "activation": "softmax", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "keras_version": "2.4.0", "backend": "tensorflow"}
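For readability: the architecture serialized in model/emotion_model.json is a plain Sequential CNN over 48x48 grayscale inputs, with four convolutional layers interleaved with max-pooling and dropout, a 1024-unit dense layer, and a 7-way softmax over the emotion classes. An equivalent sketch in Keras code, reconstructed from the JSON config above and not part of the upload:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense

# Same topology as model/emotion_model.json, written out for readability
model = Sequential([
    Conv2D(32, (3, 3), activation="relu", input_shape=(48, 48, 1)),
    Conv2D(64, (3, 3), activation="relu"),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    Conv2D(128, (3, 3), activation="relu"),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(128, (3, 3), activation="relu"),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    Flatten(),
    Dense(1024, activation="relu"),
    Dropout(0.5),
    Dense(7, activation="softmax"),  # one unit per emotion class
])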