import os

import cv2
import numpy as np
import streamlit as st

from model import load_model, process_and_predict
from landmarks import normalize_landmarks, calculate_angles
from visualization import plot_hand_landmarks

st.set_page_config(layout="wide")
# Define the alphabets
all_alphabets = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
excluded_alphabets = 'DMNPTUVXZ'
# Sort so the supported letters are listed alphabetically rather than in arbitrary set order
working_alphabets = ''.join(sorted(set(all_alphabets) - set(excluded_alphabets)))
# README content
readme_content = f"""
## How it works

This ASL Recognition App uses image processing and machine learning to recognize American Sign Language (ASL) hand signs.

1. **Image Upload**: Users can upload an image of an ASL hand sign.
2. **Hand Detection**: The app uses MediaPipe to detect hand landmarks in the image.
3. **Feature Extraction**: Angles between hand landmarks are calculated and normalized.
4. **Prediction**: A Random Forest model predicts the ASL sign based on the extracted features.
5. **Visualization**: The app displays the detected hand landmarks and top predictions.

### Supported Alphabets

The app currently works for the following ASL alphabets:
{', '.join(working_alphabets)}

The app does not support or may not work correctly for:
{', '.join(excluded_alphabets)}

Note: The model's performance may vary and is subject to improvement.

The "View Hand Landmarks" tab allows users to see hand landmarks for pre-loaded ASL signs.
"""
# Load the model
model = load_model()
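# load_model is presumably a fitted scikit-learn classifier (the prediction code below
# relies on model.classes_); it is assumed to return None if loading fails.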
# Ensure the model is loaded before proceeding
if model is None:
    st.error("Could not load the trained model, so the app cannot make predictions.")
    st.stop()
# Streamlit app
st.title("ASL Recognition App")

# Display README content
st.sidebar.markdown(readme_content)

# Create tabs for different functionalities
tab1, tab2 = st.tabs(["Predict ASL Sign", "View Hand Landmarks"])
with tab1:
    st.header("Predict ASL Sign")
    uploaded_file = st.file_uploader("Upload an image of an ASL sign", type=["jpg", "jpeg", "png"])

    if uploaded_file is not None:
        try:
            # Decode the uploaded bytes into an OpenCV (BGR) image array
            image = cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), 1)
            if image is not None:
                col1, col2 = st.columns(2)
                with col1:
                    # OpenCV images are BGR, so tell Streamlit the channel order explicitly
                    st.image(image, caption="Uploaded Image", use_column_width=True, channels="BGR")
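                # process_and_predict is expected to return (class probabilities, MediaPipe
                # hand landmarks), or (None, None) when no hand is detected in the image.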
                probabilities, landmarks = process_and_predict(image)
                if probabilities is not None and landmarks is not None:
                    with col2:
                        st.subheader("Top 5 Predictions:")
                        top_indices = np.argsort(probabilities)[::-1][:5]
                        for i in top_indices:
                            st.write(f"{model.classes_[i]}: {probabilities[i]:.2f}")
                        fig = plot_hand_landmarks(landmarks, "Detected Hand Landmarks")
                        st.pyplot(fig)
                else:
                    st.write("No hand detected in the image.")
            else:
                st.error("Failed to load the image. The file might be corrupted.")
        except Exception as e:
            st.error(f"An error occurred while processing the image: {str(e)}")
with tab2:
    st.header("View Hand Landmarks")
    selected_alphabets = st.multiselect("Select alphabets to view landmarks:", list(working_alphabets))

    if selected_alphabets:
        cols = st.columns(4)  # 4 columns for smaller images
        for idx, alphabet in enumerate(selected_alphabets):
            with cols[idx % 4]:
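                # Pre-loaded reference images are expected as lowercase '<letter>.jpeg'
                # files inside the 'asl test set' folder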
                image_path = os.path.join('asl test set', f'{alphabet.lower()}.jpeg')
                if os.path.exists(image_path):
                    try:
                        image = cv2.imread(image_path)
                        if image is not None:
                            probabilities, landmarks = process_and_predict(image)
                            if landmarks is not None:
                                fig = plot_hand_landmarks(landmarks, f"Hand Landmarks for {alphabet}")
                                st.pyplot(fig)
                            else:
                                st.error(f"No hand detected for {alphabet}")
                        else:
                            st.error(f"Failed to load image for {alphabet}")
                    except Exception as e:
                        st.error(f"Error processing image for {alphabet}: {str(e)}")
                else:
                    st.error(f"Image not found for {alphabet}")