Harsh-Jadhav
committed on
Commit
•
75e794a
1
Parent(s):
8257984
Upload app.py
Browse files
app.py
ADDED
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
from fastai.vision.all import *

# Location of the exported fastai learner; swap in your own model file here.
model_path = "assets/model.pkl"

# Load the pre-trained model once, at import time, so every rerun reuses it.
learn = load_learner(model_path)
|
7 |
+
|
8 |
+
# Classification helper used by every UI entry point below.
def predict(image):
    """Classify *image* with the loaded fastai learner.

    Parameters
    ----------
    image : path, bytes, or file-like object accepted by ``PILImage.create``.

    Returns
    -------
    tuple
        ``(label, confidence)`` where *label* is the decoded class and
        *confidence* is the probability the model assigned to that class.
    """
    img = PILImage.create(image)
    # learn.predict returns (decoded_label, class_index, probability_tensor).
    pred, pred_idx, probs = learn.predict(img)
    # BUG FIX: probs must be indexed by the class *index* (pred_idx);
    # the original indexed it with the decoded label, which is wrong.
    return pred, probs[pred_idx]
|
13 |
+
|
14 |
+
# --- Streamlit UI: upload-and-classify flow --------------------------------
st.title("Hand Gesture Recognition")

# Let the user supply a picture of a hand gesture.
uploaded_image = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])

if uploaded_image is not None:
    # Echo the uploaded picture back to the user.
    st.image(uploaded_image, caption="Uploaded Image", use_column_width=True)

    # Run the classifier and surface its verdict.
    label, score = predict(uploaded_image)

    st.subheader("Prediction:")
    st.write(f"Gesture: {label}")
    st.write(f"Confidence: {score:.2f}")
|
31 |
+
|
32 |
+
# --- Webcam capture (placeholder) -------------------------------------------
capture = st.checkbox("Capture an Image from Webcam")

if capture:
    st.write("Click the button to capture the image")
    capture_button = st.button("Capture")

    if capture_button:
        # Actual frame grabbing is not implemented yet. With OpenCV it
        # would look like:
        #   cap = cv2.VideoCapture(0)
        #   ret, frame = cap.read()
        #   cv2.imwrite("captured_image.jpg", frame)
        #   cap.release()
        from pathlib import Path

        captured_image_path = "captured_image.jpg"  # Replace with the actual path

        # BUG FIX: the original displayed and classified this file
        # unconditionally, crashing with a missing-file error because the
        # capture step above is commented out. Guard on existence instead.
        if Path(captured_image_path).exists():
            st.image(captured_image_path, caption="Captured Image", use_column_width=True)

            # Classify the captured frame and show the result.
            prediction, confidence = predict(captured_image_path)

            st.subheader("Prediction:")
            st.write(f"Gesture: {prediction}")
            st.write(f"Confidence: {confidence:.2f}")
        else:
            st.warning("Webcam capture is not implemented yet; no captured image was found.")
|
61 |
+
|
62 |
+
# --- Example gallery in the sidebar -----------------------------------------
st.sidebar.title("Example Images")
example_images = {
    "Image 1": "example_image1.jpg",
    "Image 2": "example_image2.jpg",
    "Image 3": "example_image3.jpg",
}

selected_example = st.sidebar.selectbox("Select an Example Image", list(example_images.keys()))

if selected_example:
    # Resolve the chosen label to its image path and display it.
    path = example_images[selected_example]
    st.image(path, caption=selected_example, use_column_width=True)

    # Classify the chosen example and report the result in the sidebar.
    example_prediction, example_confidence = predict(path)

    st.sidebar.subheader("Prediction:")
    st.sidebar.write(f"Gesture: {example_prediction}")
    st.sidebar.write(f"Confidence: {example_confidence:.2f}")
|