Harsh72AI committed (verified)
Commit 79f8119 · 1 Parent(s): ff56350

Project Files committed
PROJECT_README.md ADDED
@@ -0,0 +1,18 @@
+ # Sign Language Letters detection
+
+ Detects the different letters represented by hand gestures in sign language.
+
+
+ 🚀 Trained on the **YOLOv8 Nano** model, achieving **mAP@50: 0.94** & **mAP@50-95: 0.89**
+
+ 🤗 Hugging Face App Link: https://huggingface.co/spaces/Harsh72AI/Sign-Language-Letters-detection/
+
+ * 🦹‍♂️ Dataset used: https://universe.roboflow.com/david-lee-d0rhs/american-sign-language-letters/
+
+ **Requirements for the project**
+ ```
+ ultralytics
+ torch
+ numpy
+ streamlit
+ ```
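The mAP figures quoted above come from the training run in the committed notebook; the sketch below shows how such numbers would typically be reproduced with the Ultralytics validation API. It assumes a Roboflow-exported dataset config `data.yaml`, which is not part of this commit.

```python
from ultralytics import YOLO

# Load the committed weights and validate on the ASL letters dataset.
# 'data.yaml' is a hypothetical dataset config (e.g. exported from Roboflow);
# it is not included in this repository.
model = YOLO('yolo_v8_nano_model.pt')
metrics = model.val(data='data.yaml')

print(f"mAP@50:    {metrics.box.map50:.2f}")   # reported as 0.94 in the README
print(f"mAP@50-95: {metrics.box.map:.2f}")     # reported as 0.89 in the README
```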
Sign-language-detection-results-20240115T023838Z-001.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a28ad8479a18cb4b03c88b3ce9b0ad4deb7539968dfb0b6ca01ffe577db45952
+ size 19253170
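(The three lines above are a Git LFS pointer rather than the archive itself; cloning the repository with Git LFS installed and running `git lfs pull` fetches the actual ~19 MB results zip.)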
Sign_Language_Detection_YOLOv8.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
app.py ADDED
@@ -0,0 +1,33 @@
+ import streamlit as st
+ from pipeline import detectPipeline
+
+
+ st.title('Sign Language Letters detection')
+ st.write('Detects Sign language Alphabets in an image \nPowered by YOLOv8 Nano model')
+
+ st.write('')
+
+ detect_pipeline = detectPipeline()
+
+ st.info('Sign Language Letters detection model loaded successfully!')
+
+ uploaded_file = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])
+
+ if uploaded_file is not None:
+
+     with st.container():
+         col1, col2 = st.columns([3, 3])
+
+         col1.header('Input Image')
+         col1.image(uploaded_file, caption='Uploaded Image', use_column_width=True)
+
+         col1.text('')
+         col1.text('')
+
+         if st.button('Detect'):
+             detections = detect_pipeline.detect_signs(img_path=uploaded_file)
+             detections_img = detect_pipeline.drawDetections2Image(img_path=uploaded_file, detections=detections)
+
+             col2.header('Detections')
+             col2.image(detections_img, caption='Predictions by model', use_column_width=True)
+
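To try the app outside the Hugging Face Space, the usual Streamlit workflow applies: install the packages listed in requirements.txt and run `streamlit run app.py` from the repository root, with `yolo_v8_nano_model.pt` present so that `detectPipeline` can load the weights.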
pipeline.py ADDED
@@ -0,0 +1,35 @@
+ from ultralytics import YOLO
+ from PIL import Image
+ import numpy as np
+ import cv2 as cv
+
+
+ class detectPipeline():
+     def __init__(self) -> None:
+         self.model = YOLO('yolo_v8_nano_model.pt')
+         self.class_names = {i: chr(65 + i) for i in range(26)}
+
+
+     def detect_signs(self, img_path: str):
+         # Data Preprocessing
+         img = Image.open(img_path).convert('RGB')
+         img_array = np.array(img)
+
+         # Making detections using YOLOv8 Nano
+         detections = self.model(img_array)[0]
+         sign_detections = []
+         for sign in detections.boxes.data.tolist():
+             x1, y1, x2, y2, score, class_id = sign
+             sign_detections.append([int(x1), int(y1), int(x2), int(y2), score, int(class_id)])
+         return sign_detections
+
+     def drawDetections2Image(self, img_path, detections):
+         img = Image.open(img_path).convert('RGB')
+         img = np.array(img)
+         for bbox in detections:
+             x1, y1, x2, y2, score, class_id = bbox
+             cv.rectangle(img, pt1=(x1, y1), pt2=(x2, y2), color=(0, 255, 0), thickness=25)
+             cv.putText(img, text=f'{self.class_names[class_id]} ({round(score*100, 2)}%)', org=(x1, y1-20), fontFace=cv.FONT_HERSHEY_SIMPLEX, fontScale=3.5,
+                        color=(0, 0, 255), lineType=cv.LINE_AA, thickness=10)
+         img_detections = np.array(img)
+         return img_detections
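For reference, a minimal sketch of using `detectPipeline` outside Streamlit; `test_image.jpg` is a hypothetical path, and the weights file must sit in the working directory.

```python
from PIL import Image

from pipeline import detectPipeline

# Run the pipeline on a local image and save the annotated result.
# 'test_image.jpg' is a placeholder path, not a file from this commit.
pipeline = detectPipeline()
detections = pipeline.detect_signs(img_path='test_image.jpg')

for x1, y1, x2, y2, score, class_id in detections:
    print(f"{pipeline.class_names[class_id]}: {score:.2f} at ({x1}, {y1}, {x2}, {y2})")

annotated = pipeline.drawDetections2Image(img_path='test_image.jpg', detections=detections)
Image.fromarray(annotated).save('detections.jpg')
```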
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ ultralytics
+ torch
+ numpy
+ streamlit
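(These four packages mirror the list in PROJECT_README.md; `pip install -r requirements.txt` installs them for local use, and the Streamlit Space installs the same file automatically.)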
yolo_v8_nano_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59653c5ebdc82c93372189910d73e64701a83e817ce496aa130121dc2af6a66a
+ size 6246041