Pradeep018 committed
Commit 32014d8 · verified · 1 Parent(s): 65f7d35

Upload 11 files

.gitignore ADDED
@@ -0,0 +1,6 @@
+ test_video.mp4
+ test.jpg
+ 0*.py
+ __pycache__
+ ISL2.keras
+ ISL4.keras
Dockerfile ADDED
@@ -0,0 +1,15 @@
+ FROM python:3.10.10
+
+ WORKDIR /app
+
+ COPY requirements.txt ./requirements.txt
+
+ RUN pip install -r requirements.txt
+
+ EXPOSE 8501
+
+ COPY . /app
+
+ ENTRYPOINT ["streamlit", "run"]
+
+ CMD ["app.py"]
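With this Dockerfile, the container can presumably be built and started with `docker build -t sign-language-detection .` followed by `docker run -p 8501:8501 sign-language-detection` (the image tag is an arbitrary placeholder); the `ENTRYPOINT`/`CMD` pair then executes `streamlit run app.py`, serving the app on the exposed port 8501.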
Models/yolo8n-signlanguagedetection.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:040ac545ec3e03a4ab687b00ebb2cca6e7262534e0811fa3d3eaa80af4beca85
+ size 6249400
Models/yolo_v8_nano_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59653c5ebdc82c93372189910d73e64701a83e817ce496aa130121dc2af6a66a
+ size 6246041
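These two three-line files are Git LFS pointers rather than the weights themselves: the roughly 6 MB `.pt` checkpoints are stored in LFS, identified by the recorded SHA-256 `oid` and byte `size`.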
PROJECT_README.md ADDED
@@ -0,0 +1,18 @@
+ # Sign Language Detection
+
+ Detects the letters represented by hand gestures in sign language.
+
+
+ 🚀 Trained a **YOLOv8 Nano** model, achieving **mAP@50 = 0.94** and **mAP@50-95 = 0.89**
+
+ 🤗 Hugging Face app link: https://huggingface.co/spaces/Pradeep018/Sign-Language-detection/
+
+ * 🦹‍♂️ Dataset used: https://universe.roboflow.com/david-lee-d0rhs/american-sign-language-letters/
+
+ **Requirements for the project**
+ ```
+ ultralytics
+ torch
+ numpy
+ streamlit
+ ```
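For readers who want to try the weights directly, a minimal inference sketch follows; it mirrors what `pipeline.py` (added later in this commit) does. `test.jpg` is a placeholder path, not a file shipped in the repository.

```python
# Minimal sketch, assuming ultralytics is installed and this runs from the repo root.
from ultralytics import YOLO

model = YOLO('Models/yolo_v8_nano_model.pt')  # trained YOLOv8 Nano weights

# results[0].boxes.data rows are [x1, y1, x2, y2, score, class_id]
results = model('test.jpg')  # placeholder image path
for x1, y1, x2, y2, score, class_id in results[0].boxes.data.tolist():
    print(f'letter={chr(65 + int(class_id))}  confidence={score:.2f}')  # ids 0-25 map to A-Z
```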
Train Model/Sign_Language_Detection_YOLOv8.ipynb ADDED
The diff for this file is too large to render.
 
app.py ADDED
@@ -0,0 +1,42 @@
+ import streamlit as st
+ from pipeline import detectPipeline
+
+
+ st.title('Sign Language Detection')
+ st.write('Detects sign language alphabets in an image \nPowered by a YOLOv8 Nano model')
+
+ st.write('')
+
+ detect_pipeline = detectPipeline()
+
+ st.info('Sign Language Detection model loaded successfully!')
+
+ uploaded_file = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])
+
+ if uploaded_file is not None:
+
+     with st.container():
+         col1, col2 = st.columns([3, 3])
+
+         col1.header('Input Image')
+         col1.image(uploaded_file, caption='Uploaded Image', use_column_width=True)
+
+         col1.text('')
+         col1.text('')
+
+         if st.button('Detect'):
+             detections = detect_pipeline.detect_signs(img_path=uploaded_file)
+             detections_img = detect_pipeline.drawDetections2Image(img_path=uploaded_file, detections=detections)
+
+             col2.header('Detections')
+             col2.image(detections_img, caption='Predictions by model', use_column_width=True)
+
+             # Extract text results from detections
+             text_results = detect_pipeline.extractTextResults(detections)
+
+             # Display text results below the image
+             col2.text('Textual Results:')
+             col2.text(text_results)
+
+ # `extractTextResults` is implemented in `pipeline.py`
+
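Note that `detect_signs` receives the Streamlit `UploadedFile` object directly; this works because `PIL.Image.open` in `pipeline.py` accepts file-like objects as well as paths. Locally, the app starts with `streamlit run app.py`, the same command the Dockerfile issues.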
gitignore ADDED
@@ -0,0 +1,160 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
pages/1_SIgn_Language_Detection_On_Live_Video_Stream.p.py ADDED
@@ -0,0 +1,20 @@
+ import streamlit as st
+ from streamlit_webrtc import webrtc_streamer
+ import av
+ from ultralytics import YOLO
+
+
+ # Load the YOLO sign-language detection model
+ yolo = YOLO('Models/yolo8n-signlanguagedetection.pt')
+
+
+ def video_frame_callback(frame):
+     # Run YOLO on the incoming frame and return the annotated result
+     img = frame.to_image()
+     res = yolo(img)
+     res_plotted = res[0].plot().astype('uint8')
+     return av.VideoFrame.from_ndarray(res_plotted, format="bgr24")
+
+ webrtc_streamer(key="example",
+                 video_frame_callback=video_frame_callback,
+                 media_stream_constraints={"video": True, "audio": False})
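`streamlit_webrtc` invokes `video_frame_callback` once per incoming video frame, so the model runs on every frame; `res[0].plot()` returns the frame as a BGR `numpy` array with boxes and labels drawn, which is wrapped back into an `av.VideoFrame` for the outgoing stream. On CPU-only Spaces hardware, this per-frame inference is the likely throughput bottleneck.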
pipeline.py ADDED
@@ -0,0 +1,43 @@
+ from ultralytics import YOLO
+ from PIL import Image
+ import numpy as np
+ import cv2 as cv
+
+
+ class detectPipeline():
+     def __init__(self) -> None:
+         self.model = YOLO('Models/yolo_v8_nano_model.pt')
+         # Map class ids 0-25 to the letters A-Z
+         self.class_names = {i: chr(65 + i) for i in range(26)}
+
+     def detect_signs(self, img_path):
+         # Data preprocessing: load the image and convert it to an RGB array
+         img = Image.open(img_path).convert('RGB')
+         img_array = np.array(img)
+
+         # Make detections using YOLOv8 Nano
+         detections = self.model(img_array)[0]
+         sign_detections = []
+         for sign in detections.boxes.data.tolist():
+             x1, y1, x2, y2, score, class_id = sign
+             sign_detections.append([int(x1), int(y1), int(x2), int(y2), score, int(class_id)])
+         return sign_detections
+
+     def drawDetections2Image(self, img_path, detections):
+         img = Image.open(img_path).convert('RGB')
+         img = np.array(img)
+         for bbox in detections:
+             x1, y1, x2, y2, score, class_id = bbox
+             cv.rectangle(img, pt1=(x1, y1), pt2=(x2, y2), color=(0, 255, 0), thickness=4)
+             cv.putText(img, text=f'{self.class_names[class_id]} ({round(score*100, 2)}%)', org=(x1, y1-20),
+                        fontFace=cv.FONT_HERSHEY_SIMPLEX, fontScale=1.5,
+                        color=(0, 0, 255), lineType=cv.LINE_AA, thickness=4)
+         return img
+
+     # Format detections as "letter : confidence" lines
+     def extractTextResults(self, detections):
+         text_results = ''
+         for bbox in detections:
+             x1, y1, x2, y2, score, class_id = bbox
+             text_results += f'{self.class_names[class_id]} : {score}\n'
+         return text_results
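A short usage sketch of the pipeline outside Streamlit is given below; `test.jpg` is again a placeholder path.

```python
# Minimal sketch, assuming it runs from the repo root next to pipeline.py.
from PIL import Image
from pipeline import detectPipeline

pipe = detectPipeline()
detections = pipe.detect_signs(img_path='test.jpg')  # placeholder image
annotated = pipe.drawDetections2Image(img_path='test.jpg', detections=detections)
print(pipe.extractTextResults(detections))           # "letter : confidence" lines
Image.fromarray(annotated).save('annotated.jpg')     # persist the drawn boxes
```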
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ ultralytics
+ torch
+ numpy
+ streamlit
+ opencv-python
+ streamlit-webrtc
+ tensorflow
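This full list supersedes the snippet in PROJECT_README.md: `opencv-python` backs the drawing calls in `pipeline.py`, `streamlit-webrtc` backs the live-video page, and `tensorflow` appears to relate to the `ISL*.keras` models named in `.gitignore` rather than to anything in this commit's code.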