Space build status: Build error

Commit 317aea0 · add YOLO
Parent: 39a07f8
Changed files:
- .DS_Store +0 -0
- .gitattributes +1 -0
- app.py +5 -4
- files/a.png +3 -0
- files/huggingface.png +3 -0
- files/image_0.png +3 -0
- files/th1.jpg +0 -0
- files/yolov8n.pt +3 -0
- gitignore +5 -0
- huggingface.png +0 -0
- image_0.png +0 -0
- src/__pycache__/threshold_methods.cpython-310.pyc +0 -0
- src/__pycache__/yolo.cpython-310.pyc +0 -0
- src/test.py +32 -0
- threshold_methods.py → src/threshold_methods.py +1 -1
- src/yolo.py +57 -0
.DS_Store (CHANGED)
Binary files a/.DS_Store and b/.DS_Store differ
.gitattributes (CHANGED)
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
app.py (CHANGED)
@@ -4,7 +4,8 @@
 '''
 
 import gradio as gr
-from
+from src.threshold_methods import threshold_methods
+from src.yolo import yolo
 import cv2
 
 new_outputs = [
@@ -14,7 +15,7 @@ gr.outputs.Textbox(type="text", label="My linkedin URL"),
     gr.outputs.Textbox(type="text", label="info")
 ]
 def show_image():
-    img = cv2.imread('huggingface.png')
+    img = cv2.imread('files/huggingface.png')
     text1 = 'https://huggingface.co/spaces/pirahansiah/ComputerVision'
     text2 = 'https://www.linkedin.com/in/pirahansiah/'
     text3 = ' Sometimes you just want to take an image from your database and see how it changes by running different image processing functions, to find the best starting point for your computer vision application. In this Hugging Face space, I have included various pattern recognition functions that can easily be applied to your input images, so you can see the output of each function. I will continue to update this space with additional modes, methods, and deep learning frameworks/models, to make them easy to use for demonstration purposes. Please let me know if you would like me to include any other specific functionality. '
@@ -35,7 +36,7 @@ HuggingFace = gr.Interface(
 )
 
 gr.TabbedInterface(
-    [HuggingFace,threshold_methods],
-    tab_names=['HuggingFace','Thresholding Image Segmentation']
+    [HuggingFace,threshold_methods,yolo],
+    tab_names=['HuggingFace','Thresholding Image Segmentation','YOLO']
 ).queue().launch()
 
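The pattern behind this change is that each feature module exposes a ready-made gr.Interface object (threshold_methods, yolo), and app.py only imports those objects and appends them to the gr.TabbedInterface list. A minimal, self-contained sketch of that pattern, using toy tab functions and hypothetical names rather than the Space's real tabs:

import gradio as gr

# Each "module" builds its own Interface; in the Space, app.py imports these
# objects (e.g. from src.yolo import yolo) instead of defining them inline.
hello_tab = gr.Interface(fn=lambda name: f"Hello {name}", inputs="text", outputs="text")
length_tab = gr.Interface(fn=lambda name: len(name), inputs="text", outputs="number")

# app.py then just lists the interfaces and their tab names, as in the diff above.
gr.TabbedInterface(
    [hello_tab, length_tab],
    tab_names=["Hello", "Length"],
).queue().launch()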
files/a.png (ADDED, Git LFS)
files/huggingface.png (ADDED, Git LFS)
files/image_0.png (ADDED, Git LFS)
files/th1.jpg (ADDED)
files/yolov8n.pt (ADDED)
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31e20dde3def09e2cf938c7be6fe23d9150bbbe503982af13345706515f2ef95
+size 6534387
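These three lines are the Git LFS pointer that is committed in place of the ~6.5 MB YOLOv8n weights; the real binary is stored in LFS and resolved at checkout. As an aside (not part of the commit), a minimal sketch of how a resolved file could be checked against such a pointer, assuming the pointer text has been saved separately as a hypothetical files/yolov8n.pt.pointer:

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path):
    # A pointer file is just "key value" lines: version, oid, size.
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_lfs_object(pointer_path, object_path):
    # Compare the resolved file's byte size and sha256 digest with the pointer.
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])
    data = Path(object_path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# Hypothetical usage:
# print(verify_lfs_object("files/yolov8n.pt.pointer", "files/yolov8n.pt"))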
gitignore (CHANGED)
@@ -5,5 +5,10 @@ flagged/
 *.mkv
 gradio_cached_examples/
 .DS_Store
+.DS_Store/
 __pycache__
+__pycache__/
 .vscode
+datasets
+datasets/
+.vscode/
huggingface.png (DELETED)
Binary file (120 kB)

image_0.png (DELETED)
Binary file (106 kB)

src/__pycache__/threshold_methods.cpython-310.pyc (ADDED)
Binary file (3.49 kB)

src/__pycache__/yolo.cpython-310.pyc (ADDED)
Binary file (2.09 kB)
src/test.py (ADDED)
@@ -0,0 +1,32 @@
+from ultralytics import YOLO
+from PIL import Image
+import cv2
+def draw_boxes(image, boxes):
+    for box in boxes:
+
+        x1, y1, x2, y2, name, prob = box
+        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
+        cv2.putText(image, f"{name} {prob:.2f}", (x1, y1-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0,255,0), 2)
+    return image
+def detect_objects_on_image(buf):
+    model = YOLO("yolov8n.pt")
+    results = model.predict(buf)
+    result = results[0]
+    output = []
+    for box in result.boxes:
+        x1, y1, x2, y2 = [
+            round(x) for x in box.xyxy[0].tolist()
+        ]
+        class_id = box.cls[0].item()
+        prob = round(box.conf[0].item(), 2)
+        output.append([
+            x1, y1, x2, y2, result.names[class_id], prob
+        ])
+    return output
+
+
+img = cv2.imread('a.png')
+boxes=detect_objects_on_image(img)
+img_with_boxes = draw_boxes(img, boxes)
+cv2.imshow("test",img_with_boxes)
+cv2.waitKey(0)
threshold_methods.py → src/threshold_methods.py (RENAMED)
@@ -51,7 +51,7 @@ def pirahansiah_threshold_method_find_threshold_values_1(grayImg):
 
 
 
-path = [['image_0.png'],['huggingface.png']]
+path = [['files/image_0.png'],['files/huggingface.png']]
 inputs_thresh = [
     gr.inputs.Image(type="filepath", label="Input Image"),
     gr.inputs.Radio(label="Threshold Methods",
src/yolo.py (ADDED)
@@ -0,0 +1,57 @@
+import cv2
+import numpy as np
+import gradio as gr
+from ultralytics import YOLO
+def draw_boxes(image, boxes):
+    for box in boxes:
+        x1, y1, x2, y2, name, prob = box
+        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
+        cv2.putText(image, f"{name} {prob:.2f}", (x1, y1-10), cv2.FONT_HERSHEY_SIMPLEX, 1.9, (255,55,0), 2)
+    return image
+def detect_objects_on_image(buf):
+    model = YOLO("files/yolov8n.pt")
+    results = model.predict(buf)
+    result = results[0]
+    output = []
+    for box in result.boxes:
+        x1, y1, x2, y2 = [
+            round(x) for x in box.xyxy[0].tolist()
+        ]
+        class_id = box.cls[0].item()
+        prob = round(box.conf[0].item(), 2)
+        output.append([
+            x1, y1, x2, y2, result.names[class_id], prob
+        ])
+    return output
+
+path = [['files/a.png'],['files/image_0.png'],['files/huggingface.png']]
+inputs = [
+    gr.inputs.Image(type="filepath", label="Input Image"),
+    gr.inputs.Radio(label="YOLO Methods",
+        choices=[
+            "v8"
+        ]),
+
+]
+outputs = [
+    gr.outputs.Image(type="numpy", label="Output YOLO Image")
+]
+def process_image(input_image, radio_choice,slider_val):
+    img = cv2.imread(input_image)
+    img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
+    boxes=detect_objects_on_image(img)
+    img_with_boxes = draw_boxes(img, boxes)
+    return img_with_boxes
+def on_change(radio_choice):
+    outputs[0].update(process_image(
+        inputs[0].value,
+        radio_choice)
+    )
+yolo = gr.Interface(
+    fn=process_image,
+    inputs=inputs,
+    outputs=outputs,
+    on_change=on_change,
+    examples=path,
+    title="YOLO: Computer Vision and Deep Learning by Farshid PirahanSiah",
+)
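The Space header reports a build error, and a couple of things in this new tab are worth flagging: process_image declares a third slider_val parameter even though only two input components (an image path and the "YOLO Methods" radio) are defined, so Gradio would pass it only two values when a prediction runs; on_change does not appear to be a documented gr.Interface keyword; and the gr.inputs / gr.outputs namespaces are deprecated in recent Gradio releases. Whether or not these are the actual cause of the failure, here is a hedged, self-contained sketch of the same YOLO tab with a two-argument handler and current component names (detect_and_draw is a name introduced here, not in the commit):

import cv2
import gradio as gr
from ultralytics import YOLO

model = YOLO("files/yolov8n.pt")  # weights path as committed in this change

def detect_and_draw(image_path, method):
    # Exactly two parameters, one per declared input component.
    img = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
    result = model.predict(img)[0]
    for box in result.boxes:
        x1, y1, x2, y2 = [round(v) for v in box.xyxy[0].tolist()]
        name = result.names[int(box.cls[0].item())]
        prob = round(box.conf[0].item(), 2)
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(img, f"{name} {prob:.2f}", (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
    return img

yolo = gr.Interface(
    fn=detect_and_draw,
    inputs=[
        gr.Image(type="filepath", label="Input Image"),
        gr.Radio(label="YOLO Methods", choices=["v8"]),
    ],
    outputs=gr.Image(type="numpy", label="Output YOLO Image"),
    examples=[["files/a.png"], ["files/image_0.png"], ["files/huggingface.png"]],
    title="YOLO: Computer Vision and Deep Learning by Farshid PirahanSiah",
)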