lucascriistiano committed
Commit: b7cad3f
Parent: 9bd7097

feat: adjust model processing

Files changed (5)
  1. .gitignore +3 -0
  2. app.py +32 -8
  3. model/best.pt +3 -0
  4. requirements.txt +2 -1
  5. yolov5s.pt +3 -0
.gitignore ADDED
@@ -0,0 +1,3 @@
+.DS_Store
+.idea
+.env
app.py CHANGED
@@ -1,20 +1,44 @@
 import gradio as gr
 import torch
-from transformers import pipeline
+import cv2
 
+model = torch.hub.load('ultralytics/yolov5', 'custom', 'model/best.onnx')
+
+
+CLASS_COLORS = {
+    0: [148, 0, 211],   # class 1 (violet)
+    1: [255, 0, 0],     # class 2 (red)
+    2: [255, 127, 0],   # class 3 (orange)
+    3: [255, 255, 0],   # class 4 (yellow)
+    4: [0, 255, 0],     # class 5 (green)
+    5: [0, 0, 255],     # class 6 (blue)
+}
 
-model = pipeline('onnx', model='model/best.onnx')
 
 def object_detection(image):
+    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
     results = model(image)
 
-    return {
-        'image': image,
-        'detections': results
-    }
+    bboxes = results.xyxy[0].tolist()
+    labels = results.xyxy[0][:, -1].long().tolist()
+    scores = results.xyxy[0][:, -2].tolist()
+
+    for bbox, label, score in zip(bboxes, labels, scores):
+        label_name = results.names[label]
+        color = CLASS_COLORS[label]
+
+        cv2.rectangle(image, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
+        text = f"{label_name} ({score:.2f})"
+        cv2.putText(image, text, (int(bbox[0]), int(bbox[1]) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
+
+    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+
+    return image
+
 
-inputs = gr.inputs.Image()
-outputs = gr.outputs.Image(label='Input Image', type='numpy'), gr.outputs.JSON(label='Object Detections')
+inputs = gr.components.Image(shape=(640, 640))
+outputs = gr.components.Image(label='Input Image', shape=(640, 640))
 
 iface = gr.Interface(fn=object_detection, inputs=inputs, outputs=outputs)
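
For reference, a minimal sketch (not part of this commit) of how the inference path added to app.py above could be exercised outside Gradio for a quick local check. It assumes the ONNX weights load through torch.hub exactly as in the diff (with onnxruntime available); the sample.jpg and annotated.jpg paths and the fixed green color are hypothetical.

import cv2
import torch

# Same custom-weights load used in app.py; torch.hub resolves the ultralytics/yolov5 repo.
model = torch.hub.load('ultralytics/yolov5', 'custom', 'model/best.onnx')

img = cv2.imread('sample.jpg')                   # OpenCV loads images as BGR
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)   # YOLOv5 hub models expect RGB input
results = model(img_rgb)

# results.xyxy[0] is an (N, 6) tensor of x1, y1, x2, y2, confidence, class id;
# the same tensor that app.py slices into bboxes, scores, and labels.
for x1, y1, x2, y2, conf, cls in results.xyxy[0].tolist():
    name = results.names[int(cls)]
    cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
    cv2.putText(img, f"{name} ({conf:.2f})", (int(x1), int(y1) - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

cv2.imwrite('annotated.jpg', img)
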
model/best.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0dd7ed03452e8faaf24a3d3c7b4b6694e369ab54d6c807dc48b5c0d6ac675fc1
+size 14468157
requirements.txt CHANGED
@@ -1,2 +1,3 @@
+gradio==3.28.3
+opencv-python==4.7.0.72
 torch==2.0.0
-git+https://github.com/huggingface/transformers.git
 
yolov5s.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b3b748c1e592ddd8868022e8732fde20025197328490623cc16c6f24d0782ee
+size 14808437