NAGA committed on
Commit
d8e1649
1 Parent(s): c934786

Upload 6 files

Browse files
Files changed (6) hide show
  1. Crime_Y5.pt +3 -0
  2. app.py +78 -0
  3. bourdon-defence-55.jpg +0 -0
  4. images (2).jpg +0 -0
  5. images (3).jpg +0 -0
  6. images (4).jpg +0 -0
Crime_Y5.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5fd2016db0538184c11e44c23681cf69e085c501c1367dc80b2c7401dc9ab39
3
+ size 42250345
app.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from gradio.outputs import Label
3
+ import cv2
4
+ import requests
5
+ import os
6
+ import numpy as np
7
+
8
+ from ultralytics import YOLO
9
+ import yolov5
10
+
# Remote sample images; the startup loop below mirrors them locally as
# image_0.jpg .. image_2.jpg so the demo has inputs to work with.
file_urls = [
    'https://c8.alamy.com/zooms/9/382c1e254fe14207998df9ec56354291/wce5tj.jpg',
    'https://c8.alamy.com/comp/W20YYR/freight-container-on-the-back-of-a-truck-stuck-in-traffic-on-the-interstate-in-georgia-usa-W20YYR.jpg',
    'https://www.shutterstock.com/shutterstock/photos/318604739/display_1500/stock-photo-highway-and-container-truck-at-china-318604739.jpg',
]
17
+
def download_file(url, save_name):
    """Download *url* to the local path *save_name*.

    Skips the download when the file already exists, so repeated app
    restarts do not re-fetch the sample images.

    Args:
        url: HTTP(S) URL of the file to fetch.
        save_name: local filename to write the response body to.
    """
    if not os.path.exists(save_name):
        response = requests.get(url)
        # Context manager closes the handle deterministically instead of
        # leaking it until garbage collection (original used a bare open()).
        with open(save_name, 'wb') as f:
            f.write(response.content)
23
+
# Mirror the remote sample images locally once at startup; files already on
# disk are left untouched by download_file.
for i, url in enumerate(file_urls):
    # Use the value bound by enumerate directly; the original redundantly
    # re-indexed file_urls[i].
    download_file(url, f"image_{i}.jpg")
29
+
30
def yolov5_inference(
    image=None,
    model_path=None,
    image_size=640,
    conf_threshold=0.25,
    iou_threshold=0.45,
):
    """Run YOLOv5 object detection and return the rendered image plus crops.

    Args:
        image: input image (PIL image from the Gradio Image component).
        model_path: path or name of the YOLOv5 weights file to load.
        image_size: inference resolution passed to the model.
        conf_threshold: minimum detection confidence kept by the model.
        iou_threshold: NMS IoU threshold.

    Returns:
        A tuple of (image with detection boxes rendered, list of per-detection
        crop arrays with channels reversed BGR -> RGB for display).
    """
    # CPU-only so the demo runs on hosts without a GPU; the model is loaded
    # per call, which keeps the dropdown model switch simple.
    model = yolov5.load(model_path, device="cpu")
    model.conf = conf_threshold
    model.iou = iou_threshold
    results = model([image], size=image_size)
    crops = results.crop(save=False)
    # crop() yields arrays in BGR order; reverse the last axis to get RGB.
    img_crops = [crop["im"][..., ::-1] for crop in crops]
    return results.render()[0], img_crops
49
+
50
+
51
+ inputs = [
52
+ gr.inputs.Image(type="pil", label="Input Image"),
53
+ gr.inputs.Dropdown(["Crime_Y5.pt","yolov5s.pt", "yolov5m.pt", "yolov5l.pt", "yolov5x.pt"], label="Model", default = 'Crime_Y5.pt'),
54
+ gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
55
+ gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
56
+ gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
57
+ ]
58
+
59
+ outputs = gr.outputs.Image(type="filepath", label="Output Image")
60
+ outputs_crops = gr.Gallery(label="Object crop")
61
+ title = "Container code detection - YOLO V5"
62
+ description = "YOLOv5 is a family of object detection models pretrained on COCO dataset. This model is a pip implementation of the original YOLOv5 model."
63
+
64
+ examples = [['1.jpg', 'Crime_Y5.pt', 640, 0.35, 0.45]
65
+ ,['2.jpg', 'Crime_Y5.pt', 640, 0.35, 0.45]
66
+ ,['4.jpg', 'Crime_Y5.pt', 640, 0.35, 0.45]]
67
+
68
+ demo_app = gr.Interface(
69
+ fn=yolov5_inference,
70
+ inputs=inputs,
71
+ outputs=[outputs,outputs_crops],
72
+ title=title,
73
+ examples=examples,
74
+ cache_examples=True,
75
+ live=True,
76
+ theme='huggingface',
77
+ )
78
+ demo_app.launch(debug=True, enable_queue=True, width=50, height=50)
bourdon-defence-55.jpg ADDED
images (2).jpg ADDED
images (3).jpg ADDED
images (4).jpg ADDED