Dricz committed on
Commit
e1e6433
1 Parent(s): 53beac1

Create app.py

Browse files
Files changed (1)
  1. app.py +105 -0
app.py ADDED
@@ -0,0 +1,105 @@
+ import gradio as gr
+ import numpy as np
+ from ultralyticsplus import YOLO, render_result
+ from transformers import pipeline
+ 
+ # Weapon detector (custom-trained YOLO weights) and the class names it predicts
+ model = YOLO('best (1).pt')
+ # Secondary classifier used to name the specific pistol/rifle model
+ model2 = pipeline('image-classification', 'Kaludi/csgo-weapon-classification')
+ name = ['grenade', 'knife', 'pistol', 'rifle']
+ 
+ 
+ def response(image):
+     """Run detection and return the weapon name(s) plus a text summary."""
+     results = model(image)
+     text = ""
+     name_weap = ""
+ 
+     for r in results:
+         conf = np.array(r.boxes.conf)
+         cls = np.array(r.boxes.cls).astype(int)
+         xywh = np.array(r.boxes.xywh).astype(int)
+ 
+         for con, cl, xy in zip(conf, cls, xywh):
+             confidence = round(float(con) * 100, 1)
+             text += f"Detected {name[cl]} with confidence {confidence}% at ({xy[0]},{xy[1]})\n"
+ 
+             if cl in (0, 1):
+                 # grenade / knife: keep the detector's class name
+                 name_weap += name[cl] + '\n'
+             else:
+                 # pistol / rifle: ask the classifier for the specific weapon model
+                 out = model2(image)
+                 name_weap += out[0]["label"] + '\n'
+ 
+     return name_weap, text
+ 
+ 
+ def response2(image: gr.Image = None, image_size: gr.Slider = 640,
+               conf_threshold: gr.Slider = 0.3, iou_threshold: gr.Slider = 0.6):
+     """Render the detections on the image and collect the text outputs."""
+     results = model.predict(image, conf=conf_threshold, iou=iou_threshold, imgsz=image_size)
+ 
+     render = render_result(model=model, image=image, result=results[0], rect_th=1, text_th=1)
+ 
+     weapon_name, text_detection = response(image)
+ 
+     return render, text_detection, weapon_name
+ 
+ 
+ inputs = [
+     gr.Image(type="filepath", label="Input Image"),
+     gr.Slider(minimum=320, maximum=1280, value=640,
+               step=32, label="Image Size"),
+     gr.Slider(minimum=0.0, maximum=1.0, value=0.3,
+               step=0.05, label="Confidence Threshold"),
+     gr.Slider(minimum=0.0, maximum=1.0, value=0.6,
+               step=0.05, label="IOU Threshold"),
+ ]
+ 
+ outputs = [
+     gr.Image(type="filepath", label="Output Image"),
+     gr.Textbox(label="Result"),
+     gr.Textbox(label="Weapon Name"),
+ ]
+ 
+ # examples = [['th (11).jpg', 640, 0.3, 0.6],
+ #             ['th (8).jpg', 640, 0.3, 0.6],
+ #             ['th (3).jpg', 640, 0.3, 0.6],
+ #             ['th.jpg', 640, 0.15, 0.6]]
+ 
+ iface = gr.Interface(fn=response2, inputs=inputs, outputs=outputs)
+ iface.launch(debug=True)