harsh13333 committed
Commit a6b1f46 · verified · 1 Parent(s): f26a67e

Upload 8 files

.env ADDED
@@ -0,0 +1,3 @@
+ DET_MODEL_PATH = 'person_detection_v3.pt'
+ ACTIVITY_DET_MODEL_PATH = 'final_activity_detection.pt'
+ IMG_DIR_PATH = 'images/valid'
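
These three values are read once at import time in pipline_functions.py via python-dotenv. A minimal sketch of the lookup, assuming the .env file sits in the working directory:

import os
from dotenv import load_dotenv

load_dotenv()                         # parse .env into the process environment
print(os.getenv('DET_MODEL_PATH'))    # -> 'person_detection_v3.pt'
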
app.py ADDED
@@ -0,0 +1,120 @@
+ import streamlit as st
+ import cv2
+ import io
+ import numpy as np
+ from PIL import Image
+ from inferance import pipline
+ import pandas as pd
+
+
+ # Page-wide CSS: widen the main container, hide Streamlit's full-screen
+ # button on images, and center the image elements.
+ code = """
+ <style>
+ .block-container{
+     max-width: 100%;
+     padding: 50px;
+ }
+ /*
+ [data-testid="stImage"], .e115fcil2, [data-testid="StyledFullScreenButton"], [data-testid="stFullScreenFrame"].e1vs0wn30, [data-testid="element-container"].e1f1d6gn4.element-container{
+     width: fit-content !important;
+ }
+ [data-testid="stVerticalBlock"].e1f1d6gn2{
+     flex-direction: row;
+     flex-wrap: wrap;
+ }
+ */
+ [data-testid="StyledFullScreenButton"]{
+     display: none;
+ }
+ [data-testid="stVerticalBlockBorderWrapper"], [data-testid="stVerticalBlock"]{
+     width: 100%;
+ }
+ .e115fcil2{
+     justify-content: center;
+     margin-top: 20px;
+ }
+ </style>
+ """
+ st.html(code)
+
+ st.title("Automated Surveillance System")
+
+ col1, col2 = st.columns([5, 5])
+ container = col2.container(height=800)
+ col3, col4 = container.columns([1, 1])
+
+ with col1:
+     uploaded = st.file_uploader("File upload", label_visibility="hidden")
+
+     if uploaded is not None:
+         # Decode the upload and convert PIL's RGB layout to the BGR layout
+         # that OpenCV and the pipeline expect.
+         pil_image = Image.open(io.BytesIO(uploaded.getvalue())).convert("RGB")
+         image = cv2.cvtColor(np.asarray(pil_image), cv2.COLOR_RGB2BGR)
+     else:
+         image = cv2.imread("default_img.jpg")
+
+     results = pipline(image)
+
+     # Draw the merged region boxes on the full frame.
+     for result in results:
+         image = cv2.rectangle(image, result['updated_boxes']['top_left'],
+                               result['updated_boxes']['bottom_right'], (255, 0, 0), 1)
+
+     st.image(image, channels="BGR")
+
+
+ def render_results(result_list):
+     """Show each zoomed crop with its per-object boxes and a summary table."""
+     for result in result_list:
+         img = result['zoomed_img']
+         df = pd.DataFrame(columns=['Object Type', 'Distance', 'Activity'])
+
+         # Size of the merged region in the original frame; the zoomed image
+         # is a uniformly scaled version of this region.
+         actual_width = result['updated_boxes']['bottom_right'][0] - result['updated_boxes']['top_left'][0]
+         actual_height = result['updated_boxes']['bottom_right'][1] - result['updated_boxes']['top_left'][1]
+
+         for box in result['actual_boxes']:
+             # Translate each object box into crop coordinates...
+             top_left = (box['top_left'][0] - result['updated_boxes']['top_left'][0],
+                         box['top_left'][1] - result['updated_boxes']['top_left'][1])
+             bottom_right = (box['bottom_right'][0] - result['updated_boxes']['top_left'][0],
+                             box['bottom_right'][1] - result['updated_boxes']['top_left'][1])
+             # ...then rescale: x by the width ratio, y by the height ratio.
+             top_left = (top_left[0] * img.shape[1] // actual_width,
+                         top_left[1] * img.shape[0] // actual_height)
+             bottom_right = (bottom_right[0] * img.shape[1] // actual_width,
+                             bottom_right[1] * img.shape[0] // actual_height)
+
+             img = cv2.rectangle(img, top_left, bottom_right, (255, 0, 0), 1)
+             img = cv2.putText(img, "ID: " + str(len(df)), top_left,
+                               cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))
+             df.loc[len(df)] = [box['class'], box['distance'], box['activity']]
+
+         st.image(img, channels="BGR")
+         st.table(df)
+
+
+ # Split the regions across the two right-hand columns.
+ results_1 = results[:len(results) // 2]
+ results_2 = results[len(results) // 2:]
+ with col4:
+     render_results(results_1)
+ with col3:
+     render_results(results_2)
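
The rescaling arithmetic above can be checked by hand. An illustrative check using the example coordinates from the object_detection docstring in pipline_functions.py (the 500 px zoom height comes from image_enhancements):

region_tl, region_br = (41, 199), (73, 269)   # merged region in the frame
obj_tl, obj_br = (48, 215), (62, 245)         # person box in the frame
w, h = 73 - 41, 269 - 199                     # 32 x 70 region
zoom_w, zoom_h = 500 * w // h, 500            # 228 x 500, aspect ratio preserved
tl = ((obj_tl[0] - region_tl[0]) * zoom_w // w,
      (obj_tl[1] - region_tl[1]) * zoom_h // h)   # (49, 114)
br = ((obj_br[0] - region_tl[0]) * zoom_w // w,
      (obj_br[1] - region_tl[1]) * zoom_h // h)   # (149, 328)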
default_img.jpg ADDED
final_activity_detection.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:70759328a3222fb40001170759f7ed6577acd54f3283d087f9fbe63974989ee6
+ size 2968321
inferance.py ADDED
@@ -0,0 +1,43 @@
+ import cv2
+ from pipline_functions import croped_images, object_detection, image_enhancements, detect_activity, get_distances, get_json_data
+
+
+ def pipline(image):
+     """Run the full detection pipeline on one frame.
+
+     Args:
+         image (numpy array): 3-channel BGR image.
+
+     Returns:
+         final_results: JSON array whose entries have the shape
+         {
+             'zoomed_img': np.array([]),
+             'actual_boxes': [],
+             'updated_boxes': {},
+         }
+     """
+     # Detect objects in the image with YOLO and get JSON data per object.
+     json_data = object_detection(image)
+
+     # Crop the regions whose boxes overlap, plus the single-object crops.
+     croped_images_list, single_object_images = croped_images(image, json_data)
+
+     # Enhance both the merged crops and the single-object crops.
+     enhanced_images, single_object_images = image_enhancements(croped_images_list, single_object_images)
+
+     # Classify the activity of each person crop.
+     detected_activity = detect_activity(single_object_images)
+
+     # Estimate the distance of every object.
+     distances_list = get_distances(json_data)
+
+     # Assemble the final JSON array.
+     final_results = get_json_data(json_data, enhanced_images, detected_activity, distances_list)
+
+     return final_results
+
+
+ if __name__ == '__main__':
+     # Smoke test; guarded so that importing this module does not run inference.
+     pipline(cv2.imread(r'distance_test\distance_test\images\car_99-94168281555176_Mon-Dec-13-16-37-40-2021_jpg.rf.a8c56aba60dd3a19f2c2f159a2c9062d.jpg'))
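
A minimal consumer of pipline, with a hypothetical image path; the result fields follow get_json_data in pipline_functions.py:

import cv2
from inferance import pipline

frame = cv2.imread('images/valid/example.jpg')   # hypothetical test frame (BGR)
for region in pipline(frame):
    print(region['updated_boxes'])               # merged region in frame coordinates
    for obj in region['actual_boxes']:
        print(obj['class'], obj['distance'], obj['activity'])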
person_detection_v3.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64b1203bd0c8e4fb317eb0b11816a9e2a95ab887f708d9ede3c4ef40f1daf94c
+ size 52026625
pipline_functions.py ADDED
@@ -0,0 +1,345 @@
+ import cv2
+ from ultralytics import YOLO
+ import os
+ from dotenv import load_dotenv
+ from pathlib import Path
+ import math
+ import numpy as np
+
+ env_path = Path('.') / '.env'
+ load_dotenv(dotenv_path=env_path)
+
+ path = {
+     'DET_MODEL_PATH': str(os.getenv('DET_MODEL_PATH')),
+     'IMG_DIR_PATH': str(os.getenv('IMG_DIR_PATH')),
+     'ACTIVITY_DET_MODEL_PATH': str(os.getenv('ACTIVITY_DET_MODEL_PATH')),
+ }
+
+ # Constants: assumed real-world heights (metres) and an empirically tuned
+ # focal-length factor for the pinhole distance estimate.
+ PERSON_HEIGHT = 1.5
+ VEHICAL_HEIGHT = 1.35
+ ANIMAL_HEIGHT = 0.6
+ FOCAL_LENGTH = 6400
+
+ # Load models
+ det_model = YOLO(path['DET_MODEL_PATH'])
+ activity_det_model = YOLO(path['ACTIVITY_DET_MODEL_PATH'])
+
+ activity_classes = ['Standing', 'Running', 'Sitting']
+
+ def object_detection(image):
+     """Detect objects with YOLO and merge overlapping padded boxes.
+
+     Args:
+         image (numpy array): 3-channel BGR image.
+
+     Returns:
+         new_boxes: JSON array in the following format:
+         [
+             {
+                 "actual_boundries": [
+                     {
+                         "top_left": [48, 215],
+                         "bottom_right": [62, 245],
+                         "class": "person"
+                     }
+                 ],
+                 "updated_boundries": {
+                     "top_left": [41, 199],
+                     "bottom_right": [73, 269],
+                     "person_count": 1,
+                     "vehical_count": 0,
+                     "animal_count": 0
+                 }
+             }
+         ]
+     """
+     # Detect objects using the YOLO model.
+     results = det_model(image)
+
+     boxes = results[0].boxes.xyxy.tolist()
+     classes = results[0].boxes.cls.tolist()
+     names = results[0].names
+     confidences = results[0].boxes.conf.tolist()
+     my_boxes = []
+
+     for box, cls, conf in zip(boxes, classes, confidences):
+         x1, y1, x2, y2 = box
+         name = names[int(cls)]
+         my_obj = {"actual_boundries": [{"top_left": (int(x1), int(y1)),
+                                         "bottom_right": (int(x2), int(y2)),
+                                         "class": name}]}
+         # Pad the box by roughly half its size on each border, clamped to the
+         # image (note x1/y1 are updated first, so the right/bottom padding is
+         # measured from the already-shifted corner).
+         x1 = max(0, x1 - (x2 - x1) / 2)
+         y1 = max(0, y1 - (y2 - y1) / 2)
+         x2 = min(image.shape[1] - 1, x2 + (x2 - x1) / 2)
+         y2 = min(image.shape[0] - 1, y2 + (y2 - y1) / 2)
+         x1, y1, x2, y2 = math.floor(x1), math.floor(y1), math.ceil(x2), math.ceil(y2)
+         my_obj["updated_boundries"] = {"top_left": (x1, y1),
+                                        "bottom_right": (x2, y2),
+                                        "person_count": 1 if name == 'person' else 0,
+                                        "vehical_count": 1 if name == 'vehical' else 0,
+                                        "animal_count": 1 if name == 'animal' else 0}
+         my_boxes.append(my_obj)
+
+     # Sweep the boxes in sorted order, merging any padded box that overlaps
+     # the previously kept one into a single union box.
+     my_boxes.sort(key=lambda x: (x['updated_boundries']['top_left'], x['updated_boundries']['bottom_right']))
+
+     new_boxes = []
+     if len(my_boxes) > 0:
+         new_boxes.append(my_boxes[0])
+
+     for indx, box in enumerate(my_boxes):
+         if indx != 0:
+             top_left_last = new_boxes[-1]['updated_boundries']['top_left']
+             bottom_right_last = new_boxes[-1]['updated_boundries']['bottom_right']
+             top_left_curr = box['updated_boundries']['top_left']
+             bottom_right_curr = box['updated_boundries']['bottom_right']
+
+             if bottom_right_last[0] >= top_left_curr[0] and bottom_right_last[1] >= top_left_curr[1]:
+                 new_x1 = min(top_left_last[0], top_left_curr[0])
+                 new_y1 = min(top_left_last[1], top_left_curr[1])
+                 new_x2 = max(bottom_right_last[0], bottom_right_curr[0])
+                 new_y2 = max(bottom_right_last[1], bottom_right_curr[1])
+
+                 new_boxes[-1]['actual_boundries'] += box['actual_boundries']
+                 new_boxes[-1]['updated_boundries'] = {"top_left": (new_x1, new_y1),
+                                                       "bottom_right": (new_x2, new_y2),
+                                                       "person_count": new_boxes[-1]['updated_boundries']['person_count'] + box['updated_boundries']['person_count'],
+                                                       "vehical_count": new_boxes[-1]['updated_boundries']['vehical_count'] + box['updated_boundries']['vehical_count'],
+                                                       "animal_count": new_boxes[-1]['updated_boundries']['animal_count'] + box['updated_boundries']['animal_count']}
+             else:
+                 new_boxes.append(box)
+
+     return new_boxes
+
+ def croped_images(image,new_boxes):
120
+ """_summary_
121
+
122
+ Args:
123
+ image (numpy array): get numpy array of image which has 3 channels
124
+ new_boxes (json array): get json array
125
+
126
+ Returns:
127
+ croped_images_list(list of numpy array): returns list which has croped images
128
+ single_object_images(list of numpy array): returns list which has single object images
129
+ """
130
+ croped_images_list = []
131
+ single_object_images = []
132
+
133
+ for data in new_boxes:
134
+ print(data['updated_boundries'])
135
+ crop_image = image[data['updated_boundries']['top_left'][1]:data['updated_boundries']['bottom_right'][1],data['updated_boundries']['top_left'][0]:data['updated_boundries']['bottom_right'][0]]
136
+ croped_images_list.append(crop_image)
137
+
138
+ for object in data['actual_boundries']:
139
+ if object['class']=='person':
140
+ crop_object= image[object['top_left'][1]:object['bottom_right'][1],object['top_left'][0]:object['bottom_right'][0]]
141
+ single_object_images.append(crop_object)
142
+
143
+
144
+ return croped_images_list,single_object_images
145
+
146
+ def image_enhancements(croped_images_list,single_object_images):
147
+ """_summary_
148
+
149
+ Args:
150
+ croped_images_list (list numpy array): croped images list
151
+ single_object_images (list numpy array): single object images list
152
+
153
+ Returns:
154
+ enhanced croped images: returns enhanced images
155
+ enhanced single_object_images: returns enhanced images
156
+ """
157
+ enhanced_images = []
158
+ enhanced_single_object_images = []
159
+
160
+ for image in croped_images_list:
161
+
162
+ # resize the image
163
+ res = cv2.resize(image,(500*image.shape[1]//image.shape[0],500), interpolation = cv2.INTER_CUBIC)
164
+
165
+ # brightness and contrast
166
+ brightness = 16
167
+ contrast = 0.95
168
+ res2 = cv2.addWeighted(res, contrast, np.zeros(res.shape, res.dtype), 0, brightness)
169
+
170
+ # Sharpen the image
171
+ kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
172
+ sharpened_image = cv2.filter2D(res2, -1, kernel)
173
+
174
+ #append in the list
175
+ enhanced_images.append(sharpened_image)
176
+
177
+
178
+ for image in single_object_images:
179
+
180
+ # resize the image
181
+ res = cv2.resize(image,(500*image.shape[1]//image.shape[0],500), interpolation = cv2.INTER_CUBIC)
182
+
183
+ # brightness and contrast
184
+ brightness = 16
185
+ contrast = 0.95
186
+ res2 = cv2.addWeighted(res, contrast, np.zeros(res.shape, res.dtype), 0, brightness)
187
+
188
+ # Sharpen the image
189
+ kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
190
+ sharpened_image = cv2.filter2D(res2, -1, kernel)
191
+
192
+ #append enhnaced single object image
193
+ enhanced_single_object_images.append(sharpened_image)
194
+
195
+ return enhanced_images,enhanced_single_object_images
196
+
197
+
198
+ def detect_activity(single_object_images):
199
+ """_summary_
200
+
201
+ Args:
202
+ single_object_images (list of numpy array): list of single object images
203
+
204
+ Returns:
205
+ activities(list of strings): returns list of activities perform by person
206
+ """
207
+ activities = []
208
+
209
+ for img in single_object_images:
210
+
211
+ predictions =activity_det_model.predict(img)
212
+
213
+ for result in predictions:
214
+
215
+ probs = result.probs
216
+ class_index = probs.top1
217
+
218
+ activities.append(activity_classes[class_index])
219
+
220
+ return activities
221
+
222
+
223
+ def get_distances(new_boxes):
224
+ """_summary_
225
+
226
+ Args:
227
+ new_boxes (json array): takes json array of detected image's data
228
+
229
+ Returns:
230
+ distance_list: list of distances of each object
231
+ """
232
+
233
+ distance_list = []
234
+ for box in new_boxes:
235
+ for actual_box in box['actual_boundries']:
236
+ height = actual_box['bottom_right'][1] - actual_box['top_left'][1]
237
+
238
+ if actual_box['class'] == "person":
239
+ distance = FOCAL_LENGTH*PERSON_HEIGHT/height
240
+
241
+ elif actual_box['class'] == "vehical":
242
+ distance = FOCAL_LENGTH*PERSON_HEIGHT/height
243
+
244
+ else:
245
+ distance = FOCAL_LENGTH*PERSON_HEIGHT/height
246
+
247
+ distance_list.append(str(round(distance)) + "m")
248
+
249
+ return distance_list
250
+
251
+
252
+ def get_json_data(json_data,enhanced_images,detected_activity,distances_list):
253
+ """_summary_
254
+
255
+ Args:
256
+ json_data (json Array): get json data of image
257
+ enhanced_images (list of numpy array): list of enhanced images
258
+ detected_activity (list of strings): list of activities of person
259
+ distances_list (lsit of integers): list of distances of each object
260
+
261
+ Returns:
262
+ results(json Array): contains all informations needed for frontend
263
+ {'zoomed_img':np.array([]) ,
264
+ 'actual_boxes':[],
265
+ 'updated_boxes':{},
266
+ }
267
+ """
268
+ results = []
269
+ object_count = 0
270
+ activity_count = 0
271
+ for idx,box in enumerate(json_data):
272
+ final_json_output = {'zoomed_img':np.array([]) ,
273
+ 'actual_boxes':[],
274
+ 'updated_boxes':{},
275
+ }
276
+
277
+ final_json_output['zoomed_img'] = enhanced_images[idx]
278
+ final_json_output['updated_boxes'] = { "top_left": box['updated_boundries']['top_left'],
279
+ "bottom_right": box['updated_boundries']['bottom_right']}
280
+
281
+ for actual_box in box['actual_boundries']:
282
+
283
+ temp = {"top_left": [],
284
+ "bottom_right": [],
285
+ "class": "",
286
+ "distance":0,
287
+ "activity":'none'}
288
+ temp['top_left'] = actual_box['top_left']
289
+ temp['bottom_right'] = actual_box['bottom_right']
290
+ temp['class'] = actual_box['class']
291
+ temp['distance'] = distances_list[object_count]
292
+ object_count+=1
293
+
294
+ if temp['class'] == 'person':
295
+ temp['activity'] = detected_activity[activity_count]
296
+ activity_count+=1
297
+
298
+ final_json_output['actual_boxes'].append(temp)
299
+ final_json_output = fix_distance(final_json_output)
300
+
301
+ results.append(final_json_output)
302
+
303
+ return results
304
+
305
+
306
+ def fix_distance(final_json_output):
307
+ """_summary_
308
+
309
+ Args:
310
+ final_json_output (json Array): array of json object
311
+
312
+ Returns:
313
+ final_json_output (json Array): array of json object
314
+ """
315
+ distances = []
316
+ DIFF = 90
317
+
318
+ for idx,box in enumerate(final_json_output['actual_boxes']):
319
+ distances.append({'idx':idx,'distance':int(box['distance'][:-1])})
320
+
321
+ sorted_dist = sorted(distances, key=lambda d: d['distance'])
322
+ sum_dist = []
323
+ idx= 0
324
+ sum_dist.append({'sum':sorted_dist[0]['distance'],'idxes':[sorted_dist[0]['idx']]})
325
+
326
+ for i in range(1,len(sorted_dist)):
327
+ print(sorted_dist[i]['distance'],sorted_dist[i-1]['distance'])
328
+ if abs(sorted_dist[i]['distance']-sorted_dist[i-1]['distance']) <=DIFF:
329
+ sum_dist[idx]['sum']+= sorted_dist[i]['distance']
330
+ sum_dist[idx]['idxes'].append(sorted_dist[i]['idx'])
331
+
332
+ else:
333
+ sum_dist.append({'sum':sorted_dist[i]['distance'],'idxes':[sorted_dist[i]['idx']]})
334
+ idx+=1
335
+
336
+ #change values in distance array
337
+ for data in sum_dist:
338
+ count = len(data['idxes'])
339
+ mean = data['sum']//count
340
+ for i in data['idxes']:
341
+ final_json_output['actual_boxes'][i]['distance'] = str(mean)+'m'
342
+
343
+ return final_json_output
344
+
345
+
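
An illustrative run of fix_distance (made-up numbers): distances of 100m, 150m and 400m sort to [100, 150, 400]; since 150 - 100 <= 90 the first two share a group (mean (100 + 150) // 2 = 125) while 400 starts its own, so the boxes end up labelled "125m", "125m" and "400m".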
requirements.txt ADDED
@@ -0,0 +1,127 @@
+ absl-py==2.1.0
+ altair==5.3.0
+ asttokens==2.4.1
+ astunparse==1.6.3
+ attrs==23.2.0
+ backcall==0.2.0
+ beautifulsoup4==4.12.3
+ bleach==6.1.0
+ blinker==1.8.1
+ cachetools==5.3.3
+ certifi==2024.2.2
+ charset-normalizer==3.3.2
+ click==8.1.7
+ colorama==0.4.6
+ contourpy==1.2.1
+ cycler==0.12.1
+ decorator==5.1.1
+ defusedxml==0.7.1
+ docopt==0.6.2
+ executing==2.0.1
+ fastjsonschema==2.19.1
+ filelock==3.14.0
+ flatbuffers==24.3.25
+ fonttools==4.51.0
+ fsspec==2024.3.1
+ gast==0.5.4
+ gitdb==4.0.11
+ GitPython==3.1.43
+ google-pasta==0.2.0
+ grpcio==1.63.0
+ h5py==3.11.0
+ idna==3.7
+ importlib_metadata==7.1.0
+ importlib_resources==6.4.0
+ intel-openmp==2021.4.0
+ ipython==8.12.3
+ jedi==0.19.1
+ Jinja2==3.1.3
+ jsonschema==4.22.0
+ jsonschema-specifications==2023.12.1
+ jupyter_client==8.6.1
+ jupyter_core==5.7.2
+ jupyterlab_pygments==0.3.0
+ keras==3.3.3
+ kiwisolver==1.4.5
+ libclang==18.1.1
+ Markdown==3.6
+ markdown-it-py==3.0.0
+ MarkupSafe==2.1.5
+ matplotlib==3.8.4
+ matplotlib-inline==0.1.7
+ mdurl==0.1.2
+ mistune==3.0.2
+ mkl==2021.4.0
+ ml-dtypes==0.3.2
+ mpmath==1.3.0
+ namex==0.0.8
+ nbclient==0.10.0
+ nbconvert==7.16.4
+ nbformat==5.10.4
+ networkx==3.2.1
+ numpy==1.26.4
+ opencv-python==4.9.0.80
+ opt-einsum==3.3.0
+ optree==0.11.0
+ packaging==24.0
+ pandas==2.2.2
+ pandocfilters==1.5.1
+ parso==0.8.4
+ pickleshare==0.7.5
+ pillow==10.3.0
+ platformdirs==4.2.1
+ prompt-toolkit==3.0.43
+ protobuf==4.25.3
+ psutil==5.9.8
+ pure-eval==0.2.2
+ py-cpuinfo==9.0.0
+ pyarrow==16.0.0
+ pydeck==0.9.0
+ Pygments==2.17.2
+ pyparsing==3.1.2
+ python-dateutil==2.9.0.post0
+ python-dotenv==1.0.1
+ pytz==2024.1
+ pywin32==306
+ PyYAML==6.0.1
+ pyzmq==26.0.3
+ referencing==0.35.1
+ requests==2.31.0
+ rich==13.7.1
+ rpds-py==0.18.0
+ scipy==1.13.0
+ seaborn==0.13.2
+ six==1.16.0
+ smmap==5.0.1
+ soupsieve==2.5
+ stack-data==0.6.3
+ streamlit==1.34.0
+ sympy==1.12
+ tbb==2021.12.0
+ tenacity==8.2.3
+ tensorboard==2.16.2
+ tensorboard-data-server==0.7.2
+ tensorflow==2.16.1
+ tensorflow-intel==2.16.1
+ tensorflow-io-gcs-filesystem==0.31.0
+ termcolor==2.4.0
+ thop==0.1.1.post2209072238
+ tinycss2==1.3.0
+ toml==0.10.2
+ toolz==0.12.1
+ torch==2.3.0
+ torchvision==0.18.0
+ tornado==6.4
+ tqdm==4.66.2
+ traitlets==5.14.3
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ ultralytics==8.2.6
+ urllib3==2.2.1
+ watchdog==4.0.0
+ wcwidth==0.2.13
+ webencodings==0.5.1
+ Werkzeug==3.0.2
+ wrapt==1.16.0
+ yarg==0.1.9
+ zipp==3.18.1