oskarastrom committed
Commit 592e4db
Parent: 752c2e9

Upload old results

Files changed (7):
  1. annotation_editor.js +4 -1
  2. annotation_handler.py +3 -4
  3. app.py +90 -47
  4. aris.py +49 -12
  5. main.py +5 -6
  6. state_handler.py +1 -0
  7. visualizer.py +6 -4
annotation_editor.js CHANGED
@@ -25,6 +25,7 @@
 
     show_frame();
 }
+
 window.prev_frame = () => {
     window.frame_index = Math.max(window.frame_index - 1, 0);
     show_frame();
@@ -105,7 +106,9 @@
     prettify_annotation();
 }
 color_from_id = (id) => {
-    return 'hsl(' + Math.floor((id*id)*57 % 360) + ', 100%, 50%)'
+    c = Math.ceil(Math.log2(id));
+    hue = (2*id - Math.pow(2,c) - 1) / Math.pow(2,c);
+    return 'hsl(' + Math.floor(hue*359) + ', 100%, 50%)'
 }
 
 window.prettify_annotation = () => {
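
The new color_from_id drops the quadratic hash (id*id)*57 % 360, which can land nearby ids on similar hues, in favor of a binary subdivision of the hue circle: each power-of-two round of ids fills the midpoints left by the previous round. A minimal Python sketch of the same arithmetic (the function name is illustrative, not from the repo):

import math

def hue_from_id(fish_id: int) -> int:
    # Same formula as the new color_from_id: round c covers ids in (2^(c-1), 2^c],
    # spacing them evenly at odd multiples of 1/2^c around the hue circle.
    c = math.ceil(math.log2(fish_id))
    hue = (2 * fish_id - 2 ** c - 1) / 2 ** c
    return math.floor(hue * 359)

# ids 1..8 -> hues [0, 179, 89, 269, 44, 134, 224, 314]:
# consecutive ids stay far apart on the color wheel.
print([hue_from_id(i) for i in range(1, 9)])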
annotation_handler.py CHANGED
@@ -6,7 +6,7 @@ import base64
 
 VIDEO_HEIGHT = 700
 
-def load_frames(video_path, preds_path):
+def load_frames(video_path, preds):
     """Load frames for annotation editing
     """
 
@@ -15,11 +15,11 @@ def load_frames(video_path, preds_path):
     didson = dataset.didson
 
     frames = didson.load_frames(start_frame=0)
-    frame_info, h, w = get_frame_info(frames, preds_path)
+    frame_info, h, w = get_frame_info(frames, preds)
 
     return frame_info
 
-def get_frame_info(frames, preds_path):
+def get_frame_info(frames, preds):
     """Get visualized video frames ready for output, given raw ARIS/DIDSON frames.
     Warning: all frames in frames will be stored in memory - careful of OOM errors. Consider processing large files
     in batches, such as in generate_video_batches()
@@ -27,7 +27,6 @@ def get_frame_info(frames, preds_path):
     Returns:
         list(np.ndarray), height (int), width (int)
     """
-    preds = json.load(open(preds_path, 'r'))
     color_map = { fish['id'] : fish['color'] for fish in preds['fish'] }
 
     frame_info = []
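
Both helpers now take the already-parsed predictions dict instead of a file path, so the JSON is read once by the caller rather than re-opened inside every helper. A sketch of the resulting call pattern, assuming the repo's example assets are present (the paths below are the ones app.py uses):

import json
from annotation_handler import load_frames

# Parse the predictions JSON once at the call site...
with open("static/example/640 2018-07-09_115439_results.json") as f:
    preds = json.load(f)

# ...then hand the dict down; load_frames and get_frame_info no longer touch disk for it.
frame_info = load_frames("static/example/input_file.aris", preds)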
app.py CHANGED
@@ -1,5 +1,5 @@
 import gradio as gr
-from uploader import save_data
+from uploader import save_data, create_data_dir
 from main import predict_task
 from state_handler import load_example_result, reset_state
 from file_reader import File
@@ -11,29 +11,29 @@ import base64
 from bbox import draggable_js
 from annotation_handler import load_frames
 import json
+from zipfile import ZipFile
+import os
 
 max_tabs = 10
 table_headers = ["TOTAL" , "FRAME_NUM", "DIR", "R", "THETA", "L", "TIME", "DATE", "SPECIES"]
 info_headers = [
-    "TOTAL_TIME", "DATE", "START", "END",
-    "TOTAL_FISH", "UPSTREAM_FISH", "DOWNSTREAM_FISH", "NONDIRECTIONAL_FISH",
-    "TOTAL_FRAMES", "FRAME_RATE",
-    "UPSTREAM_MOTION", "INTENSITY", "THRESHOLD", "WINDOW_START", "WINDOW_END", "WATER_TEMP"
+    ["TOTAL_TIME", "DATE", "START", "END", "TOTAL_FRAMES", "FRAME_RATE"],
+    ["TOTAL_FISH", "UPSTREAM_FISH", "DOWNSTREAM_FISH", "NONDIRECTIONAL_FISH"],
+    ["UPSTREAM_MOTION", "INTENSITY", "THRESHOLD", "WATER_TEMP"]
 ]
 css = """
+<style>
 #result_json {
     height: 500px;
     overflow: scroll !important;
 }
-#marking_json textarea {
-    height: 100% !important;
-}
-#marking_json label {
-    height: calc(100% - 30px) !important;
+#marking_json thead {
+    display: none !important;
 }
 #canvas {
     align-self: center;
 }
+</style>
 """
 js_update_tabs = """
 async () => {
@@ -43,7 +43,7 @@ js_update_tabs = """
         style_sheet = document.getElementById("tab_style")
         style_sheet.innerHTML = ""
         for (let i = 1; i <= idx; i++) {
-            style_sheet.innerHTML += "button.svelte-kqij2n:nth-child(" + i + "):before {content: 'Result " + i + "';}"
+            style_sheet.innerHTML += "#result_tabs button.svelte-kqij2n:nth-child(" + i + "):before {content: 'Result " + i + "';}"
         }
     }
 """
@@ -106,14 +106,15 @@ def handle_next(_, progress=gr.Progress()):
     upload_file(file_path, "fishcounting", "webapp_uploads/" + file_name)
 
     # Do inference
-    metadata, json_filepath, zip_filepath, video_filepath, marking_filepath = predict_task(file_path, gradio_progress=set_progress)
+    json_result, json_filepath, zip_filepath, video_filepath, marking_filepath = predict_task(file_path, gradio_progress=set_progress)
 
     # Store result for that file
+    result['json_result'].append(json_result)
     result["path_video"].append(video_filepath)
     result["path_zip"].append(zip_filepath)
     result["path_json"].append(json_filepath)
    result["path_marking"].append(marking_filepath)
-    fish_table, fish_info = create_metadata_table(metadata, table_headers, info_headers)
+    fish_table, fish_info = create_metadata_table(json_result, table_headers, info_headers)
     result["fish_table"].append(fish_table)
     result["fish_info"].append(fish_info)
 
@@ -146,6 +147,17 @@ def show_data():
     # Check if inference is done
     not_done = state['index'] < state['total']
 
+    frame_info = load_frames("static/example/input_file.aris", result['json_result'])
+
+    annotation_html = "<div style='display:flex'>"
+    annotation_html += "<canvas id='canvas' style='width:50%' onmousedown='mouse_down(event)' onmousemove='mouse_move(event)' onmouseup='mouse_up()' onmouseleave='mouse_up()'></canvas>"
+    annotation_html += "<div id='annotation_display' style='width:50%'></div>"
+    annotation_html += "</div>"
+    annotation_html += "<p id='annotation_info' style='display:none'>" + json.dumps(frame_info) + "</p>"
+    annotation_html += "<img id='annotation_img' onload='draw()' style='display:none'></img>"
+
     # Send update to UI, and to inference_handler to start next file inference
     return {
         zip_out: gr.update(value=result["path_zip"]),
@@ -153,83 +165,111 @@ def show_data():
         tabs[i]['video']: gr.update(value=result["path_video"][i], visible=True),
         tabs[i]['metadata']: gr.update(value=result["fish_info"][i], visible=True),
         tabs[i]['table']: gr.update(value=result["fish_table"][i], visible=True),
+        tabs[i]['annotation']: gr.update(value=annotation_html, visible=True),
         tab_parent: gr.update(selected=i),
         inference_handler: gr.update(value = str(np.random.rand()), visible=not_done)
     }
 
 
-def init_annotation():
-
-    frame_info = load_frames("static/example/input_file.aris", "static/example/640 2018-07-09_115439_results.json")
-
-    html = "<div style='display:flex'>"
-    html += "<canvas id='canvas' style='width:50%' onmousedown='mouse_down(event)' onmousemove='mouse_move(event)' onmouseup='mouse_up()' onmouseleave='mouse_up()'></canvas>"
-    html += "<div id='annotation_display' style='width:50%'></div>"
-    html += "</div>"
-    html += "<p id='annotation_info' style='display:none'>" + json.dumps(frame_info) + "</p>"
-    html += "<img id='annotation_img' onload='draw()' style='display:none'></img>"
-    return html
+def preview_result(zip_info):
+    zip_name = zip_info[0]
+    print(zip_name)
+    dir_name = create_data_dir()
+    with ZipFile(zip_name) as zip_file:
+        ZipFile.extractall(zip_file, path=dir_name)
 
+    unzipped = os.listdir(dir_name)
+    print(unzipped)
 
+    reset_state(result, state)
+    state["index"] = 1
+    for file in unzipped:
+        if (file.endswith("_results.mp4")):
+            result["path_video"].append(os.path.join(dir_name, file))
+        elif (file.endswith("_results.json")):
+            result["path_json"].append(os.path.join(dir_name, file))
+        elif (file.endswith("_marking.txt")):
+            result["path_marking"].append(os.path.join(dir_name, file))
+
+    with open(result['path_json'][0]) as f:
+        json_result = json.load(f)
+        result['json_result'] = json_result
+    fish_table, fish_info = create_metadata_table(json_result, table_headers, info_headers)
+    result["fish_table"].append(fish_table)
+    result["fish_info"].append(fish_info)
+
+    return {
+        result_handler: gr.update(value = str(state["index"])),
+        inference_handler: gr.update()
+    }
 
 
 demo = gr.Blocks()
 with demo:
-    with gr.Blocks(css=css) as inner_body:
+    with gr.Blocks() as inner_body:
 
         # Title of page
         gr.HTML(
             """
             <h1 align="center" style="font-size:xxx-large">Caltech Fisheye</h1>
-            <p align="center">Submit an .aris file to analyze result.</p>
+            """ + css + """
             <style id="tab_style"></style>
             """
         )
 
-        annotation_editor = gr.HTML("""""")
-        with open('annotation_editor.js', 'r') as f:
-            js = f.read()
-            print(js)
-        annotation_editor.change(lambda x: gr.update(), None, annotation_editor, _js=js)
-        gr.Button("Edit Annotation").click(init_annotation, None, annotation_editor)
-
-        #Input field for aris submission
-        input = File(file_types=[".aris", ".ddf"], type="binary", label="ARIS Input", file_count="multiple")
-
-        # Dummy element to call inference events, this also displays the inference progress
-        inference_handler = gr.Text(value=str(np.random.rand()), visible=False)
+        with gr.Tabs():
+            with gr.Tab("Infer ARIS"):
+                gr.HTML("<p align='center' style='font-size: large;font-style: italic;'>Submit an .aris file to analyze result.</p>")
+                #Input field for aris submission
+                input = File(file_types=[".aris", ".ddf"], type="binary", label="ARIS Input", file_count="multiple")
+
+                # Dummy element to call inference events, this also displays the inference progress
+                inference_handler = gr.Text(value=str(np.random.rand()), visible=False)
+
+                # Zip file output
+                zip_out = gr.File(label="ZIP Output", interactive=False)
+
+            with gr.Tab("Review Results"):
+                # Title of page
+                gr.HTML("<p align='center' style='font-size: large;font-style: italic;'>Submit an old zip file of results to visualize.</p>")
+                result_input = File(file_types=[".zip"], type="binary", label="Upload result file")
+                preview_result_btn = gr.Button("Preview Result")
 
         # Dummy element to call UI events
         result_handler = gr.Text(value="LOADING", visible=False, elem_id="result_handler")
 
         # List of all UI components that will recieve outputs from the result_handler
         UI_components = []
-
-        # Zip file output
-        zip_out = gr.File(label="ZIP Output", interactive=False)
         UI_components.append(zip_out)
 
         # Create result tabs
         tabs = []
-        with gr.Tabs() as tab_parent:
+        with gr.Tabs(elem_id="result_tabs") as tab_parent:
             UI_components.append(tab_parent)
             for i in range(max_tabs):
                 with gr.Tab(label="", id=i, elem_id="result_tab"+str(i)) as tab:
-                    with gr.Row():
-                        metadata_out = gr.JSON(label="Info", visible=False, elem_id="marking_json")
-                    video_out = gr.Video(label='Annotated Video', interactive=False, visible=False)
+                    metadata_out = gr.Matrix(label="Info", interactive=False, headers=[""]*6, datatype="markdown", visible=False, elem_id="marking_json")
                     table_out = gr.Matrix(label='Indentified Fish', headers=table_headers, interactive=False, visible=False)
+                    video_out = gr.Video(label='Annotated Video', interactive=False, visible=False)
+                    annotation_editor = gr.HTML("""""", visible=False)
+
+                    with open('annotation_editor.js', 'r') as f:
+                        js = f.read()
+                    annotation_editor.change(lambda x: gr.update(), None, annotation_editor, _js=js)
                     tabs.append({
                         'tab': tab,
                         'metadata': metadata_out,
                        'video': video_out,
-                        'table': table_out
+                        'table': table_out,
+                        'annotation': annotation_editor
                     })
-                    UI_components.extend([tab, metadata_out, video_out, table_out])
+                    UI_components.extend([tab, metadata_out, video_out, table_out, annotation_editor])
 
         # Button to show example result
         #gr.Button(value="Show Example Result").click(show_example_data, None, result_handler)
@@ -253,6 +293,9 @@ with demo:
 
         # Send UI changes based on the new results to the UI_components, and tell the inference_handler to start next inference
         result_handler.change(show_data, None, UI_components + [inference_handler], _js=js_update_tabs)
+
+        # Button to load a previous result and view visualization
+        preview_result_btn.click(preview_result, result_input, [result_handler, inference_handler])
 
 demo.queue().launch()
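The new Review Results tab feeds preview_result, which unzips a previously downloaded results archive and re-indexes its contents by filename suffix. A minimal sketch of that unzip-and-index step with the Gradio state handling stripped out (the function name is illustrative):

import os
from zipfile import ZipFile

def index_results_dir(zip_name, dir_name):
    # Extract the archive; zip_file.extractall(path=...) is the idiomatic
    # instance form of the ZipFile.extractall(zip_file, path=...) call above.
    with ZipFile(zip_name) as zip_file:
        zip_file.extractall(path=dir_name)

    # Sort the extracted files into buckets using the same suffixes preview_result checks.
    paths = {"video": [], "json": [], "marking": []}
    for file in os.listdir(dir_name):
        if file.endswith("_results.mp4"):
            paths["video"].append(os.path.join(dir_name, file))
        elif file.endswith("_results.json"):
            paths["json"].append(os.path.join(dir_name, file))
        elif file.endswith("_marking.txt"):
            paths["marking"].append(os.path.join(dir_name, file))
    return paths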
aris.py CHANGED
@@ -13,6 +13,7 @@ import json
 import pytz
 from copy import deepcopy
 from multiprocessing import Pool
+import math
 
 import pyARIS
 from tracker import Tracker
@@ -257,13 +258,11 @@ def prep_for_mm(json_data):
     return json_data
 
 
-def create_metadata_dictionary(aris_fp, json_fp, beam_width_dir=BEAM_WIDTH_DIR):
+def add_metadata_to_result(aris_fp, json_data, beam_width_dir=BEAM_WIDTH_DIR):
     """
     Return:
         dictionary, for manual marking
     """
-    with open(json_fp) as json_file:
-        json_data = json.load(json_file)
 
     metadata = {}
     metadata["FILE_NAME"] = aris_fp
@@ -319,7 +318,7 @@ def create_metadata_dictionary(aris_fp, json_fp, beam_width_dir=BEAM_WIDTH_DIR):
 
     metadata["COUNT_FILE_NAME"] = 'N/A'
     metadata["EDITOR_ID"] = 'N/A'
-    metadata["INTENSITY"] = f'{round(frame.threshold, 1):.1f} dB' # Missing
+    metadata["INTENSITY"] = f'{round(frame.intensity, 1):.1f} dB' # Missing
     metadata["THRESHOLD"] = f'{round(frame.threshold, 1):.1f} dB' # Missing
     metadata["WINDOW_START"] = round(frame.windowstart, 2)
     metadata["WINDOW_END"] = round(frame.windowstart + frame.windowlength, 2)
@@ -404,7 +403,9 @@ def create_metadata_dictionary(aris_fp, json_fp, beam_width_dir=BEAM_WIDTH_DIR):
     fish_entry['LATITUDE'] = frame.latitude or 'N 00 d 0.00000 m'
     fish_entry['LONGITUDE'] = frame.longitude or 'E 000 d 0.00000 m'
     fish_entry['PAN'] = frame.sonarpan
+    if math.isnan(fish_entry['PAN']): fish_entry['PAN'] = "nan"
     fish_entry['TILT'] = frame.sonartilt
+    if math.isnan(fish_entry['TILT']): fish_entry['TILT'] = "nan"
     fish_entry['ROLL'] = frame.roll # May be wrong number but sonarroll was NaN
     fish_entry['SPECIES'] = 'Unknown'
     fish_entry['MOTION'] = 'Running <-->'
@@ -424,9 +425,17 @@
     metadata["START"] = start_time
     metadata["END"] = end_time
 
-    return metadata
+    json_data['metadata'] = metadata
 
-def create_metadata_table(metadata, table_headers, info_headers):
+    return json_data
+
+def create_metadata_table(result, table_headers, info_headers):
+    if 'metadata' in result:
+        metadata = result['metadata']
+    else:
+        metadata = { 'FISH': [] }
+
+    # Create fish table
     table = []
     for fish in metadata["FISH"]:
         row = []
@@ -440,17 +449,38 @@ def create_metadata_table(metadata, table_headers, info_headers):
             row.append("-")
         table.append(row)
 
-    info = {}
-    for header in info_headers:
-        info[header] = metadata[header]
-
+    # Create info table
+    stacked_info = []
+    max_col = 0
+    for column in info_headers:
+        column_res = []
+        for field in column:
+            if field in metadata:
+                column_res.append([field, metadata[field]])
+        stacked_info.append(column_res)
+        if len(column_res) > max_col:
+            max_col = len(column_res)
+
+    info = []
+    for i in range(max_col):
+        row = []
+        for column in stacked_info:
+            if i < len(column):
+                entry = column[i]
+                row.append("**" + entry[0] + "**")
+                row.append(str(entry[1]))
+            else:
+                row.append(" ")
+                row.append(" ")
+        info.append(row)
     return table, info
 
-def create_manual_marking(metadata, out_path=None):
+def create_manual_marking(results, out_path=None):
     """
     Return:
         string, full contents of manual marking
     """
+    metadata = results['metadata']
 
     s = f'''
 Total Fish = {metadata["TOTAL_FISH"]}
@@ -480,7 +510,14 @@ File Total Frame# Dir R (m) Theta L(cm) dR(cm) L/dR Aspect Time
 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 '''
 
-    for entry in metadata["FISH"]:
+    for fish in metadata["FISH"]:
+        entry = {}
+        for field in fish.keys():
+            if type(fish[field]) == str:
+                entry[field] = math.nan
+            else:
+                entry[field] = fish[field]
+
         s += f'{entry["FILE"]:>4} {entry["TOTAL"]:>5} {entry["FRAME_NUM"]:>6} {entry["DIR"]:>3} {entry["R"]:>6.2f} {entry["THETA"]:>6.1f} {entry["L"]:>6.1f} {entry["DR"]:>6.1f} {entry["LDR"]:>6.2f} {entry["ASPECT"]:>6.1f} {entry["TIME"]:>8} {entry["DATE"]:>10} {entry["LATITUDE"]:>17} {entry["LONGITUDE"]:>18} {entry["PAN"]:>7.2f} {entry["TILT"]:>7.2f} {entry["ROLL"]:>7.2f} {entry["SPECIES"]:>8} {entry["MOTION"]:>37} {entry["Q"]:>5} {entry["N"]:>2} {entry["COMMENT"]}\n'
 
     s += f'''
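
create_metadata_table now renders the info panel as a Matrix rather than a flat dict: each group in info_headers becomes a vertical column of bold-name/value cell pairs, and shorter columns are padded with blanks so every row has the same width (three groups of two cells each matches the headers=[""]*6 Matrix in app.py). A toy illustration of that stacking, with hypothetical values:

# Three header groups of different lengths become rows of a 6-wide table.
stacked_info = [
    [["TOTAL_TIME", "0:05:00"], ["DATE", "2018-07-09"]],   # column 1
    [["TOTAL_FISH", 15]],                                  # column 2 (shorter)
    [["THRESHOLD", "0.9 dB"], ["WATER_TEMP", "14 C"]],     # column 3
]
max_col = max(len(column) for column in stacked_info)
info = []
for i in range(max_col):
    row = []
    for column in stacked_info:
        if i < len(column):
            row += ["**" + column[i][0] + "**", str(column[i][1])]
        else:
            row += [" ", " "]  # pad the short column
    info.append(row)
# info[0]: TOTAL_TIME 0:05:00 | TOTAL_FISH 15 | THRESHOLD 0.9 dB
# info[1]: DATE 2018-07-09    | (blank)       | WATER_TEMP 14 C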
main.py CHANGED
@@ -2,7 +2,7 @@ import os
 import torch
 from zipfile import ZipFile
 
-from aris import create_manual_marking, BEAM_WIDTH_DIR, create_metadata_dictionary, prep_for_mm
+from aris import create_manual_marking, BEAM_WIDTH_DIR, add_metadata_to_result, prep_for_mm
 from dataloader import create_dataloader_aris
 from inference import do_full_inference, json_dump_round_float
 from visualizer import generate_video_batches
@@ -52,17 +52,16 @@ def predict_task(filepath, weights=WEIGHTS, gradio_progress=None):
 
     # re-index results if desired - this should be done before writing the file
     results = prep_for_mm(results)
+    results = add_metadata_to_result(filepath, results)
 
     # write output to disk
     json_dump_round_float(results, results_filepath)
 
-    metadata = None
     if dataset.didson.info['version'][3] == 5: # ARIS only
-        metadata = create_metadata_dictionary(filepath, results_filepath)
-        create_manual_marking(metadata, out_path=marking_filepath)
+        create_manual_marking(results, out_path=marking_filepath)
 
     # generate a video with tracking results
-    generate_video_batches(dataset.didson, results_filepath, frame_rate, video_filepath,
+    generate_video_batches(dataset.didson, results, frame_rate, video_filepath,
         image_meter_width=image_meter_width, image_meter_height=image_meter_height, gp=gradio_progress)
 
     # zip up the results
@@ -74,4 +73,4 @@ def predict_task(filepath, weights=WEIGHTS, gradio_progress=None):
 
     # release GPU memory
     torch.cuda.empty_cache()
 
-    return metadata, results_filepath, zip_filepath, video_filepath, marking_filepath
+    return results, results_filepath, zip_filepath, video_filepath, marking_filepath
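
With add_metadata_to_result, the marking metadata is embedded in the results dict before json_dump_round_float writes it, so the _results.json on disk is self-contained; predict_task then returns that same dict. A hedged sketch of what a consumer can now rely on (the filename is hypothetical; the keys are the ones used elsewhere in this commit):

import json

with open("example_results.json") as f:  # hypothetical results file written by predict_task
    results = json.load(f)

fish = results["fish"]          # per-track entries with 'id', 'length', 'color', ...
metadata = results["metadata"]  # dict formerly returned separately by create_metadata_dictionary
print(metadata["TOTAL_FISH"], len(fish))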
state_handler.py CHANGED
@@ -363,6 +363,7 @@ def load_example_result(result, table_headers, info_headers):
 def reset_state(result, state):
 
     # Reset Result
+    result["json_result"] = []
     result["path_video"] = []
     result["path_zip"] = []
     result["path_json"] = []
visualizer.py CHANGED
@@ -20,18 +20,21 @@ BOX_THICKNESS = 2
 FONT_SCALE = 0.65
 FONT_THICKNESS = 1
 
-def generate_video_batches(didson, preds_path, frame_rate, video_out_path, gp=None, image_meter_width=None, image_meter_height=None, batch_size=1000):
+def generate_video_batches(didson, preds, frame_rate, video_out_path, gp=None, image_meter_width=None, image_meter_height=None, batch_size=1000):
     """Write a visualized video to video_out_path, given a didson object.
     """
     if (gp): gp(0, "Generating results video...")
     end_frame = didson.info['endframe'] or didson.info['numframes']
     out = None # need to wait til we have height and width to instantiate video file
+
+    print(preds)
+    print(preds.keys())
 
     with tqdm(total=end_frame, desc="Generating results video", ncols=0) as pbar:
         for i in range(0, end_frame, batch_size):
             batch_end = min(end_frame, i+batch_size)
             frames = didson.load_frames(start_frame=i, end_frame=batch_end)
-            vid_frames, h, w = get_video_frames(frames, preds_path, frame_rate, image_meter_width, image_meter_height, start_frame=i)
+            vid_frames, h, w = get_video_frames(frames, preds, frame_rate, image_meter_width, image_meter_height, start_frame=i)
 
             if out is None:
                 out = cv2.VideoWriter(video_out_path, cv2.VideoWriter_fourcc(*'mp4v'), frame_rate, [ int(1.5*w), h ] )
@@ -46,7 +49,7 @@ def generate_video_batches(didson, preds_path, frame_rate, video_out_path, gp=None,
 
     out.release()
 
-def get_video_frames(frames, preds_path, frame_rate, image_meter_width=None, image_meter_height=None, start_frame=0):
+def get_video_frames(frames, preds, frame_rate, image_meter_width=None, image_meter_height=None, start_frame=0):
     """Get visualized video frames ready for output, given raw ARIS/DIDSON frames.
     Warning: all frames in frames will be stored in memory - careful of OOM errors. Consider processing large files
     in batches, such as in generate_video_batches()
@@ -54,7 +57,6 @@ def get_video_frames(frames, preds_path, frame_rate, image_meter_width=None, image_meter_height=None, start_frame=0):
     Returns:
         list(np.ndarray), height (int), width (int)
     """
-    preds = json.load(open(preds_path, 'r'))
     pred_lengths = { fish['id'] : "%.2fm" % fish['length'] for fish in preds['fish'] }
     clip_pr_counts = Tracker.count_dirs(preds)
     color_map = { fish['id'] : fish['color'] for fish in preds['fish'] }