import project_path
import argparse
from datetime import datetime
import torch
import os
from dataloader import create_dataloader_frames_only
from aris import create_manual_marking, create_metadata_dictionary, prep_for_mm
from inference import setup_model, do_suppression, do_detection, do_tracking, json_dump_round_float
from visualizer import generate_video_batches
import json


def main(args):
    """
    Main processing task to be run in gradio

    Runs detection + tracking over every sequence directory found under
    args.frames/<location>/ and writes MOT-format results to
    args.output/<location>/tracker/data/<sequence>.txt.

    Layout assumptions (from the code below):
      - frame sequences live in <args.frames>/<location>/<sequence>/
      - per-location metadata is <args.metadata>/<location>.json

    Args:
        args: parsed argparse namespace with .frames, .metadata, .output, .weights

    TODO: Separate into subtasks in different queues; have a GPU-only queue.
    """
    print("In task...")
    print("Cuda available in task?", torch.cuda.is_available())

    dirname = args.frames
    locations = ["test"]
    for loc in locations:
        in_loc_dir = os.path.join(dirname, loc)
        out_dir = os.path.join(args.output, loc, "tracker", "data")
        metadata_path = os.path.join(args.metadata, loc + ".json")
        os.makedirs(out_dir, exist_ok=True)
        print(in_loc_dir)
        print(out_dir)
        print(metadata_path)

        seq_list = os.listdir(in_loc_dir)
        for idx, seq in enumerate(seq_list, start=1):
            print(" ")
            print(f"({idx}/{len(seq_list)}) {seq}")
            print(" ")
            in_seq_dir = os.path.join(in_loc_dir, seq)
            infer_seq(in_seq_dir, out_dir, seq, args.weights, metadata_path)


def infer_seq(in_dir, out_dir, seq_name, weights, metadata_path):
    """
    Run detection + tracking on a single frame sequence and write MOT output.

    Looks up the clip's physical dimensions in the metadata JSON, runs the
    YOLO detection / suppression / tracking pipeline, and writes one
    MOT-challenge text file per sequence to out_dir/<seq_name>.txt.
    On detection failure, writes out_dir/ERROR_<seq_name>.txt and returns.

    Args:
        in_dir (str): directory containing the sequence's frames
        out_dir (str): directory to write <seq_name>.txt results into
        seq_name (str): sequence/clip name; must match 'clip_name' in metadata
        weights (str): path to saved YOLOv5 weights
        metadata_path (str): path to the location's metadata JSON
    """
    # No-op progress callback standing in for the gradio progress hook.
    gradio_progress = lambda p, m: 0

    # Look up the clip's physical extent (meters) from the metadata file;
    # -1 sentinels mean "not found".
    image_meter_width = -1
    image_meter_height = -1
    with open(metadata_path, 'r') as f:
        for seq in json.load(f):
            if seq['clip_name'] == seq_name:
                image_meter_width = seq['x_meter_stop'] - seq['x_meter_start']
                image_meter_height = seq['y_meter_stop'] - seq['y_meter_start']
                break  # clip names are treated as unique; first hit wins
    if image_meter_height == -1:
        print("No metadata found for file " + seq_name)
        return

    # create dataloader
    dataloader = create_dataloader_frames_only(in_dir)

    # run detection + tracking
    model, device = setup_model(weights)
    try:
        inference, width, height = do_detection(dataloader, model, device, gp=gradio_progress)
    except Exception as e:
        # Best-effort batch processing: record the failure (including the
        # cause, previously swallowed by a bare except) and move on.
        print("Error in " + seq_name)
        print(repr(e))
        with open(os.path.join(out_dir, "ERROR_" + seq_name + ".txt"), 'w') as f:
            f.write("ERROR")
        return

    all_preds, real_width, real_height = do_suppression(dataloader, inference, width, height, gp=gradio_progress)
    results = do_tracking(all_preds, image_meter_width, image_meter_height, gp=gradio_progress)

    # Convert tracking results to MOT challenge rows:
    #   frame,id,bb_left,bb_top,bb_width,bb_height,conf,x,y,z
    mot_rows = []
    for frame in results['frames']:
        for fish in frame['fish']:
            bbox = fish['bbox']  # assumes normalized [x1, y1, x2, y2] — TODO confirm against do_suppression
            left = bbox[0] * real_width
            top = bbox[1] * real_height
            w = bbox[2] * real_width - bbox[0] * real_width
            h = bbox[3] * real_height - bbox[1] * real_height
            row = [
                str(frame['frame_num'] + 1),  # MOT frame numbers are 1-based
                str(fish['fish_id'] + 1),     # MOT track ids are 1-based
                str(int(left)),
                str(int(top)),
                str(int(w)),
                str(int(h)),
                "-1",  # conf: unused
                "-1",  # x: unused
                "-1",  # y: unused
                "-1",  # z: unused
            ]
            mot_rows.append(",".join(row))

    mot_text = "\n".join(mot_rows)
    with open(os.path.join(out_dir, seq_name + ".txt"), 'w') as f:
        f.write(mot_text)
    return


def argument_parser():
    """Build the CLI argument parser for this script.

    Returns:
        argparse.ArgumentParser with --frames, --metadata, --output, --weights.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--frames", required=True, help="Path to frame directory. Required.")
    parser.add_argument("--metadata", required=True, help="Path to metadata directory. Required.")
    parser.add_argument("--output", required=True, help="Path to output directory. Required.")
    # Help text previously claimed '../models/...' while the actual default
    # was 'models/...'; the help now matches the real default.
    parser.add_argument("--weights", default='models/v5m_896_300best.pt',
                        help="Path to saved YOLOv5 weights. Default: models/v5m_896_300best.pt")
    return parser


if __name__ == "__main__":
    args = argument_parser().parse_args()
    main(args)