import project_path

import os
import torch
from zipfile import ZipFile

from backend.aris import create_manual_marking, BEAM_WIDTH_DIR, add_metadata_to_result, prep_for_mm
from backend.dataloader import create_dataloader_aris
from backend.inference import do_full_inference, json_dump_round_float
from backend.visualizer import generate_video_batches


def predict_task(filepath, config, output_formats=[], gradio_progress=None):
    """
    Main processing task to be run in gradio
        - Writes aris frames to dirname(filepath)/frames/{i}.jpg
        - Writes json output to dirname(filepath)/{filename}_results.json
        - Writes manual marking to dirname(filepath)/{filename}_marking.txt
        - Writes video output to dirname(filepath)/{filename}_results.mp4
        - Zips all results to dirname(filepath)/{filename}_results.zip
    Args:
        filepath (str): path to aris file
        config: inference hyperparameters; must provide a to_dict() method
        output_formats (list[str]): optional outputs, e.g. "Generate Manual Marking",
            "Generate Annotated Video"
        gradio_progress (callable, optional): progress callback taking (fraction, message)
    """
    if gradio_progress:
        gradio_progress(0, "In task...")
    print("Cuda available in task?", torch.cuda.is_available())

    # Set up save directory and define file names
    dirname = os.path.dirname(filepath)
    filename = os.path.basename(filepath).replace(".aris", "").replace(".ddf", "")
    results_filepath = os.path.join(dirname, f"{filename}_results.json")
    marking_filepath = os.path.join(dirname, f"{filename}_marking.txt")
    video_filepath = os.path.join(dirname, f"{filename}_results.mp4")
    zip_filepath = os.path.join(dirname, f"{filename}_results.zip")
    os.makedirs(dirname, exist_ok=True)

    # Create dataloader (uses the default number of background-subtraction frames)
    if gradio_progress:
        gradio_progress(0, "Initializing Dataloader...")
    dataloader, dataset = create_dataloader_aris(filepath, BEAM_WIDTH_DIR, None)

    # Extract aris/didson info. Didson does not yet have pixel-meter info
    if ".ddf" in filepath:
        image_meter_width = -1
        image_meter_height = -1
    else:
        image_meter_width = dataset.didson.info['xdim'] * dataset.didson.info['pixel_meter_width']
        image_meter_height = dataset.didson.info['ydim'] * dataset.didson.info['pixel_meter_height']
    frame_rate = dataset.didson.info['framerate']

    # Run detection + tracking
    results = do_full_inference(dataloader, image_meter_width, image_meter_height, gp=gradio_progress, config=config)

    # Generate metadata and extra inference information
    results = prep_for_mm(results)
    results = add_metadata_to_result(filepath, results)
    results['metadata']['hyperparameters'] = config.to_dict()

    # Create JSON result file
    json_dump_round_float(results, results_filepath)

    # Create Manual Marking file
    if "Generate Manual Marking" in output_formats and dataset.didson.info['version'][3] == 5:
        create_manual_marking(results, out_path=marking_filepath)

    # Create Annotated Video
    if "Generate Annotated Video" in output_formats:
        generate_video_batches(dataset.didson, results, frame_rate, video_filepath,
                               image_meter_width=image_meter_width, image_meter_height=image_meter_height,
                               gp=gradio_progress)

    # Zip up the results
    with ZipFile(zip_filepath, 'w') as z:
        for file in [results_filepath, marking_filepath, video_filepath, os.path.join(dirname, 'bg_start.jpg')]:
            if os.path.exists(file):
                z.write(file, arcname=os.path.basename(file))

    # Release GPU memory
    torch.cuda.empty_cache()

    return results, results_filepath, zip_filepath, video_filepath, marking_filepath
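

# Example invocation (a minimal sketch, not part of the original task code): the input
# path, output format strings, and DummyConfig below are assumptions based on how
# predict_task reads its arguments. DummyConfig only satisfies the config.to_dict()
# call made in this file; the real pipeline's config object will likely carry additional
# fields consumed by do_full_inference.
if __name__ == "__main__":
    class DummyConfig:
        """Stand-in for the project's inference hyperparameter config."""
        def to_dict(self):
            return {}

    results, results_json, results_zip, video, marking = predict_task(
        "/path/to/input.aris",  # hypothetical path to an ARIS recording
        DummyConfig(),
        output_formats=["Generate Manual Marking", "Generate Annotated Video"],
        gradio_progress=lambda frac, msg: print(f"[{frac:.0%}] {msg}"),  # simple console progress
    )
    print("Wrote:", results_json, results_zip, video, marking)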