from dataclasses import dataclass
from enum import Enum

class Model_Backbone(Enum):
    """Coarse backbone grouping used by the leaderboard: 'Original' or 'Other'."""

    Original = "Original"
    Other = "Other"

    @staticmethod
    def from_str(model_backbone: str) -> "Model_Backbone":
        """Map a raw backbone string to a known enum member."""
        if model_backbone == Model_Backbone.Original.value:
            return Model_Backbone.Original
        return Model_Backbone.Other

    @classmethod
    def format_for_leaderboard(cls, model_backbone: str):
        """Return the parsed enum member together with the original raw string."""
        return (cls.from_str(model_backbone), model_backbone)
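
# Illustrative usage (comments only, not executed): how a raw backbone string from a
# submission would be normalized. The actual call sites live elsewhere in the
# leaderboard code, so this is just a sketch of the intended behavior; the example
# input "EfficientNet" is a made-up placeholder.
#
#   Model_Backbone.from_str("Original")                 # -> Model_Backbone.Original
#   Model_Backbone.from_str("EfficientNet")             # -> Model_Backbone.Other
#   Model_Backbone.format_for_leaderboard("Original")   # -> (Model_Backbone.Original, "Original")
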
class Training_Dataset(Enum):
    """Training dataset a submission was trained on; unrecognized values fall back to Other."""

    XCL = "BirdSet (XCL)"
    XCM = "BirdSet (XCM)"
    Dedicated = "BirdSet (Dedicated)"
    Other = "other"

    @staticmethod
    def from_str(training_dataset: str) -> "Training_Dataset":
        """Map a raw dataset string (including common aliases) to a known enum member."""
        if training_dataset in [Training_Dataset.Dedicated.value, Training_Dataset.Dedicated.name, "BirdSet - Dedicated", "dt", "DT"]:
            return Training_Dataset.Dedicated
        if training_dataset in [Training_Dataset.XCM.value, Training_Dataset.XCM.name, "BirdSet - XCM", "mt", "MT"]:
            return Training_Dataset.XCM
        if training_dataset in [Training_Dataset.XCL.value, Training_Dataset.XCL.name, "BirdSet - XCL", "lt", "LT"]:
            return Training_Dataset.XCL
        return Training_Dataset.Other

    @classmethod
    def format_for_leaderboard(cls, training_dataset: str):
        """Return the parsed enum member together with the original raw string."""
        return (cls.from_str(training_dataset), training_dataset)
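
# Illustrative usage (comments only, not executed): from_str accepts the display value,
# the enum member name, and a few short aliases, so differently formatted submissions
# resolve to the same member. "my-custom-data" below is a made-up placeholder.
#
#   Training_Dataset.from_str("BirdSet (XCL)")   # -> Training_Dataset.XCL  (matched via .value)
#   Training_Dataset.from_str("lt")              # -> Training_Dataset.XCL  (short alias)
#   Training_Dataset.from_str("XCM")             # -> Training_Dataset.XCM  (matched via .name)
#   Training_Dataset.from_str("my-custom-data")  # -> Training_Dataset.Other
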
class Testing_Type(Enum):
    """BirdSet test datasets a result can be reported on; 'avg' denotes the average over all of them."""

    AVG = "avg"
    PER = "per"
    NES = "nes"
    UHH = "uhh"
    HSN = "hsn"
    NBP = "nbp"
    SSW = "ssw"
    SNE = "sne"
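
# Illustrative lookup (comments only, not executed): standard Enum value lookup maps a
# stored test-set code back to its member, which is presumably how the "Tested on"
# values from submissions are parsed.
#
#   Testing_Type("hsn")  # -> Testing_Type.HSN
#   Testing_Type("avg")  # -> Testing_Type.AVG  (average over all test datasets)
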
@dataclass
class Task:
    """A single benchmark metric: its key in the results JSON and the column name shown on the leaderboard."""

    metric: str
    col_name: str

# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # metric_key in the json file, name to display in the leaderboard
    cmap = Task("cmap", "cmAP")
    auroc = Task("auroc", "AUROC")
    t1acc = Task("t1-acc", "T1-Acc")
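
# Illustrative use of the Tasks enum (comments only, not executed): the leaderboard
# presumably builds its metric columns by iterating over the members, roughly like:
#
#   metric_cols = [task.value.col_name for task in Tasks]   # ["cmAP", "AUROC", "T1-Acc"]
#   metric_keys = [task.value.metric for task in Tasks]     # ["cmap", "auroc", "t1-acc"]
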
NUM_FEWSHOT = 0
# ---------------------------------------------------
# Your leaderboard name
TITLE = """
BirdSet Leaderboard
"""
# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
This leaderboard accompanies the [BirdSet Dataset Collection](https://huggingface.co/datasets/DBD-research-group/BirdSet). You can find out more about BirdSet in the "About" tab.
"""
# Which evaluations are you running? how can people reproduce what you have?
ABOUT_TEXT = f"""
## What is BirdSet?
Deep learning models have emerged as a powerful tool in avian bioacoustics to assess environmental health.
To maximize the potential of cost-effective and minimally invasive passive acoustic monitoring (PAM), models must analyze bird vocalizations across a wide range of species and environmental conditions.
However, data fragmentation challenges a comprehensive evaluation of generalization performance.
Therefore, we introduce the BirdSet dataset, comprising approximately 520,000 global bird recordings for training and over 400 hours of PAM recordings for testing in a multi-label classification setting.
You can find the datasets on [Hugging Face](https://huggingface.co/datasets/DBD-research-group/BirdSet) and the code on [GitHub](https://github.com/DBD-research-group/BirdSet).
"""
EVALUATION_QUEUE_TEXT = """
## How to Submit a Model
First, evaluate your model on the BirdSet dataset.
Then, enter your evaluation information and submit a request.
We will check your request and approve it if everything is in order.
Please make sure that your model is publicly available so that we can verify your results.
If you want to submit an average over all datasets, choose "AVG" as "Tested on".
"""
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
@misc{rauch2024birdset,
      title={BirdSet: A Dataset and Benchmark for Classification in Avian Bioacoustics},
      author={Lukas Rauch and Raphael Schwinger and Moritz Wirth and René Heinrich and Denis Huseljic and Jonas Lange and Stefan Kahl and Bernhard Sick and Sven Tomforde and Christoph Scholz},
      year={2024},
      eprint={2403.10380},
      archivePrefix={arXiv},
      primaryClass={cs.SD},
      url={https://arxiv.org/abs/2403.10380},
}
"""