Update BirdSet.py

BirdSet.py  +131 -130  CHANGED
@@ -11,25 +11,27 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# TODO: Address all TODOs and remove all explanatory comments
 """BirdSet: The General Avian Monitoring Evaluation Benchmark"""
 
 import os
 import datasets
 import pandas as pd
+from tqdm.auto import tqdm
+import tarfile
+
+from . import classes
 
 from .classes import BIRD_NAMES_NIPS4BPLUS, BIRD_NAMES_AMAZON_BASIN, BIRD_NAMES_HAWAII, \
     BIRD_NAMES_HIGH_SIERRAS, BIRD_NAMES_SIERRA_NEVADA, BIRD_NAMES_POWDERMILL_NATURE, BIRD_NAMES_SAPSUCKER, \
     BIRD_NAMES_COLUMBIA_COSTA_RICA, BIRD_NAMES_XENOCANTO, BIRD_NAMES_XENOCANTO_M
 
-from . import
-
-from .descriptions import _BIRD_DB_CITATION, _NIPS4BPLUS_CITATION, _NIPS4BPLUS_DESCRIPTION, \
+from .descriptions import _NIPS4BPLUS_CITATION, _NIPS4BPLUS_DESCRIPTION, \
    _HIGH_SIERRAS_DESCRIPTION, _HIGH_SIERRAS_CITATION, _SIERRA_NEVADA_DESCRIPTION, _SIERRA_NEVADA_CITATION, \
    _POWDERMILL_NATURE_DESCRIPTION, _POWDERMILL_NATURE_CITATION, _AMAZON_BASIN_DESCRIPTION, _AMAZON_BASIN_CITATION, \
    _SAPSUCKER_WOODS_DESCRIPTION, _SAPSUCKER_WOODS_CITATION, _COLUMBIA_COSTA_RICA_CITATION, \
    _COLUMBIA_COSTA_RICA_DESCRIPTION, _HAWAIIAN_ISLANDS_CITATION, _HAWAIIAN_ISLANDS_DESCRIPTION
 
+
 #############################################
 _BIRDSET_CITATION = """\
 @article{birdset,
@@ -48,6 +50,45 @@ _BIRDSET_DESCRIPTION = """\
 
 base_url = "https://huggingface.co/datasets/DBD-research-group/BirdSet/resolve/data"
 
+
+def _extract_all_to_same_folder(tar_path, output_dir):
+    """Custom extraction for tar.gz files that extracts all members into output_dir without subfolders."""
+    # check if the data already exists
+    if not os.path.isfile(output_dir) and os.path.isdir(output_dir) and os.listdir(output_dir):
+        return output_dir
+    os.makedirs(output_dir, exist_ok=True)
+
+    with tarfile.open(tar_path, "r:gz") as tar:
+        for member in tar.getmembers():
+            if member.isfile():
+                member.name = os.path.basename(member.name)
+                tar.extract(member, path=output_dir)
+
+    return output_dir
+
+
+def _extract_and_delete(dl_dir: dict) -> dict:
+    """Extracts the downloaded archives and deletes each archive file immediately, with a progress bar.
+    Only the archive currently being processed and its contents are stored at the same time."""
+    audio_paths = {name: [] for name, data in dl_dir.items() if isinstance(data, list)}
+    for name, data in dl_dir.items():
+        if not isinstance(data, list):
+            continue
+
+        # extract and immediately delete archives
+        for path in tqdm(data, f"Extracting {name} split"):
+            head, tail = os.path.split(path)
+            output_dir = os.path.join(head, "extracted", tail)
+            # audio_path = dl_manager.extract(path)  # works fine if no archive file contains subfolders
+            audio_path = _extract_all_to_same_folder(path, output_dir)
+            os.remove(path)
+            os.remove(f"{path}.lock")
+            os.remove(f"{path}.json")
+            audio_paths[name].append(audio_path)
+
+    return audio_paths
+
+
 class BirdSetConfig(datasets.BuilderConfig):
     def __init__(
             self,
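A note on the two helpers added above: _extract_all_to_same_folder flattens archive members by renaming each member.name to its basename before extraction, so files that sit in subfolders inside a shard all land directly in output_dir, and _extract_and_delete additionally removes the .lock and .json bookkeeping files the download manager leaves next to each archive, so only one archive plus its extracted contents occupies disk at a time. A minimal, self-contained sketch of the flattening pattern (the demo.tar.gz path and member layout are hypothetical):

import os
import tarfile

def flatten_extract(tar_path, output_dir):
    # extract every regular file directly into output_dir, dropping its folder prefix
    os.makedirs(output_dir, exist_ok=True)
    with tarfile.open(tar_path, "r:gz") as tar:
        for member in tar.getmembers():
            if member.isfile():
                member.name = os.path.basename(member.name)  # "songs/x.ogg" -> "x.ogg"
                tar.extract(member, path=output_dir)
    return output_dir

# hypothetical usage; note that two members sharing a basename would overwrite each other
flatten_extract("demo.tar.gz", "demo_extracted")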
@@ -58,7 +99,7 @@ class BirdSetConfig(datasets.BuilderConfig):
             species_group_list,
             order_list,
             **kwargs):
-        super().__init__(version=datasets.Version("0.0.
+        super().__init__(version=datasets.Version("0.0.4"), name=name, **kwargs)
 
         features = datasets.Features({
             "audio": datasets.Audio(sampling_rate=32_000, mono=True, decode=False),
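The version bump to "0.0.4" matters because datasets keys its processed-data cache on the config's version, so bumping it forces cached copies built by the old script to be regenerated. A minimal sketch of how a BuilderConfig subclass like this is typically attached to a builder (the class and config names here are illustrative, not from this file):

import datasets

class DemoConfig(datasets.BuilderConfig):
    def __init__(self, name, **kwargs):
        # bumping the version invalidates previously cached processed data
        super().__init__(version=datasets.Version("0.0.4"), name=name, **kwargs)

class DemoBuilder(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [DemoConfig(name="demo")]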
@@ -373,6 +414,7 @@ class BirdSet(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         ds_name = self.config.name
+        # settings for how many archive (tar.gz) files are uploaded for a specific dataset
         train_files = {"PER": 11,
                        "NES": 13,
                        "UHH": 5,
@@ -393,7 +435,7 @@ class BirdSet(datasets.GeneratorBasedBuilder):
                        "SSW": 36,
                        "SNE": 5}
 
-
+        test_5s_files = {"PER": 1,
                          "NES": 1,
                          "UHH": 1,
                          "HSN": 1,
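These dicts record how many shards each dataset ships with; the download calls in the next hunk expand them into shard file names of the form {ds_name}_train_shard_0001.tar.gz. A quick sketch of that expansion (the data/PER directory is a stand-in for self.config.data_dir):

import os

train_files = {"PER": 11, "NES": 13, "UHH": 5}  # excerpt of the table above
ds_name = "PER"
shards = [os.path.join("data/PER", f"{ds_name}_train_shard_{n:04d}.tar.gz")
          for n in range(1, train_files[ds_name] + 1)]
# shards[0]  -> 'data/PER/PER_train_shard_0001.tar.gz'
# shards[-1] -> 'data/PER/PER_train_shard_0011.tar.gz'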
@@ -402,157 +444,116 @@ class BirdSet(datasets.GeneratorBasedBuilder):
                          "SSW": 4,
                          "SNE": 1}
 
+        # different configs determine what needs to be downloaded
         if self.config.name.endswith("_xc"):
             ds_name = ds_name[:-3]
             dl_dir = dl_manager.download({
                 "train": [os.path.join(self.config.data_dir, f"{ds_name}_train_shard_{n:04d}.tar.gz") for n in range(1, train_files[ds_name] + 1)],
-                "
+                "meta_train": os.path.join(self.config.data_dir, f"{ds_name}_metadata_train.parquet"),
             })
 
         elif self.config.name.endswith("_scape"):
             ds_name = ds_name[:-6]
             dl_dir = dl_manager.download({
                 "test": [os.path.join(self.config.data_dir, f"{ds_name}_test_shard_{n:04d}.tar.gz") for n in range(1, test_files[ds_name] + 1)],
-                "test_5s": [os.path.join(self.config.data_dir, f"{ds_name}_test5s_shard_{n:04d}.tar.gz") for n in range(1,
-                "
-                "
+                "test_5s": [os.path.join(self.config.data_dir, f"{ds_name}_test5s_shard_{n:04d}.tar.gz") for n in range(1, test_5s_files[ds_name] + 1)],
+                "meta_test": os.path.join(self.config.data_dir, f"{ds_name}_metadata_test.parquet"),
+                "meta_test_5s": os.path.join(self.config.data_dir, f"{ds_name}_metadata_test_5s.parquet"),
             })
 
         # use POW for XCM/XCL validation
         elif self.config.name.startswith("XC"):
             dl_dir = dl_manager.download({
                 "train": [os.path.join(self.config.data_dir, f"{ds_name}_shard_{n:04d}.tar.gz") for n in range(1, train_files[ds_name] + 1)],
-                "valid": [os.path.join(self.config.data_dir[:-3] + "POW", f"POW_test5s_shard_{n:04d}.tar.gz") for n in range(1,
-                "
-                "
+                "valid": [os.path.join(self.config.data_dir[:-3] + "POW", f"POW_test5s_shard_{n:04d}.tar.gz") for n in range(1, test_5s_files["POW"] + 1)],
+                "meta_train": os.path.join(self.config.data_dir, f"{ds_name}_metadata.parquet"),
+                "meta_valid": os.path.join(self.config.data_dir[:-3] + "POW", f"POW_metadata_test_5s.parquet"),
             })
 
-
+        else:
             dl_dir = dl_manager.download({
                 "train": [os.path.join(self.config.data_dir, f"{ds_name}_train_shard_{n:04d}.tar.gz") for n in range(1, train_files[ds_name] + 1)],
                 "test": [os.path.join(self.config.data_dir, f"{ds_name}_test_shard_{n:04d}.tar.gz") for n in range(1, test_files[ds_name] + 1)],
-                "test_5s": [os.path.join(self.config.data_dir, f"{ds_name}_test5s_shard_{n:04d}.tar.gz") for n in range(1,
+                "test_5s": [os.path.join(self.config.data_dir, f"{ds_name}_test5s_shard_{n:04d}.tar.gz") for n in range(1, test_5s_files[ds_name] + 1)],
                 "meta_train": os.path.join(self.config.data_dir, f"{ds_name}_metadata_train.parquet"),
                 "meta_test": os.path.join(self.config.data_dir, f"{ds_name}_metadata_test.parquet"),
                 "meta_test_5s": os.path.join(self.config.data_dir, f"{ds_name}_metadata_test_5s.parquet"),
             })
 
-        […]
-                ),
-        […]
-            ]
+        # custom extraction that deletes archives right after extraction
+        audio_paths = _extract_and_delete(dl_dir) if not dl_manager.is_streaming else None
+
+        # construct split generators
+        # assumes that every key NAME in dl_dir also has a matching meta_NAME key
+        names = [name for name in dl_dir.keys() if not name.startswith("meta_")]
+        is_streaming = dl_manager.is_streaming
+
+        return [datasets.SplitGenerator(
+            name=name,
+            gen_kwargs={
+                "audio_archive_iterators": (dl_manager.iter_archive(archive_path) for archive_path in dl_dir[name]) if is_streaming else (),
+                "audio_extracted_paths": audio_paths[name] if not is_streaming else (),
+                "meta_path": dl_dir[f"meta_{name}"],
+                "split": name
+            }
+        ) for name in names]
+
+
+    def _generate_examples(self, audio_archive_iterators, audio_extracted_paths, meta_path, split):
+        metadata = pd.read_parquet(meta_path)
+        if metadata.index.name != "filepath":
+            metadata.index = metadata["filepath"].str.split("/").apply(lambda x: x[-1])
 
-        elif self.config.name.endswith("_scape"):
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    gen_kwargs={
-                        "audio_archive_iterators": [dl_manager.iter_archive(archive_path) for archive_path in dl_dir["test"]],
-                        "local_audio_archives_paths": local_audio_archives_paths["test"] if local_audio_archives_paths else None,
-                        "metapath": dl_dir["metadata"],
-                        "split": datasets.Split.TEST,
-                    },
-                ),
-                datasets.SplitGenerator(
-                    name="test_5s",
-                    gen_kwargs={
-                        "audio_archive_iterators": [dl_manager.iter_archive(archive_path) for archive_path in dl_dir["test_5s"]],
-                        "local_audio_archives_paths": local_audio_archives_paths["test_5s"] if local_audio_archives_paths else None,
-                        "metapath": dl_dir["metadata_5s"],
-                        "split": "test_multilabel"
-                    },
-                ),
-            ]
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "audio_archive_iterators": [dl_manager.iter_archive(archive_path) for archive_path in dl_dir["train"]],
-                    "local_audio_archives_paths": local_audio_archives_paths["train"] if local_audio_archives_paths else None,
-                    "metapath": dl_dir["meta_train"],
-                    "split": datasets.Split.TRAIN,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "audio_archive_iterators": [dl_manager.iter_archive(archive_path) for archive_path in dl_dir["test"]],
-                    "local_audio_archives_paths": local_audio_archives_paths["test"] if local_audio_archives_paths else None,
-                    "metapath": dl_dir["meta_test"],
-                    "split": datasets.Split.TEST,
-                },
-            ),
-            datasets.SplitGenerator(
-                name="test_5s",
-                gen_kwargs={
-                    "audio_archive_iterators": [dl_manager.iter_archive(archive_path) for archive_path in dl_dir["test_5s"]],
-                    "local_audio_archives_paths": local_audio_archives_paths["test_5s"] if local_audio_archives_paths else None,
-                    "metapath": dl_dir["meta_test_5s"],
-                    "split": "test_multilabel"
-                },
-            ),
-        ]
-
-    def _generate_examples(self, audio_archive_iterators, local_audio_archives_paths, metapath, split):
-        metadata = pd.read_parquet(metapath)
         idx = 0
-        […]
+        # in case of streaming
+        for audio_archive_iterator in audio_archive_iterators:
             for audio_path_in_archive, audio_file in audio_archive_iterator:
-                […]
-                rows = metadata
-                […]
-                audio = audio_path if local_audio_archives_paths else audio_file.read()
+                file_name = os.path.split(audio_path_in_archive)[-1]
+                rows = metadata.loc[[file_name]]
+                audio = audio_file.read()
                 for _, row in rows.iterrows():
-                    idx
-        […]
+                    yield idx, self._metadata_from_row(row, split, audio_path=file_name, audio=audio)
+                    idx += 1
+
+        # in case of not streaming
+        for audio_extracted_path in audio_extracted_paths:
+            audio_files = os.listdir(audio_extracted_path)
+            current_metadata = metadata.loc[audio_files]
+            for audio_file, row in current_metadata.iterrows():
+                audio_path = os.path.join(audio_extracted_path, audio_file)
+                yield idx, self._metadata_from_row(row, split, audio_path=audio_path)
+                idx += 1
+
+
+    @staticmethod
+    def _metadata_from_row(row, split: str, audio_path=None, audio=None) -> dict:
+        return {"audio": audio_path if not audio else {"path": None, "bytes": audio},
+                "filepath": audio_path,
+                "start_time": row["start_time"],
+                "end_time": row["end_time"],
+                "low_freq": row["low_freq"],
+                "high_freq": row["high_freq"],
+                "ebird_code": row["ebird_code"] if split != "test_5s" else None,
+                "ebird_code_multilabel": row.get("ebird_code_multilabel", None),
+                "ebird_code_secondary": row.get("ebird_code_secondary", None),
+                "call_type": row["call_type"],
+                "sex": row["sex"],
+                "lat": row["lat"],
+                "long": row["long"],
+                "length": row.get("length", None),
+                "microphone": row["microphone"],
+                "license": row.get("license", None),
+                "source": row["source"],
+                "local_time": row["local_time"],
+                "detected_events": row.get("detected_events", None),
+                "event_cluster": row.get("event_cluster", None),
+                "peaks": row.get("peaks", None),
+                "quality": row.get("quality", None),
+                "recordist": row.get("recordist", None),
+                "genus": row.get("genus", None) if split != "test_5s" else None,
+                "species_group": row.get("species_group", None) if split != "test_5s" else None,
+                "order": row.get("order", None) if split != "test_5s" else None,
+                "genus_multilabel": row.get("genus_multilabel", [row.get("genus")]),
+                "species_group_multilabel": row.get("species_group_multilabel", [row.get("species_group")]),
+                "order_multilabel": row.get("order_multilabel", [row.get("order")]),
+                }
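The rewritten _generate_examples relies on the metadata index being the audio file's basename, so metadata.loc[[file_name]] can return several rows for one file and yield one example per annotated segment. A self-contained sketch of that lookup with a toy DataFrame (file names invented):

import pandas as pd

metadata = pd.DataFrame({
    "filepath": ["a/x.ogg", "b/x.ogg", "c/y.ogg"],
    "start_time": [0.0, 5.0, 0.0],
})
# index by basename, as in _generate_examples
metadata.index = metadata["filepath"].str.split("/").apply(lambda x: x[-1])

rows = metadata.loc[["x.ogg"]]  # the list-of-labels form always returns a DataFrame
assert len(rows) == 2           # both segments of x.ogg become separate examples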
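With the splits now derived generically from the dl_dir keys, streaming and non-streaming loading share one code path: streaming iterates the shards via dl_manager.iter_archive, while non-streaming extracts them with _extract_and_delete first. A usage sketch, assuming the config names match the dataset keys above (e.g. HSN) and that a recent datasets release is used, where script-based datasets require trust_remote_code:

from datasets import load_dataset

# non-streaming: shards are downloaded, extracted, and deleted right after extraction
ds = load_dataset("DBD-research-group/BirdSet", "HSN", trust_remote_code=True)

# streaming: shards are iterated in place instead of being extracted
stream = load_dataset("DBD-research-group/BirdSet", "HSN", streaming=True, trust_remote_code=True)
first = next(iter(stream["test_5s"]))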
|