# coding=utf-8

"""Watkins Marine Mammal Sound Database."""


import os
import random
import typing as tp
from collections import defaultdict
from pathlib import Path

import datasets

SAMPLE_RATE = 16_000  # datasets.Audio resamples decoded audio to this rate

_COMPRESSED_FILENAME = 'watkins.zip'  # local archive extracted by dl_manager

CLASSES = [
    'Atlantic_Spotted_Dolphin', 'Bearded_Seal', 'Beluga,_White_Whale',
    'Bottlenose_Dolphin', 'Bowhead_Whale', 'Clymene_Dolphin', 'Common_Dolphin',
    'False_Killer_Whale', 'Fin,_Finback_Whale', 'Frasers_Dolphin',
    'Grampus,_Rissos_Dolphin', 'Harp_Seal', 'Humpback_Whale', 'Killer_Whale',
    'Leopard_Seal', 'Long-Finned_Pilot_Whale', 'Melon_Headed_Whale',
    'Minke_Whale', 'Narwhal', 'Northern_Right_Whale',
    'Pantropical_Spotted_Dolphin', 'Ross_Seal', 'Rough-Toothed_Dolphin',
    'Short-Finned_Pacific_Pilot_Whale', 'Southern_Right_Whale', 'Sperm_Whale',
    'Spinner_Dolphin', 'Striped_Dolphin', 'Walrus', 'White-beaked_Dolphin',
    'White-sided_Dolphin',
]


class WmmsConfig(datasets.BuilderConfig):
    """BuilderConfig for WMMS."""

    def __init__(self, features, **kwargs):
        super().__init__(version=datasets.Version("0.0.1", ""), **kwargs)
        self.features = features


class WMMS(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Watkins Marine Mammal Sound Database."""

    BUILDER_CONFIGS = [
        WmmsConfig(
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=SAMPLE_RATE),
                    "species": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=CLASSES),
                }
            ),
            name="wmms", 
            description='',
        ), 
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description="Database can be downloaded from https://archive.org/details/watkins_202104",
            features=self.config.features,
            supervised_keys=None,
            homepage="",
            citation="",
            task_templates=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        archive_path = dl_manager.extract(_COMPRESSED_FILENAME)
        extensions = ['.wav']

        _remove_class = 'Weddell_Seal' # only 2 samples in the dataset
        _, _walker = fast_scandir(archive_path, extensions, recursive=True)
        filepaths = [f for f in _walker if default_find_classes(f) != _remove_class]
        labels = [default_find_classes(f) for f in filepaths]

        # Step 1: Organize samples by class
        class_to_files = defaultdict(list)
        for filepath, label in zip(filepaths, labels):
            class_to_files[label].append(filepath)
        
        # Step 2: Select exactly n samples per class for the test set
        n_shot = 5
        test_files, test_labels = [], []
        train_files, train_labels = [], []
        
        for label, files in class_to_files.items():
            # Every class must contribute at least n_shot test samples
            if len(files) < n_shot:
                raise ValueError(f"Not enough samples for class {label}")

            # Fixed-seed shuffle so the split is reproducible across runs
            random.Random(914).shuffle(files)

            test_files.extend(files[:n_shot])  # first n_shot samples go to test
            test_labels.extend([label] * n_shot)

            train_files.extend(files[n_shot:])  # the remainder goes to train
            train_labels.extend([label] * (len(files) - n_shot))
        
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"audio_paths": train_files, "split": "train"}
            ), 
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"audio_paths": test_files, "split": "test"}
            ), 
        ]

    def _generate_examples(self, audio_paths, split=None):
        for guid, audio_path in enumerate(audio_paths):
            species = default_find_classes(audio_path)
            # Yield only the keys declared in the config's features
            yield guid, {
                "file": audio_path,
                "audio": audio_path,
                "species": species,
                "label": species,  # ClassLabel encodes the name to an integer id
            }


def default_find_classes(audio_path):
    """Infer the class name from the audio file's parent directory name."""
    return Path(audio_path).parent.stem
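
# For example, given <root>/Killer_Whale/61025001.wav (illustrative path),
# default_find_classes returns "Killer_Whale".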


def fast_scandir(path: str, exts: tp.List[str], recursive: bool = False):
    """Scan `path` for files with the given extensions; faster than glob.

    Adapted from github.com/drscotthawley/aeiou/blob/main/aeiou/core.py
    """
    subfolders, files = [], []

    try:  # skip directories we are not permitted to read
        for f in os.scandir(path):
            try:  # skip entries with broken or too deeply nested symlinks
                if f.is_dir():
                    subfolders.append(f.path)
                elif f.is_file() and os.path.splitext(f.name)[1].lower() in exts:
                    files.append(f.path)
            except OSError:
                pass
    except OSError:
        pass

    if recursive:
        for subfolder in list(subfolders):
            sf, fs = fast_scandir(subfolder, exts, recursive=recursive)
            subfolders.extend(sf)
            files.extend(fs)

    return subfolders, files
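

# Minimal local sanity check; "./watkins" is an assumed extraction location
# for watkins.zip, not something this script manages.
if __name__ == "__main__":
    dirs, wavs = fast_scandir("./watkins", [".wav"], recursive=True)
    print(f"Found {len(dirs)} subfolders and {len(wavs)} .wav files")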