import datasets
import pandas as pd

logger = datasets.logging.get_logger(__name__)

_DATA_PATH = "https://huggingface.co/datasets/conversy/clustering_files/resolve/main/dataset.pkl"
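# Note (assumption, inferred from _generate_examples below): the pickle at
# _DATA_PATH is expected to hold a pandas DataFrame with one row per speech
# segment and at least these columns: filename, segment_id, speaker, duration,
# segment_clean, start, end, readable_start, readable_end, and vp (a numpy
# array holding the segment's voice-print embedding).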
class ClusteringFilesConfig(datasets.BuilderConfig):
    """BuilderConfig for the Conversy benchmark."""

    def __init__(self, name, version, **kwargs):
        """BuilderConfig for the Conversy benchmark.

        Args:
            name: name of the configuration.
            version: version of the configuration.
            **kwargs: keyword arguments forwarded to super. `features`,
                `description`, `data_url`, and `nb_data_shards` are popped
                and stored on the config before forwarding.
        """
        self.name = name
        self.version = version
        self.features = kwargs.pop("features", None)
        self.description = kwargs.pop("description", None)
        self.data_url = kwargs.pop("data_url", None)
        self.nb_data_shards = kwargs.pop("nb_data_shards", None)
        super(ClusteringFilesConfig, self).__init__(
            name=name,
            version=version,
            **kwargs,
        )
class ClusteringFiles(datasets.GeneratorBasedBuilder):
    """Conversy benchmark."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        ClusteringFilesConfig(
            name="ClusteringFiles",
            version=VERSION,
            description="Conversy benchmark for ML model evaluation",
            features=["filename", "segments"],
            data_url=_DATA_PATH,
            nb_data_shards=1,
        )
    ]
    def _info(self):
        description = "Voice Print Clustering Benchmark"
        features = datasets.Features(
            {
                "filename": datasets.Value("string"),
                "segments": [
                    {
                        "segment_id": datasets.Value("int32"),
                        "speaker": datasets.Value("string"),
                        "duration": datasets.Value("float32"),
                        "segment_clean": datasets.Value("bool"),
                        "start": datasets.Value("float32"),
                        "end": datasets.Value("float32"),
                        "readable_start": datasets.Value("string"),
                        "readable_end": datasets.Value("string"),
                        "vp": datasets.Sequence(datasets.Value("float32")),
                    }
                ],
            }
        )
        return datasets.DatasetInfo(
            description=description,
            features=features,
            supervised_keys=None,
            version=self.config.version,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_url = self.config.data_url
        downloaded_file = dl_manager.download_and_extract(data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": downloaded_file},
            ),
        ]
    def _generate_examples(self, file_path):
        """Yields one example per audio file, grouping its segments together."""
        df = pd.read_pickle(file_path)
        # Group the flat per-segment rows of the DataFrame by filename.
        files = {}
        for _, row in df.iterrows():
            if row["filename"] not in files:
                files[row["filename"]] = {
                    "filename": row["filename"],
                    "segments": [],
                }
            files[row["filename"]]["segments"].append({
                "segment_id": row["segment_id"],
                "speaker": row["speaker"],
                "duration": row["duration"],
                "segment_clean": row["segment_clean"],
                "start": row["start"],
                "end": row["end"],
                "readable_start": row["readable_start"],
                "readable_end": row["readable_end"],
                "vp": row["vp"],
            })
        for idx, file_data in enumerate(files.values()):
            # Convert the numpy voice-print arrays to plain lists so they can
            # be encoded as a Sequence of float32 values.
            for segment in file_data["segments"]:
                segment["vp"] = segment["vp"].tolist()
            yield idx, file_data
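

# Minimal usage sketch (not part of the original script): assuming this file
# lives in the conversy/clustering_files repo on the Hugging Face Hub, the
# benchmark can be loaded roughly as below. The repo id and trust_remote_code
# flag are assumptions based on how `datasets` handles script-based datasets.
if __name__ == "__main__":
    ds = datasets.load_dataset(
        "conversy/clustering_files",
        split="train",
        trust_remote_code=True,  # needed for datasets defined by a loading script
    )
    example = ds[0]
    print(example["filename"], len(example["segments"]))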