# batch / batch.py
# author: zeio
# feat(loader): updated last batch folder pointer in the index, added loading script
# commit 0f0d992 (verified)
# NOTE(review): the four lines above are Hugging Face web-page chrome that was
# captured together with the file ("raw / history blame / 5.65 kB"); they are
# commented out here so the module parses as Python.
import os
from pandas import read_csv
from datasets import GeneratorBasedBuilder, Value, Version, BuilderConfig, Features, DatasetInfo, SplitGenerator, Split, Audio, Sequence
# Human-readable dataset description surfaced through `DatasetInfo`.
_DESCRIPTION = '''
The dataset contains threads parsed from the /b/ board of 2ch archive
'''
# Dataset card / license metadata.
_HOMEPAGE = 'https://huggingface.co/datasets/zeio/batch'
_LICENSE = 'Apache License Version 2.0'
# Template for a cluster folder name, e.g. '0000-0019'.
_CLUSTER = '{first_page:04d}-{last_page:04d}'
# Per-config URL templates for the compressed thread clusters; '{cluster}'
# is filled with a value rendered from _CLUSTER.
_URLS = {
'written': 'https://huggingface.co/datasets/zeio/batch/resolve/main/threads-compressed/{cluster}.tar.xz',
'spoken': 'https://huggingface.co/datasets/zeio/batch-speech/raw/main/threads-compressed/{cluster}.tar.xz'
}
# TSV index mapping every thread to its cluster path and title.
_INDEX = 'https://huggingface.co/datasets/zeio/batch/resolve/main/index.tsv'
# Total number of pages and the batch (cluster) size used when iterating them.
_N_ITEMS = 1750
_N_BATCH = 20
class Batch(GeneratorBasedBuilder):
    """Threads parsed from the /b/ board of the 2ch archive.

    Two configurations are exposed:

    * ``written`` -- text-only threads: a title plus topics made of posts;
    * ``spoken``  -- the same text plus, for some threads, generated speech.
    """

    VERSION = Version('06.11.2023')

    BUILDER_CONFIGS = [
        BuilderConfig(
            name = 'written',
            version = VERSION,
            description = 'The base modification which contains only text representation of threads, which are divided into topics, which in turn are made of posts'
        ),
        BuilderConfig(
            name = 'spoken',
            version = VERSION,
            description = (
                'An extended configuration of the dataset in which besides text some threads have an associated audio data with speech '
                'generated for text in the respective thread using an alternating speaker pattern'
            )
        )
    ]

    DEFAULT_CONFIG_NAME = 'written'

    def _info(self):
        """Build the ``DatasetInfo`` whose features depend on the active config.

        Returns:
            DatasetInfo: description, features, homepage and license.

        Raises:
            ValueError: if the config name is neither 'written' nor 'spoken'.
        """
        if self.config.name == 'written':
            features = Features({
                'title': Value('string'),
                'topics': Sequence({
                    'posts': Sequence({
                        'text': Value('string')
                    })
                })
            })
        elif self.config.name == 'spoken':
            # Same text layout as 'written' plus one audio column per thread.
            features = Features({
                'title': Value('string'),
                'speech': Audio(sampling_rate = 48_000),
                'topics': Sequence({
                    'posts': Sequence({
                        'text': Value('string')
                    })
                })
            })
        else:
            raise ValueError(f'Unknown config: {self.config.name}')
        return DatasetInfo(
            description = _DESCRIPTION,
            features = features,
            homepage = _HOMEPAGE,
            license = _LICENSE
        )

    def _split_generators(self, dl_manager):
        """Download every thread cluster (plus speech clusters for 'spoken') and the index.

        Args:
            dl_manager: the `datasets` download manager.

        Returns:
            list: a single TRAIN ``SplitGenerator`` whose ``gen_kwargs`` map
            cluster keys ('threads/<first>-<last>') to local extraction paths.
        """
        name = self.config.name
        url = _URLS['written']
        spoken_url = _URLS['spoken'] if name == 'spoken' else None
        offset = 0
        written = {}
        spoken = None if spoken_url is None else {}
        while offset < _N_ITEMS:
            # NOTE(review): consecutive clusters share a boundary page
            # (e.g. '0000-0019' then '0019-0038') because the walrus assignment
            # leaves `offset` at `last_page`; this matches the archive's actual
            # folder naming per the commit history, so it is deliberately kept
            # as-is -- confirm against the remote repository before changing.
            cluster = _CLUSTER.format(first_page = offset, last_page = (offset := min(offset + _N_BATCH - 1, _N_ITEMS)))
            written[f'threads/{cluster}'] = dl_manager.download_and_extract(url.format(cluster = cluster))
            if spoken is not None:
                try:
                    spoken[f'threads/{cluster}'] = dl_manager.download_and_extract(spoken_url.format(cluster = cluster))
                except Exception: # speech for some clusters may be missing -- skip them
                    # (was a bare `except:`, which also swallowed KeyboardInterrupt
                    # and SystemExit during long downloads)
                    pass
        index = dl_manager.download_and_extract(_INDEX)
        return [
            SplitGenerator(
                name = Split.TRAIN,
                gen_kwargs = {
                    'written': written,
                    'spoken': spoken,
                    'index': index
                }
            )
        ]

    def _generate_examples(self, written: dict, index: str, spoken: dict = None):
        """Yield one example per row of the downloaded index TSV.

        Args:
            written: maps 'threads/<cluster>' keys to extracted text folders.
            index: local path of the downloaded index TSV.
            spoken: same mapping for speech folders, or ``None`` for 'written'.

        Yields:
            tuple: ``(row_number, example)`` where an example carries 'title'
            and 'topics', plus 'speech' when the matching mp3 file exists.
        """
        for i, row in read_csv(index, sep = '\t').iterrows():
            path = os.path.join(written[row['path']], f'{row["thread"]}.txt')
            # A thread file is a sequence of topics separated by blank lines;
            # every non-blank line is one post.
            topics = []
            posts = []
            with open(path, 'r', encoding = 'utf-8') as file:
                for line in file.read().split('\n'):
                    if line:
                        posts.append({'text': line})
                    elif len(posts) > 0:
                        topics.append({'posts': posts})
                        posts = []
            # NOTE(review): posts after the final blank line are never flushed,
            # so this relies on every thread file ending with a newline (the
            # original author explicitly commented out a trailing flush) --
            # confirm the upstream file format before adding one.
            item = {
                'title': row['title'],
                'topics': topics
            }
            if spoken is not None:
                speech_cluster_path = spoken.get(row['path'])
                if speech_cluster_path is not None:
                    speech_file_path = os.path.join(speech_cluster_path, f'{row["thread"]}.mp3')
                    if os.path.isfile(speech_file_path):
                        item['speech'] = speech_file_path
            yield i, item