|
import os |
|
from pandas import read_csv |
|
|
|
from datasets import GeneratorBasedBuilder, Value, Version, BuilderConfig, Features, DatasetInfo, SplitGenerator, Split, Audio, Sequence |
|
|
|
# Dataset card description shown on the hub page.
_DESCRIPTION = '''

The dataset contains threads parsed from the /b/ board of 2ch archive

'''



# Canonical dataset page on the Hugging Face hub.
_HOMEPAGE = 'https://huggingface.co/datasets/zeio/batch'



_LICENSE = 'Apache License Version 2.0'



# Template for an archive shard ("cluster") name, e.g. '0000-0019';
# page numbers are zero-padded to four digits.
_CLUSTER = '{first_page:04d}-{last_page:04d}'

# Per-configuration URL templates for the compressed thread archives;
# '{cluster}' is filled with a value rendered from _CLUSTER.
_URLS = {

    'written': 'https://huggingface.co/datasets/zeio/batch/resolve/main/threads-compressed/{cluster}.tar.xz',

    'spoken': 'https://huggingface.co/datasets/zeio/batch-speech/raw/main/threads-compressed/{cluster}.tar.xz'

}

# Tab-separated index mapping each thread to its cluster path and title.
_INDEX = 'https://huggingface.co/datasets/zeio/batch/resolve/main/index.tsv'



# Total number of pages covered by the clusters (see _split_generators).
_N_ITEMS = 1750

# Number of pages grouped into a single cluster archive.
_N_BATCH = 20
|
|
|
|
|
class Batch(GeneratorBasedBuilder):
    """Builder for threads parsed from the /b/ board of the 2ch archive.

    Two configurations are exposed:

    - ``written`` -- text-only threads, each divided into topics that are in
      turn made of posts;
    - ``spoken`` -- the same texts plus, where available, generated speech
      audio for each thread.
    """

    VERSION = Version('06.11.2023')

    BUILDER_CONFIGS = [
        BuilderConfig(
            name = 'written',
            version = VERSION,
            description = 'The base modification which contains only text representation of threads, which are divided into topics, which in turn are made of posts'
        ),
        BuilderConfig(
            name = 'spoken',
            version = VERSION,
            description = (
                'An extended configuration of the dataset in which besides text some threads have an associated audio data with speech '
                'generated for text in the respective thread using an alternating speaker pattern'
            )
        )
    ]

    DEFAULT_CONFIG_NAME = 'written'

    def _info(self):
        """Return the :class:`DatasetInfo` for the selected configuration.

        Raises:
            ValueError: if ``self.config.name`` is neither ``'written'`` nor
                ``'spoken'``.
        """
        if self.config.name == 'written':
            features = Features({
                'title': Value('string'),
                'topics': Sequence({
                    'posts': Sequence({
                        'text': Value('string')
                    })
                })
            })
        elif self.config.name == 'spoken':
            # Same text features plus an audio column for the generated speech.
            features = Features({
                'title': Value('string'),
                'speech': Audio(sampling_rate = 48_000),
                'topics': Sequence({
                    'posts': Sequence({
                        'text': Value('string')
                    })
                })
            })
        else:
            raise ValueError(f'Unknown config: {self.config.name}')

        return DatasetInfo(
            description = _DESCRIPTION,
            features = features,
            homepage = _HOMEPAGE,
            license = _LICENSE
        )

    def _split_generators(self, dl_manager):
        """Download and extract all cluster archives plus the thread index.

        Text archives are always fetched; for the ``spoken`` configuration the
        matching speech archives are fetched best-effort (not every cluster
        has one). Returns a single TRAIN split whose ``gen_kwargs`` map keys
        of the form ``threads/<first>-<last>`` to local extraction paths.
        """
        name = self.config.name

        url = _URLS['written']
        spoken_url = _URLS['spoken'] if name == 'spoken' else None

        offset = 0

        written = {}
        spoken = None if spoken_url is None else {}

        while offset < _N_ITEMS:
            # NOTE(review): consecutive clusters share a boundary page
            # (0000-0019 is followed by 0019-0038, ...) because the walrus
            # assignment advances `offset` to `last_page`, not past it.
            # Preserved as-is since the remote archive naming is presumed to
            # follow the same scheme -- confirm against the repository listing.
            cluster = _CLUSTER.format(first_page = offset, last_page = (offset := min(offset + _N_BATCH - 1, _N_ITEMS)))

            written[f'threads/{cluster}'] = dl_manager.download_and_extract(url.format(cluster = cluster))

            if spoken is not None:
                # Speech archives exist only for a subset of clusters, so a
                # failed download simply means "no audio for this cluster".
                # Catch Exception rather than a bare except so that
                # KeyboardInterrupt / SystemExit still propagate.
                try:
                    spoken[f'threads/{cluster}'] = dl_manager.download_and_extract(spoken_url.format(cluster = cluster))
                except Exception:
                    pass

        index = dl_manager.download_and_extract(_INDEX)

        return [
            SplitGenerator(
                name = Split.TRAIN,
                gen_kwargs = {
                    'written': written,
                    'spoken': spoken,
                    'index': index
                }
            )
        ]

    def _generate_examples(self, written: dict, index: str, spoken: dict = None):
        """Yield ``(key, item)`` pairs, one per thread listed in the index.

        Args:
            written: mapping from index ``path`` values to the local
                directories the text archives were extracted into.
            spoken: like ``written`` but for the speech archives, or ``None``
                for the ``written`` configuration.
            index: local path to the downloaded tab-separated index file.
        """
        for i, row in read_csv(index, sep = '\t').iterrows():
            path = os.path.join(written[row['path']], f'{row["thread"]}.txt')

            topics = []
            posts = []

            # Threads are stored one post per line; a blank line closes the
            # current topic.
            with open(path, 'r', encoding = 'utf-8') as file:
                for line in file.read().split('\n'):
                    if line:
                        posts.append({'text': line})
                    elif len(posts) > 0:
                        topics.append({'posts': posts})
                        posts = []

            # Flush a trailing topic when the file does not end with a blank
            # line; previously such a final topic was silently dropped.
            if posts:
                topics.append({'posts': posts})

            item = {
                'title': row['title'],
                'topics': topics
            }

            if spoken is not None:
                speech_cluster_path = spoken.get(row['path'])

                if speech_cluster_path is not None:
                    speech_file_path = os.path.join(speech_cluster_path, f'{row["thread"]}.mp3')

                    # Attach audio only when a file was actually generated for
                    # this particular thread.
                    if os.path.isfile(speech_file_path):
                        item['speech'] = speech_file_path

            yield i, item
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|