# Dataset loading script for the zeio/baneks dataset (scrape/header residue removed).
from pandas import read_csv
from datasets import GeneratorBasedBuilder, Value, Version, BuilderConfig, Features, DatasetInfo, SplitGenerator, Split
# Human-readable summary propagated into DatasetInfo below.
_DESCRIPTION = '''
This dataset contains anekdotes parsed from a few vk social network communities. The data can be useful for fine-tuning language generation models as well for tasks of automatic humour analysis.
'''
# Canonical dataset page on the Hugging Face Hub.
_HOMEPAGE = 'https://huggingface.co/datasets/zeio/baneks'
# License string surfaced through DatasetInfo.
_LICENSE = 'Apache License Version 2.0'
# Per-config download URLs; keys must match the BuilderConfig names declared in Baneks.
_URLS = {
    'censored': 'https://huggingface.co/datasets/zeio/baneks/resolve/main/censored.tsv',
    'default': 'https://huggingface.co/datasets/zeio/baneks/resolve/main/default.tsv',
    'inflated': 'https://huggingface.co/datasets/zeio/baneks/resolve/main/inflated.tsv'
}
class Baneks(GeneratorBasedBuilder):
    """Builder for the zeio/baneks jokes dataset.

    Each config ('censored', 'default', 'inflated') corresponds to a single
    TSV file hosted on the Hugging Face Hub and is exposed as one train split.
    """

    VERSION = Version('10.10.2023')

    BUILDER_CONFIGS = [
        BuilderConfig(name = 'censored', version = VERSION, description = 'No duplicates - entries with the same text are grouped and aggregated'),
        BuilderConfig(name = 'default', version = VERSION, description = 'Same as "censored", but censored words are replaced with inferred values for their initial form'),
        BuilderConfig(name = 'inflated', version = VERSION, description = 'Each entry corresponds to a post, minimal changes to the source data')
    ]

    DEFAULT_CONFIG_NAME = 'default'

    def _info(self):
        """Return dataset metadata: feature schema, homepage and license."""
        return DatasetInfo(
            description = _DESCRIPTION,
            features = Features({
                'text': Value('string'),
                'published': Value('string'),
                'id': Value('int32'),
                'n-likes': Value('int32'),
                # 'float' is the pyarrow alias for float32; kept floating-point,
                # presumably because view counts may be missing (NaN) — TODO confirm
                'n-views': Value('float'),
                'accessed': Value('string'),
                'source': Value('string')
            }),
            homepage = _HOMEPAGE,
            license = _LICENSE
        )

    def _split_generators(self, dl_manager):
        """Download the TSV for the selected config and expose it as the train split.

        The file is plain TSV, so download_and_extract simply returns the
        local cached path, which is forwarded to _generate_examples.
        """
        url = _URLS[self.config.name]
        return [
            SplitGenerator(
                name = Split.TRAIN,
                gen_kwargs = {
                    'path': dl_manager.download_and_extract(url)
                }
            )
        ]

    def _generate_examples(self, path: str):
        """Yield (key, example) pairs from the downloaded TSV file.

        The key joins the zero-padded post id with its source community,
        presumably because ids alone are not unique across communities.
        """
        for _, row in read_csv(path, sep = '\t').iterrows():
            yield f'{row["id"]:08d}-{row["source"]}', dict(row)