|
from datasets import Features, Sequence, Value |
|
# Comment records share one schema whether they hang off the question or
# off an individual answer; define it once and copy it into each Sequence.
_comment_schema = {
    "id": Value("string"),
    "body": Value("string"),
    "at": Value("string"),
    "score": Value("string"),
    "author": Value("string"),
    "author_rep": Value("string"),
}

# Feature layout of one extracted question: the question fields, its
# comment thread, and its answers (each answer carrying its own comments).
features = Features({
    "id": Value("string"),
    "asked_at": Value("string"),
    "author_name": Value("string"),
    "author_rep": Value("string"),
    "score": Value("int32"),
    "title": Value("string"),
    "tags": Sequence(Value("string")),
    "body": Value("string"),
    "comments": Sequence(dict(_comment_schema)),
    "answers": Sequence({
        "id": Value("string"),
        "body": Value("string"),
        "score": Value("int32"),
        "ts": Value("string"),
        "author": Value("string"),
        "author_rep": Value("string"),
        "accepted": Value("bool"),
        "comments": Sequence(dict(_comment_schema)),
    }),
})
|
|
|
|
|
"""The dataset is a collection of questions and answers automatically extracted from the Stack Exchange community network."""
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
import zstandard |
|
import io |
|
|
|
import datasets |
|
|
|
|
|
|
|
# Human-readable summary surfaced through DatasetInfo; the previous value
# was leftover cookiecutter placeholder text that did not describe the data.
_DESCRIPTION = """\
The dataset is a collection of questions and answers automatically extracted from the Stack Exchange community network.
"""

# Dataset card on the Hugging Face Hub.
_HOMEPAGE = "https://huggingface.co/datasets/nurik040404/mse"

# Relative path of the zstd-compressed JSON-lines dump; resolved by the
# download manager against the dataset repository root.
_URL = 'dataset.jsonl.zst'
|
|
|
|
|
class StackExchange(datasets.GeneratorBasedBuilder):
    """Questions and answers automatically extracted from a Stack Exchange community."""

    VERSION = datasets.Version("1.0.0")

    # NOTE(review): the datasets library registers configs from the plural
    # ``BUILDER_CONFIGS`` (a list); this singular attribute is ignored by the
    # framework.  Kept unchanged to avoid altering cache/config names --
    # confirm whether ``BUILDER_CONFIGS = [datasets.BuilderConfig(name=_URL)]``
    # was intended.
    BUILDER_CONFIG = datasets.BuilderConfig(name=_URL)

    def _info(self):
        """Return the dataset metadata (description, feature schema, homepage)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        """Download the compressed dump and expose it as a single TRAIN split."""
        data_file = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_file,
                },
            )
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) tuples from a zstd-compressed JSON-lines file.

        Bug fix: the original loop condition ``while s.readable()`` is always
        True for an open stream, so at end of file ``s.readline()`` returned
        ``''`` and ``json.loads('')`` raised JSONDecodeError instead of the
        generator stopping.  Iterating the text wrapper line by line
        terminates cleanly at EOF.
        """
        with open(filepath, 'rb') as f:
            dctx = zstandard.ZstdDecompressor()
            with dctx.stream_reader(f) as reader:
                # JSON text is UTF-8 by specification (RFC 8259).
                with io.TextIOWrapper(reader, encoding='utf-8') as lines:
                    for key, line in enumerate(lines):
                        line = line.strip()
                        if line:  # tolerate blank / trailing lines
                            yield key, json.loads(line)