import gzip
import json

import datasets
from datasets import load_dataset

logger = datasets.logging.get_logger(__name__)


CITATION = """
"""

DESCRIPTION = """\
The Open License Corpus (OLC): a collection of permissively licensed text,
grouped into subsets by license category: CC BY (ccby_*), public domain
(pd_*), and permissive software licenses (sw_*).
"""

OLC_SUBSET_NAMES = [
    "ccby_law",
    "ccby_s2orc",
    "ccby_stackexchange",
    "ccby_stackoverflow",
    "ccby_wikinews",
    "ccby_wikipedia",
    "pd_arxiv_abstracts",
    "pd_books",
    "pd_law",
    "pd_news",
    "pd_s2orc",
    "sw_amps_math",
    "sw_dm_math",
    "sw_github",
    "sw_hackernews",
    "sw_ubuntu_irc",
]

URL = "https://huggingface.co/datasets/kernelmachine/open-license-corpus/"

N_SHARDS_PER_SPLIT = {
    "ccby_s2orc": {"train": 5000},
    "ccby_law": {"train": 50},
    "ccby_stackexchange": {"train": 1500},
    "ccby_stackoverflow": {"train": 750},
    "ccby_wikinews": {"train": 42},
    "ccby_wikipedia": {"train": 3000},
    "pd_arxiv_abstracts": {"train": 1},
    "pd_books": {"train": 150},
    "pd_law": {"train": 2000},
    "pd_news": {"train": 10},
    "pd_s2orc": {"train": 30},
    "sw_amps_math": {"train": 2},
    "sw_dm_math": {"train": 239},
    "sw_github": {"train": 2500},
    "sw_hackernews": {"train": 16},
    "sw_ubuntu_irc": {"train": 27},
}
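
# Sanity check: the subset list and the shard table must stay in sync so
# that every config name maps to a known shard count. Both collections are
# defined above; this only guards against future edits drifting apart.
assert set(OLC_SUBSET_NAMES) == set(N_SHARDS_PER_SPLIT), (
    "OLC_SUBSET_NAMES and N_SHARDS_PER_SPLIT list different subsets"
)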

DATA_URL = (
    "https://huggingface.co/datasets/kernelmachine/open-license-corpus/"
    "resolve/main/data/{name}/{split}-{index:05d}-of-{n_shards:05d}.jsonl.gz"
)
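# Assuming the standard "{index}-of-{total}" shard naming, shard 0 of the
# 50-shard ccby_law training split, for example, resolves to:
# https://huggingface.co/datasets/kernelmachine/open-license-corpus/resolve/main/data/ccby_law/train-00000-of-00050.jsonl.gz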


class OpenLicenseCorpusConfig(datasets.BuilderConfig):
    """BuilderConfig for a single Open License Corpus subset."""

    def __init__(self, features=None, citation=None, **kwargs):
        super().__init__(**kwargs)
        # Keep the extra metadata rather than silently discarding it.
        self.features = features
        self.citation = citation


class OpenLicenseCorpus(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIGS = [
        OpenLicenseCorpusConfig(name=name)
        for name in OLC_SUBSET_NAMES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=URL,
            citation=CITATION,
        )

    def _split_generators(self, dl_manager):
        data_urls = {}
        for split in ["train"]:
            # Assumes the standard shard naming, where filenames embed the
            # total shard count and indices run from 0 to n_shards - 1.
            n_shards = N_SHARDS_PER_SPLIT[self.config.name][split]
            data_urls[split] = [
                DATA_URL.format(name=self.config.name, split=split, index=index, n_shards=n_shards)
                for index in range(n_shards)
            ]

        train_downloaded_files = dl_manager.download(data_urls["train"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": train_downloaded_files},
            )
        ]

    def _generate_examples(self, filepaths):
        """Yield examples in raw (text) form by iterating over all the files."""
        id_ = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            # gzip.open accepts the path directly; no need to wrap a raw
            # file object.
            with gzip.open(filepath, "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        example = json.loads(line)
                        # Emit only the declared "text" feature so stray keys
                        # in the JSON lines cannot break schema validation.
                        yield id_, {"text": example["text"]}
                        id_ += 1
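

if __name__ == "__main__":
    # Minimal usage sketch: load one of the smaller subsets through this
    # loader and print the start of the first record. Assumes this script is
    # saved locally as open_license_corpus.py (hypothetical filename); any
    # name from OLC_SUBSET_NAMES works as the config name.
    dataset = load_dataset("open_license_corpus.py", "pd_arxiv_abstracts", split="train")
    print(dataset[0]["text"][:200])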