import gzip
import json

import datasets
logger = datasets.logging.get_logger(__name__)
CITATION = """
"""
DESCRIPTION = """
The Open License Corpus
"""
OLC_SUBSET_NAMES = [
    "ccby_law",
    "ccby_s2orc",
    "ccby_stackexchange",
    "ccby_stackoverflow",
    "ccby_wikinews",
    "ccby_wikipedia",
    "pd_arxiv_abstracts",
    "pd_books",
    "pd_law",
    "pd_news",
    "pd_s2orc",
    "sw_amps_math",
    "sw_dm_math",
    "sw_github",
    "sw_hackernews",
    "sw_ubuntu_irc",
]
URL = "https://huggingface.co/datasets/kernelmachine/open-license-corpus/"
N_SHARDS_PER_SPLIT = {
    "ccby_s2orc": {"train": 5000},
    "ccby_law": {"train": 50},
    "ccby_stackexchange": {"train": 1500},
    "ccby_stackoverflow": {"train": 750},
    "ccby_wikinews": {"train": 42},
    "ccby_wikipedia": {"train": 3000},
    "pd_arxiv_abstracts": {"train": 1},
    "pd_books": {"train": 150},
    "pd_law": {"train": 2000},
    "pd_news": {"train": 10},
    "pd_s2orc": {"train": 30},
    "sw_amps_math": {"train": 2},
    "sw_dm_math": {"train": 239},
    "sw_github": {"train": 2500},
    "sw_hackernews": {"train": 16},
    "sw_ubuntu_irc": {"train": 27},
}
# Use "resolve/" (not "blob/") so the raw gzipped shard is downloaded rather
# than the HTML file-viewer page.
DATA_URL = 'https://huggingface.co/datasets/kernelmachine/open-license-corpus/resolve/main/data/{name}/{split}-{index:05d}-of-{n_shards:05d}.jsonl.gz'
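# For illustration (a hypothetical shard, assuming the naming convention
# handled in _split_generators below): DATA_URL.format(name="ccby_law",
# split="train", index=0, n_shards=49) yields
#   https://huggingface.co/datasets/kernelmachine/open-license-corpus/resolve/main/data/ccby_law/train-00000-of-00049.jsonl.gz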
class OpenLicenseCorpusConfig(datasets.BuilderConfig):
    """BuilderConfig for an OLC subset (currently unused: plain
    datasets.BuilderConfig instances are created below)."""

    def __init__(self, features, citation, **kwargs):
        super().__init__(**kwargs)
        # Store the arguments instead of silently discarding them.
        self.features = features
        self.citation = citation
class OpenLicenseCorpus(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=name)
        for name in OLC_SUBSET_NAMES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=URL,
            citation=CITATION,
        )
    def _split_generators(self, dl_manager):
        data_urls = {}
        for split in ["train"]:
            # NOTE: the "- 1" assumes the counts in N_SHARDS_PER_SPLIT are one
            # greater than the number of shard files actually stored in the
            # repo, so that the "-of-{n_shards:05d}" suffix matches the file
            # names on disk.
            n_shards = N_SHARDS_PER_SPLIT[self.config.name][split] - 1
            data_urls[split] = [
                DATA_URL.format(name=self.config.name, split=split, index=index, n_shards=n_shards)
                for index in range(n_shards)
            ]
        train_downloaded_files = dl_manager.download(data_urls["train"])
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files})]
    def _generate_examples(self, filepaths):
        """Yields examples in raw text form by iterating over all shard files."""
        id_ = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            # The inner open() lets `datasets` patch file access (e.g. for
            # streaming); gzip then decodes the compressed text stream.
            with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        example = json.loads(line)
                        # Keep only the declared "text" field so any extra keys
                        # in the JSONL records cannot clash with the schema.
                        yield id_, {"text": example["text"]}
                        id_ += 1
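
if __name__ == "__main__":
    # Minimal usage sketch, not part of the loading script proper: load one OLC
    # subset through the standard `datasets` API. The subset name is just an
    # example; any entry of OLC_SUBSET_NAMES works, and newer versions of
    # `datasets` may additionally require trust_remote_code=True.
    from datasets import load_dataset

    dataset = load_dataset(
        "kernelmachine/open-license-corpus",
        "sw_amps_math",  # small subset, quick to download
        split="train",
    )
    print(dataset[0]["text"][:200])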