import csv
import logging
import sys
from csv import DictReader

import datasets
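
# Story fields can blow past csv's default 128 KiB field limit, so raise it.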
csv.field_size_limit(sys.maxsize)
logger = logging.getLogger(__name__)


class FFV4Config(datasets.BuilderConfig):
    """BuilderConfig for FFV4."""

    def __init__(self, filename: str, info: str, **kwargs):
        """BuilderConfig for FFV4.

        Args:
            filename: *string*, CSV file for the dataset.
            info: *string*, human-readable description of the dataset variant.
            **kwargs: keyword arguments forwarded to super.
        """
        # Version history:
        # 0.0.1: Initial version
        super().__init__(version=datasets.Version("0.0.1"), **kwargs)
        self.filename = filename
        self.info = info


class FFV4(datasets.GeneratorBasedBuilder):
    """Filtered story dumps from the V4 ffarchive notebook."""

    BUILDER_CONFIGS = [
        FFV4Config(
            name="notebook_defaults",
            filename="notebook_defaults.csv",
            info="the result of using the default values in the V4 ffarchive notebook, except without the TS/RD filter",
        ),
        FFV4Config(
            name="notebook_defaults_ratio0.8_likes10",
            filename="ratio0.8_likes10.csv",
            info="default filter, but with the score filter replaced with '.ratio > 0.8, .likes > 10'",
        ),
    ]
    DEFAULT_CONFIG_NAME = "notebook_defaults"

    def _info(self):
        return datasets.DatasetInfo(
            description="Garbage datasets for LLM training",
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "header": datasets.Value("string"),
                    "story": datasets.Value("string"),
                }
            ),
            homepage="https://main.horse",
        )

    def _split_generators(self, dl_manager):
        # The CSVs ship alongside this script, so nothing needs downloading;
        # every row goes into a single custom "everything" split.
        return [
            datasets.SplitGenerator(name="everything", gen_kwargs={"filepath": self.config.filename}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            for d in DictReader(f):
                # DictReader yields one dict per row, keyed by the CSV header
                # columns (id, header, story), which already match the schema.
                yield d["id"], d
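

# A minimal smoke test (a sketch: the "ffv4.py" filename and the CSVs sitting
# next to the script are assumptions, not something the code above pins down).
# Running the script directly loads the default config and prints the first row.
if __name__ == "__main__":
    ds = datasets.load_dataset("ffv4.py", "notebook_defaults", split="everything")
    print(ds)
    print(ds[0]["header"])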