"""RedPajama V2: Quality annotated Web Text Documents.""" |
|
|
|
import gzip |
|
import json |
|
import traceback |
|
from typing import List |
|
|
|
import datasets |
|
import pyarrow.parquet as pq |
|
import numpy as np |
|
|
|
logger = datasets.logging.get_logger(__name__) |
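# Typical usage (illustrative): this loading script is meant to back the
# "togethercomputer/RedPajama-Data-V2" dataset on the Hugging Face Hub, so the usual
# entry point is datasets.load_dataset rather than importing this module directly:
#
#   from datasets import load_dataset
#   ds = load_dataset(
#       "togethercomputer/RedPajama-Data-V2",
#       name="sample-10B",          # or "default" / "random-sample" with config kwargs
#       trust_remote_code=True,
#   )
#
# The config kwargs understood by the "default" and "random-sample" configs are the
# attributes defined on RedPajamaDataV2Config below (partition, snapshots, languages,
# deduplication, shard_fraction, shards).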
|
|
|
_DESCRIPTION = """\
RedPajama V2: an Open Dataset for Training Large Language Models
"""

_URL_BASE = "https://data.together.xyz/redpajama-data-v2/v1.0.0"
_LANGUAGES = ("en", "de", "fr", "es", "it")
_MISSING_FILES_PATTERN = "urls/missing-{component}.txt"
_NUM_SHARDS = 5000
_SHARDS_ID = list(range(_NUM_SHARDS))
# number of shard files per (language, snapshot, partition) used by each sample config
_SUBSAMPLE_FILE_COUNTS = {"sample-10B": 1, "sample-100B": 10, "sample-1T": 100}
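# Data files live under _URL_BASE, keyed by snapshot, zero-padded shard id, language,
# and partition; the loader assembles urls of the form (illustrative values):
#   {_URL_BASE}/documents/2023-06/0000/en_head.json.gz
#   {_URL_BASE}/quality_signals/2023-06/0000/en_head.signals.json.gz
#   {_URL_BASE}/duplicates/2023-06/0000/en_head.duplicates.parquet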
|
|
|
_CC_SNAPSHOT_IDS = (
    "2014-15", "2014-23", "2014-35", "2014-41", "2014-42", "2014-49",
    "2014-52", "2015-14", "2015-22", "2015-27", "2015-32", "2015-35",
    "2015-40", "2015-48", "2016-07", "2016-18", "2016-22", "2016-26",
    "2016-30", "2016-36", "2016-40", "2016-44", "2016-50", "2017-04",
    "2017-09", "2017-17", "2017-22", "2017-26", "2017-30", "2017-34",
    "2017-39", "2017-43", "2017-47", "2017-51", "2018-05", "2018-09",
    "2018-13", "2018-17", "2018-22", "2018-26", "2018-30", "2018-34",
    "2018-39", "2018-43", "2018-47", "2018-51", "2019-04", "2019-09",
    "2019-13", "2019-18", "2019-22", "2019-26", "2019-30", "2019-35",
    "2019-39", "2019-43", "2019-47", "2019-51", "2020-05", "2020-10",
    "2020-16", "2020-24", "2020-29", "2020-34", "2020-40", "2020-45",
    "2020-50", "2021-04", "2021-10", "2021-17", "2021-21", "2021-25",
    "2021-31", "2021-39", "2021-43", "2021-49", "2022-05", "2022-21",
    "2022-27", "2022-33", "2022-40", "2022-49", "2023-06", "2023-14",
)
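# 84 CommonCrawl crawls, from 2014-15 through 2023-14. A user-supplied `snapshots`
# config option is filtered and re-ordered against this tuple in _split_generators_full.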
|
|
|
|
|
class RedPajamaDataV2Config(datasets.BuilderConfig):
    """BuilderConfig for RedPajama V2."""

    def __init__(self, *args, **kwargs):
        """BuilderConfig for RedPajama V2.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        # pop the RedPajama-specific options first; datasets.BuilderConfig does not
        # accept them as keyword arguments
        self.partition: str = kwargs.pop("partition", "all")
        self.snapshots: List[str] = kwargs.pop("snapshots", _CC_SNAPSHOT_IDS)
        self.languages: List[str] = kwargs.pop("languages", _LANGUAGES)
        self.deduplication: bool = kwargs.pop("deduplication", False)
        self.shard_fraction: float = kwargs.pop("shard_fraction", 1.0)
        self.shards: List[int] = kwargs.pop("shards", _SHARDS_ID)
        super(RedPajamaDataV2Config, self).__init__(*args, **kwargs)
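    # Example (illustrative): a custom config equivalent to passing these options as
    # load_dataset(..., name="default", ...) keyword arguments:
    #
    #   RedPajamaDataV2Config(
    #       name="default",
    #       partition="head_middle",
    #       snapshots=["2023-06", "2023-14"],
    #       languages=["en"],
    #       deduplication=True,
    #   )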
|
|
|
class RedPajamaV2(datasets.GeneratorBasedBuilder):
    """RedPajama V2: Quality annotated Web Text Documents."""

    BUILDER_CONFIGS = [
        RedPajamaDataV2Config(
            name="sample-10B",
            version=datasets.Version("1.0.0", ""),
            description="RedPajamaV2 Sample with 10B tokens",
        ),
        RedPajamaDataV2Config(
            name="sample-100B",
            version=datasets.Version("1.0.0", ""),
            description="RedPajamaV2 Sample with 100B tokens",
        ),
        RedPajamaDataV2Config(
            name="sample-1T",
            version=datasets.Version("1.0.0", ""),
            description="RedPajamaV2 Sample with 1T tokens",
        ),
        RedPajamaDataV2Config(
            name="default",
            version=datasets.Version("1.0.0", ""),
            description="RedPajamaV2",
        ),
        RedPajamaDataV2Config(
            name="random-sample",
            version=datasets.Version("1.0.0", ""),
            description="RedPajamaV2 Random Sample.",
        ),
    ]
|
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "raw_content": datasets.Value("string"),
                    "doc_id": datasets.Value("string"),
                    "meta": datasets.Value("string"),
                    "quality_signals": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
        )
|
    @staticmethod
    def _align_order(source, target):
        """Return the elements of `target` that also appear in `source`,
        preserving the order of `target`."""
        res = []
        for i in target:
            if i in source:
                res.append(i)
        return res
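    # e.g. _align_order(source=["2023-14", "2014-15"], target=_CC_SNAPSHOT_IDS)
    # returns ["2014-15", "2023-14"]: only entries of `target` that also occur in
    # `source` are kept, in the canonical order of `target`.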
|
|
|
    def _split_generators_full(self, dl_manager):
        snapshots = self._align_order(
            getattr(self.config, "snapshots", _CC_SNAPSHOT_IDS), _CC_SNAPSHOT_IDS
        )
        languages = self._align_order(
            getattr(self.config, "languages", _LANGUAGES), _LANGUAGES
        )
        partition = getattr(self.config, "partition", "all")
        shards_fraction = getattr(self.config, "shard_fraction", 1.0)

        # keep only valid shard ids, sorted in ascending order
        shards = np.array(getattr(self.config, "shards", _SHARDS_ID))
        shards = np.sort(shards[shards < _NUM_SHARDS]).tolist()

        if self.config.name in ("sample-10B", "sample-100B", "sample-1T"):
            # the token-count samples cover the head and middle partitions of all
            # languages and snapshots, with a fixed number of shards per combination
            partition = "head_middle"
            languages = _LANGUAGES
            snapshots = _CC_SNAPSHOT_IDS
            shards = _SHARDS_ID[: _SUBSAMPLE_FILE_COUNTS[self.config.name]]

        if partition == "all":
            partitions = ["head", "middle", "tail"]
        elif partition == "head_middle":
            partitions = ["head", "middle"]
        elif partition == "tail":
            partitions = [partition]
        else:
            raise ValueError(f"invalid partition: {partition}")
|
        # fetch the per-component lists of files that are known to be missing upstream
        missing_files_paths = dl_manager.download_and_extract(
            {
                component: _MISSING_FILES_PATTERN.format(component=component)
                for component in ("documents", "signals", "duplicates")
            }
        )

        missing_files = {}
        for component, missing_file in missing_files_paths.items():
            with open(missing_file, "r", encoding="utf-8") as f:
                missing_files[component] = set(line.strip() for line in f)
|
        documents_urls = {}
        quality_signals_urls = {}
        duplicates_ids_urls = {}
        base_tags = []

        for i_lang, lang in enumerate(languages):
            for i_snapshot, snapshot in enumerate(snapshots):
                for i_part, part in enumerate(partitions):
                    loc_shards = shards
                    if self.config.name == "random-sample":
                        # draw a deterministic random subset of shards for this
                        # (language, snapshot, partition) combination
                        seed = (
                            42
                            + i_lang * len(_LANGUAGES)
                            + i_snapshot * len(_CC_SNAPSHOT_IDS)
                            + i_part * 3
                        )
                        np.random.seed(seed)
                        loc_shards = np.random.choice(
                            shards,
                            size=max(1, int(len(shards) * shards_fraction)),
                            replace=False,
                        )

                    for n in loc_shards:
                        base_tag = f"{snapshot}/{n:04d}/{lang}_{part}"
                        base_tags.append(base_tag)

                        # documents
                        url = f"{_URL_BASE}/documents/{base_tag}.json.gz"
                        if url not in missing_files["documents"]:
                            documents_urls[base_tag] = url

                        # quality signals
                        url = f"{_URL_BASE}/quality_signals/{base_tag}.signals.json.gz"
                        if url not in missing_files["signals"]:
                            quality_signals_urls[base_tag] = url

                        # duplicate ids
                        url = f"{_URL_BASE}/duplicates/{base_tag}.duplicates.parquet"
                        if url not in missing_files["duplicates"]:
                            duplicates_ids_urls[base_tag] = url
|
logger.info(f"Downloading {len(documents_urls)} documents files.") |
|
documents_files = dl_manager.download(documents_urls) |
|
|
|
|
|
logger.info(f"Downloading {len(quality_signals_urls)} quality signals files.") |
|
quality_signals_files = dl_manager.download(quality_signals_urls) |
|
|
|
|
|
logger.info(f"Downloading {len(duplicates_ids_urls)} duplicates ids files.") |
|
duplicates_ids_files = dl_manager.download(duplicates_ids_urls) |
|
|
|
return [ |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TRAIN, |
|
gen_kwargs={ |
|
"base_tags": base_tags, |
|
"documents_files": documents_files, |
|
"quality_signals_files": quality_signals_files, |
|
"duplicates_ids_files": duplicates_ids_files, |
|
}, |
|
) |
|
] |
|
|
|
    def _split_generators(self, dl_manager):
        return self._split_generators_full(dl_manager)
|
    def _generate_examples(
        self, base_tags, documents_files, quality_signals_files, duplicates_ids_files
    ):
        key = 0
        for base_tag in base_tags:
            doc_file = documents_files.get(base_tag)
            qs_file = quality_signals_files.get(base_tag)
            dupe_file = duplicates_ids_files.get(base_tag)

            if doc_file is None:
                # the documents file for this shard is missing upstream
                continue

            for sample in self.__get_generator(base_tag, doc_file, qs_file, dupe_file):
                yield key, sample
                key += 1
|
    def __get_generator(self, base_tag, doc_file, qs_file, dupe_file):
        # tail partitions ship without quality signals and duplicate ids
        if "_tail" in base_tag:
            yield from self._handle_tail(base_tag, doc_file, qs_file, dupe_file)
        else:
            yield from self._handle_head_middle(base_tag, doc_file, qs_file, dupe_file)
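    # e.g. base_tag "2023-06/0000/en_tail" is routed to _handle_tail (documents only),
    # while base_tag "2023-06/0000/en_head" is routed to _handle_head_middle
    # (documents, quality signals, and duplicate ids).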
|
|
|
    def _handle_tail(self, base_tag, doc_file, qs_file, dupe_file):
        try:
            with gzip.open(doc_file, "rt", encoding="utf-8") as df:
                for row, doc in enumerate(df):
                    doc_id = f"{base_tag}.json.gz/{row}"
                    try:
                        yield self.handle_record("tail", doc_id, doc, None, None)
                    except Exception:
                        logger.warning(f"failed handling row {row} in {doc_file}")
                        traceback.print_exc()
                        continue
        except gzip.BadGzipFile:
            # the shard file is corrupted; skip it entirely
            logger.warning(f"BadGzipFile: {doc_file}, {qs_file}")
            traceback.print_exc()
            return
|
    def _handle_head_middle(self, base_tag, doc_file, qs_file, dupe_file):
        deduplication = getattr(self.config, "deduplication", False)

        if qs_file is None:
            # no quality signals available; fall back to parsing documents only
            yield from self._handle_tail(base_tag, doc_file, None, None)
            return

        # load the ids of documents flagged as duplicates in this shard
        try:
            with open(dupe_file, "rb") as df:
                duplicates = set(
                    pq.read_table(df, columns=["doc_id"], use_pandas_metadata=False)[
                        "doc_id"
                    ].to_pylist()
                )
        except Exception:
            logger.warning(f"no duplicate ids found for {base_tag}")
            duplicates = set()
|
        try:
            with gzip.open(doc_file, "rt", encoding="utf-8") as df:
                with gzip.open(qs_file, "rt", encoding="utf-8") as qf:
                    for row, (doc, qs) in enumerate(zip(df, qf)):
                        doc_id = f"{base_tag}.json.gz/{row}"
                        is_duplicated = doc_id in duplicates

                        if is_duplicated and deduplication:
                            # drop documents flagged as duplicates when requested
                            continue

                        try:
                            yield self.handle_record(
                                part="head_middle",
                                doc_id=doc_id,
                                doc=doc,
                                qs=qs,
                                is_duplicate=is_duplicated,
                            )
                        except Exception:
                            logger.warning(
                                f"failed handling row {row} in {doc_file} ({qs_file})"
                            )
                            traceback.print_exc()
                            continue
        except gzip.BadGzipFile:
            # the shard file is corrupted; skip it entirely
            logger.warning(f"BadGzipFile: {doc_file}, {qs_file}")
            traceback.print_exc()
            return
|
    @staticmethod
    def handle_record(part, doc_id, doc, qs, is_duplicate=None):
        doc = json.loads(doc)
        qs = json.loads(qs) if qs is not None else {}

        meta = {
            "url": doc["url"],
            "partition": part,
            "language": doc["language"],
            "source_domain": doc["source_domain"],
            "date_download": doc["date_download"],
            "digest": doc["digest"],
        }

        quality_signals = qs.get("quality_signals", {})
        quality_signals["is_duplicate"] = is_duplicate

        return {
            "raw_content": doc["raw_content"],
            "doc_id": doc_id,
            "meta": json.dumps(meta),
            "quality_signals": json.dumps(quality_signals),
        }
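    # A yielded record therefore looks like this (illustrative values; signal names
    # depend on the upstream quality_signals files):
    #   {
    #       "raw_content": "...the document text...",
    #       "doc_id": "2023-06/0000/en_head.json.gz/0",
    #       "meta": '{"url": ..., "partition": "head_middle", "language": "en", ...}',
    #       "quality_signals": '{"<signal name>": ..., "is_duplicate": false}',
    #   }
    # where "meta" and "quality_signals" are JSON-encoded strings, matching the
    # string features declared in _info.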
|
|