from datasets import (
GeneratorBasedBuilder,
SplitGenerator,
DownloadManager,
BuilderConfig,
)
import json
import os
import datasets
from typing import List, Optional
_HOMEPAGE = "https://github.com/iamgroot42/mimir"
_DESCRIPTION = """\
Member and non-member splits for our membership inference (MI) experiments using MIMIR. Data is available for each source.
"""
_CITATION = """\
@article{duan2024membership,
title={Do Membership Inference Attacks Work on Large Language Models?},
author={Michael Duan and Anshuman Suri and Niloofar Mireshghallah and Sewon Min and Weijia Shi and Luke Zettlemoyer and Yulia Tsvetkov and Yejin Choi and David Evans and Hannaneh Hajishirzi},
year={2024},
journal={arXiv:2402.07841},
}
"""
_DOWNLOAD_URL = "https://huggingface.co/datasets/iamgroot42/mimir/resolve/main/"
class MimirConfig(BuilderConfig):
    """BuilderConfig for the MIMIR dataset."""

    def __init__(self, *args, subsets: Optional[List[str]] = None, **kwargs):
        """Constructs a MimirConfig.

        Args:
            subsets: n-gram overlap subsets to expose as splits (e.g. "ngram_7_0.2").
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(*args, **kwargs)
        self.subsets = subsets or []
class MimirDataset(GeneratorBasedBuilder):
VERSION = datasets.Version("1.3.0")
BUILDER_CONFIG_CLASS = MimirConfig
BUILDER_CONFIGS = [
MimirConfig(
name="arxiv",
subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
description="This split contains data from the Pile's Arxiv subset at various n-gram overlap thresholds"
),
MimirConfig(
name="dm_mathematics",
subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
description="This split contains data from the Pile's DM Mathematics subset at various n-gram overlap thresholds"
),
MimirConfig(
name="github",
subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
description="This split contains data from the Pile's GitHub subset at various n-gram overlap thresholds"
),
MimirConfig(
name="hackernews",
subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
description="This split contains data from the Pile's HackerNews subset at various n-gram overlap thresholds"
),
MimirConfig(
name="pile_cc",
subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
description="This split contains data from the Pile's Pile CC subset at various n-gram overlap thresholds"
),
MimirConfig(
name="pubmed_central",
subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
description="This split contains data from the Pile's PubMed Central subset at various n-gram overlap thresholds"
),
MimirConfig(
name="wikipedia_(en)",
subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
description="This split contains data from the Pile's Wikipedia subset at various n-gram overlap thresholds"
),
]
    def _info(self):
        """Declares the dataset schema: raw text plus a binary membership label."""
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({
"input": datasets.Value("string"),
"label": datasets.Value("int32"),
}),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
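
    # Illustrative example of a single record produced by this builder (values are
    # made up; the schema matches the Features declared above):
    #   {"input": "Lorem ipsum dolor sit amet ...", "label": 1}  # 1 = member, 0 = non-member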
    def _split_generators(self, dl_manager: DownloadManager):
        """Returns SplitGenerators, one per requested n-gram overlap subset."""
        parent_dir = "cache_100_200_1000_512"

        # Each requested subset becomes its own split; "none" is a sentinel meaning
        # "no subset suffix" when the config declares no subsets.
        if len(self.config.subsets) > 0:
            suffixes = list(self.config.subsets)
        else:
            suffixes = ["none"]

        # Build the relative paths of the member (train) and non-member (test) files.
        file_paths = {}
        for subset_split_suffix in suffixes:
            internal_fp = {}
            subset_split_suffix_use = f"_{subset_split_suffix}" if subset_split_suffix != "none" else ""
            internal_fp["member"] = os.path.join(parent_dir, "train", f"{self.config.name}{subset_split_suffix_use}.jsonl")
            internal_fp["nonmember"] = os.path.join(parent_dir, "test", f"{self.config.name}{subset_split_suffix_use}.jsonl")
            file_paths[subset_split_suffix] = internal_fp

        # Download the member/non-member files for each subset.
        data_dir = {}
        for suffix, path_dict in file_paths.items():
            download_paths = [_DOWNLOAD_URL + v for v in path_dict.values()]
            downloaded = dl_manager.download_and_extract(download_paths)
            data_dir[suffix] = dict(zip(path_dict.keys(), downloaded))

        return [
            SplitGenerator(name=suffix, gen_kwargs={"file_path_dict": data_dir[suffix]})
            for suffix in suffixes
        ]
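
    # Illustrative resolution of the paths above: for the "arxiv" config with subset
    # "ngram_7_0.2", the files downloaded from _DOWNLOAD_URL would be
    #   cache_100_200_1000_512/train/arxiv_ngram_7_0.2.jsonl   (members)
    #   cache_100_200_1000_512/test/arxiv_ngram_7_0.2.jsonl    (non-members)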
    def _generate_examples(self, file_path_dict):
        """Yields individual examples for members and non-members.

        Each line of the member/non-member .jsonl files is decoded with json.loads
        and used directly as the raw-text "input" field.
        """
        with open(file_path_dict["member"], "r") as f_member, open(file_path_dict["nonmember"], "r") as f_nonmember:
            for idx, (member, nonmember) in enumerate(zip(f_member, f_nonmember)):
                member_text = json.loads(member)
                nonmember_text = json.loads(nonmember)
                # Yield separate examples for members and non-members
                yield f"{idx}_member", {
                    "input": member_text,
                    "label": 1,  # Member example
                }
                yield f"{idx}_nonmember", {
                    "input": nonmember_text,
                    "label": 0,  # Non-member example
                }
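

# Usage sketch (illustrative, assuming this script is hosted as the loading script of
# the "iamgroot42/mimir" repository referenced in _DOWNLOAD_URL): each source is a
# config and each entry of `subsets` is exposed as a split. Recent `datasets` versions
# may additionally require trust_remote_code=True for script-based datasets.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load the ArXiv config at the 7-gram / 0.2-overlap threshold.
    mimir_arxiv = load_dataset("iamgroot42/mimir", "arxiv", split="ngram_7_0.2")
    print(mimir_arxiv[0])  # e.g. {"input": "...", "label": 1}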