File size: 5,565 Bytes
9f3c65a ae72c9e bcc1b51 ae72c9e 0f47dab ae72c9e 9f3c65a ae72c9e 2aa3c68 ae72c9e bcc1b51 ae72c9e a43f524 ae72c9e 9f3c65a bcc1b51 9f3c65a bcc1b51 ae72c9e 0f47dab 2aa3c68 0f47dab ae72c9e 0f47dab ae72c9e 2aa3c68 ae72c9e bcc1b51 0f47dab bcc1b51 0f47dab bcc1b51 0f47dab a43f524 0f47dab 9f3c65a 2aa3c68 0f47dab 2aa3c68 0f47dab 2aa3c68 0f47dab ae72c9e 2aa3c68 ae72c9e 0f47dab 2aa3c68 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 |
import json
import os
from typing import List, Optional

import datasets
from datasets import (
    BuilderConfig,
    DownloadManager,
    GeneratorBasedBuilder,
    SplitGenerator,
)
_HOMEPAGE = "http://github.com/iamgroot42/mimir"
_DESCRIPTION = """\
Member and non-member splits for our MI experiments using MIMIR. Data is available for each source.
"""
_CITATION = """\
@article{duan2024membership,
title={Do Membership Inference Attacks Work on Large Language Models?},
author={Michael Duan and Anshuman Suri and Niloofar Mireshghallah and Sewon Min and Weijia Shi and Luke Zettlemoyer and Yulia Tsvetkov and Yejin Choi and David Evans and Hannaneh Hajishirzi},
year={2024},
journal={arXiv:2402.07841},
}
"""
_DOWNLOAD_URL = "https://huggingface.co/datasets/iamgroot42/mimir/resolve/main/"
class MimirConfig(BuilderConfig):
"""BuilderConfig for Mimir dataset."""
def __init__(self, *args, subsets: List[str] = [], **kwargs):
"""Constructs a MimirConfig.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(MimirConfig, self).__init__(**kwargs)
self.subsets = subsets
class MimirDataset(GeneratorBasedBuilder):
VERSION = datasets.Version("1.3.0")
BUILDER_CONFIG_CLASS = MimirConfig
BUILDER_CONFIGS = [
MimirConfig(
name="arxiv",
subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
description="This split contains data from the Pile's Arxiv subset at various n-gram overlap thresholds"
),
MimirConfig(
name="dm_mathematics",
subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
description="This split contains data from the Pile's DM Mathematics subset at various n-gram overlap thresholds"
),
MimirConfig(
name="github",
subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
description="This split contains data from the Pile's GitHub subset at various n-gram overlap thresholds"
),
MimirConfig(
name="hackernews",
subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
description="This split contains data from the Pile's HackerNews subset at various n-gram overlap thresholds"
),
MimirConfig(
name="pile_cc",
subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
description="This split contains data from the Pile's Pile CC subset at various n-gram overlap thresholds"
),
MimirConfig(
name="pubmed_central",
subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
description="This split contains data from the Pile's PubMed Central subset at various n-gram overlap thresholds"
),
MimirConfig(
name="wikipedia_(en)",
subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
description="This split contains data from the Pile's Wikipedia subset at various n-gram overlap thresholds"
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({
"input": datasets.Value("string"),
"label": datasets.Value("int32"),
}),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager: DownloadManager):
"""Returns SplitGenerators."""
parent_dir = "cache_100_200_1000_512"
if len(self.config.subsets) > 0:
suffixes = [f"{subset}" for subset in self.config.subsets]
else:
suffixes = ["none"]
file_paths = {}
for subset_split_suffix in suffixes:
internal_fp = {}
subset_split_suffix_use = f"_{subset_split_suffix}" if subset_split_suffix != "none" else ""
internal_fp['member'] = os.path.join(parent_dir, "train", f"{self.config.name}{subset_split_suffix_use}.jsonl")
internal_fp['nonmember'] = os.path.join(parent_dir, "test", f"{self.config.name}{subset_split_suffix_use}.jsonl")
file_paths[subset_split_suffix] = internal_fp
# Download data
data_dir = {}
for k, v_dict in file_paths.items():
download_paths = [_DOWNLOAD_URL + v for v in v_dict.values()]
paths = dl_manager.download_and_extract(download_paths)
internal_dict = {k: v for k, v in zip(v_dict.keys(), paths)}
data_dir[k] = internal_dict
splits = [SplitGenerator(name=k, gen_kwargs={"file_path_dict": data_dir[k]}) for k in suffixes]
return splits
def _generate_examples(self, file_path_dict):
"""Yields individual examples for members and non-members."""
with open(file_path_dict["member"], "r") as f_member, open(file_path_dict["nonmember"], "r") as f_nonmember:
for id, (member, nonmember) in enumerate(zip(f_member, f_nonmember)):
member_text = json.loads(member)
nonmember_text = json.loads(nonmember)
# Yield separate examples for members and non-members
yield f"{id}_member", {
"input": member_text,
"label": 1, # Member example
}
yield f"{id}_nonmember", {
"input": nonmember_text,
"label": 0, # Non-member example
}
|