#!/usr/bin/python

import datasets
import itertools
import os

import pyarrow as pa
import pyarrow.parquet as pq

BASE_DATASET = "ejschwartz/oo-method-test"


def setexe(r):
    # Split the full binary path into a directory name and an executable name.
    r['Dirname'], r['Exename'] = os.path.split(r['Binary'])
    return r


class OOMethodTestDataset(datasets.ArrowBasedBuilder):

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="combined",
            version=datasets.Version("1.0.0"),
            description="All data files combined",
        ),
        datasets.BuilderConfig(
            name="byrow",
            version=datasets.Version("1.0.0"),
            description="Split by example (dumb)",
        ),
        datasets.BuilderConfig(
            name="byfuncname",
            version=datasets.Version("1.0.0"),
            description="Split by function name",
        ),
        datasets.BuilderConfig(
            name="bylibrary",
            version=datasets.Version("1.0.0"),
            description="Split so that library functions (those appearing in >1 exe) are used for training, and non-library functions are used for testing",
        ),
        datasets.BuilderConfig(
            name="bylibrarydedup",
            version=datasets.Version("1.0.0"),
            description="Split so that library functions (those appearing in >1 exe) are used for training, and non-library functions are used for testing. Only one example per function name is retained per program.",
        ),
        datasets.BuilderConfig(
            name="bylibrarydedupall",
            version=datasets.Version("1.0.0"),
            description="Split so that library functions (those appearing in >1 exe) are used for training, and non-library functions are used for testing. Only one example per function name is retained.",
        ),
    ]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({
                'Binary': datasets.Value(dtype='string'),
                'Addr': datasets.Value(dtype='string'),
                'Name': datasets.Value(dtype='string'),
                'Type': datasets.ClassLabel(num_classes=2, names=['func', 'method']),
                'Disassembly': datasets.Value(dtype='string'),
                'Dirname': datasets.Value(dtype='string'),
                'Exename': datasets.Value(dtype='string'),
            }))

    def _split_generators(self, dl_manager):
        ds = datasets.load_dataset(BASE_DATASET)['combined']
        ds = ds.map(setexe, batched=False)

        if self.config.name == "combined":
            return [
                datasets.SplitGenerator(
                    name="combined",
                    gen_kwargs={
                        "ds": ds,
                    },
                ),
            ]
        elif self.config.name == "byrow":
            ds = ds.train_test_split(test_size=0.1, seed=42)
            #print(ds)
            return [
                datasets.SplitGenerator(
                    name="train",
                    gen_kwargs={
                        "ds": ds['train'],
                    },
                ),
                datasets.SplitGenerator(
                    name="test",
                    gen_kwargs={
                        "ds": ds['test'],
                    },
                ),
            ]
        elif self.config.name == "byfuncname":
            unique_names = ds.unique('Name')
            nameds = datasets.Dataset.from_dict({'Name': unique_names})
            name_split = nameds.train_test_split(test_size=0.1, seed=42)
            #print(name_split)
            train_name = name_split['train']['Name']
            test_name = name_split['test']['Name']
            return [
                datasets.SplitGenerator(
                    name="train",
                    gen_kwargs={
                        "ds": ds.filter(lambda r: r['Name'] in train_name),
                    },
                ),
                datasets.SplitGenerator(
                    name="test",
                    gen_kwargs={
                        "ds": ds.filter(lambda r: r['Name'] in test_name),
                    },
                ),
            ]
        elif self.config.name in ["bylibrary", "bylibrarydedup", "bylibrarydedupall"]:
            # A function (name) is a library function if it appears in more than one Exename.

            # Each element is a (Name, (Binary, Exename)) tuple.
            testcount = set(zip(ds['Name'], zip(ds['Binary'], ds['Exename'])))

            # Sort the tuples by function name.
            testcount = sorted(testcount, key=lambda x: x[0])

            # Group by function name.
            grouped = itertools.groupby(testcount, lambda t: t[0])

            # Move the function name to the key: {Name: [(Binary, Exename), ...]}
            grouped = {k: [b for _, b in g] for k, g in grouped}

            def appears_in_single_exe(tuples):
                return len({x[1] for x in tuples}) == 1
            library_func_names = {f for f, exes in grouped.items() if not appears_in_single_exe(exes)}

            # exes[0][1] is the Exename of the first occurrence.
            library_func_names_dedup = {(f, exes[0][1]) for f, exes in grouped.items() if not appears_in_single_exe(exes)}

            # exes[0][0] is the Binary of the first occurrence.
            library_func_names_dedup_all = {(f, exes[0][0]) for f, exes in grouped.items() if not appears_in_single_exe(exes)}

            nonlibrary_func_names = {f for f, exes in grouped.items() if appears_in_single_exe(exes)}

            train_filter_fun = None
            if self.config.name == "bylibrary":
                train_filter_fun = lambda r: r['Name'] in library_func_names
            elif self.config.name == "bylibrarydedup":
                train_filter_fun = lambda r: (r['Name'], r['Exename']) in library_func_names_dedup
            elif self.config.name == "bylibrarydedupall":
                train_filter_fun = lambda r: (r['Name'], r['Binary']) in library_func_names_dedup_all
            else:
                assert False, "Invalid configuration"

            return [
                datasets.SplitGenerator(
                    name="train",
                    gen_kwargs={
                        "ds": ds.filter(train_filter_fun),
                    },
                ),
                datasets.SplitGenerator(
                    name="test",
                    gen_kwargs={
                        "ds": ds.filter(lambda r: r['Name'] in nonlibrary_func_names),
                    },
                ),
            ]
        else:
            assert False

    def _generate_tables(self, ds):
        # Converting to pandas is silly, but the old version of datasets doesn't
        # seem to have a way to convert to Arrow?
        for i, batch in enumerate(ds.to_pandas(batched=True)):
            yield i, pa.Table.from_pandas(batch)
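

# Usage sketch (an illustration, not part of the loader): if this script is
# published as a Hugging Face dataset repository, each configuration above can
# be loaded by name. The repository id below is hypothetical; substitute the
# actual repo id or local path to this script. Recent versions of `datasets`
# may also require trust_remote_code=True when loading a script-based dataset.
if __name__ == "__main__":
    # Load the library/non-library split; "train" holds library functions,
    # "test" holds non-library functions.
    split_ds = datasets.load_dataset("ejschwartz/oo-method-test-split", "bylibrary")
    print(split_ds)
    # The 'Type' feature is a ClassLabel over ['func', 'method'].
    print(split_ds["train"].features["Type"].names)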