# alignment-research-dataset.py
import json
from pathlib import Path
import datasets
from datasets import Value, Sequence, Features
_CITATION = '''
@article{kirchner2022understanding,
  title={Understanding AI Alignment Research: A Systematic Analysis},
  author={Kirchner, Jan H and Smith, Logan and Thibodeau, Jacques and McDonnell, Kyle and Reynolds, Laria},
  journal={arXiv preprint arXiv:2022.4338861},
  year={2022}
}
'''
_DESCRIPTION = """The AI Alignment Research Dataset is a collection of documents related to AI alignment and safety, drawn from various books, research papers, and alignment-related blog posts."""
_HOMEPAGE = "https://github.com/StampyAI/alignment-research-dataset"
_LICENSE = "MIT license"
_VERSION_ = '0.0.0'
def iterate_file(filename):
    print(filename)
    with open(filename) as f:
        for line in f:
            try:
                yield json.loads(line)
            except json.JSONDecodeError as e:
                print(f'Could not parse {line!r}: {e}')
## Feature extractor helpers
def get_type(value):
    """Recursively get the huggingface type for the provided value."""
    if value is None:
        return None
    if value and isinstance(value, (tuple, list)):
        return Sequence(get_type(value[0]))
    if value and isinstance(value, dict):
        return {k: get_type(v) for k, v in value.items()}
    if isinstance(value, str):
        return Value('string')
    # bool must be checked before int, as Python bools are a subclass of int
    if isinstance(value, bool):
        return Value('bool')
    if isinstance(value, int):
        return Value('int32')
    if isinstance(value, float):
        return Value('double')
    return None
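# Illustrative example:
#   get_type({'karma': 12, 'tags': ['ai']})
#   -> {'karma': Value('int32'), 'tags': Sequence(Value('string'))}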
def print_extra_features(files):
    """Go through all the provided files and print the non-default features found in each one.
    This could be done manually, but would be a hassle.
    It's assumed that each file contains one JSON object per line.
    """
ignored_keys = [
'comments', # Comments are arbitrarily nested objects, which doesn't play nice with huggingface
]
per_file = {}
for filename in sorted(files):
extra_types = {}
        for item in iterate_file(filename):
            for k, v in item.items():
                # Skip ignored and default keys; otherwise keep trying until a non-null type is found
                if k in ignored_keys or k in DEFAULT_FEATURES:
                    continue
                if k not in extra_types or not extra_types[k]:
                    extra_types[k] = get_type(v)
per_file[filename] = extra_types
    print('DATASOURCES = {')
    for filename, extra in per_file.items():
        vals = ',\n'.join(f"        '{key}': {val}" for key, val in extra.items())
        # Doubled braces render as literal braces in the f-string output
        print(f"    '{filename.stem}': {{\n{vals}\n    }},")
    print('}')
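# Usage sketch (illustrative; assumes the per-source .jsonl dumps sit next to this script):
#   print_extra_features(Path('.').glob('*.jsonl'))
# The output is a ready-to-paste DATASOURCES dict like the one defined below.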
# These keys are present in all files
DEFAULT_FEATURES = {
'id': Value('string'),
'source': Value('string'),
'title': Value('string'),
'text': Value('large_string'),
'url': Value('string'),
'date_published': Value(dtype='string'),
'authors': Sequence(feature=Value(dtype='string'), length=-1),
'summary': Sequence(feature=Value(dtype='string'), length=-1),
'source_type': Value(dtype='string'),
}
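# Illustrative minimal record, as one line of a source's .jsonl file (all values invented):
#   {"id": "abc123", "source": "arxiv", "title": "Some Paper", "text": "Full text...",
#    "url": "https://example.com", "date_published": "2022-06-01",
#    "authors": ["A. Author"], "summary": [], "source_type": "html"}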
# Per datasource additional features
DATASOURCES = {
'agentmodels': {
'book_title': Value(dtype='string'),
},
'agisf': {},
'aisafety.info': {},
'alignmentforum': {
'karma': Value(dtype='int32'),
'votes': Value(dtype='int32'),
'words': Value(dtype='int32'),
'comment_count': Value(dtype='int32'),
'tags': Sequence(feature=Value(dtype='string')),
'modified_at': Value(dtype='string'),
},
'arbital': {
'alias': Value(dtype='string'),
'tags': Sequence(feature=Value(dtype='string')),
},
'arxiv': {
'data_last_modified': Value(dtype='string'),
'abstract': Value(dtype='string'),
'author_comment': Value(dtype='string'),
'journal_ref': Value(dtype='string'),
'doi': Value(dtype='string'),
'primary_category': Value(dtype='string'),
'categories': Sequence(feature=Value(dtype='string'), length=-1),
},
'blogs': {
'initial_source': Value(dtype='string'),
},
'distill': {
'abstract': Value(dtype='string'),
'journal_ref': Value(dtype='string'),
'doi': Value(dtype='string'),
'bibliography_bib': Sequence(feature={'title': Value(dtype='string')}, length=-1),
},
'eaforum': {
'karma': Value(dtype='int32'),
'votes': Value(dtype='int32'),
'words': Value(dtype='int32'),
'comment_count': Value(dtype='int32'),
'tags': Sequence(feature=Value(dtype='string')),
'modified_at': Value(dtype='string'),
},
'lesswrong': {
'karma': Value(dtype='int32'),
'votes': Value(dtype='int32'),
'words': Value(dtype='int32'),
'comment_count': Value(dtype='int32'),
'tags': Sequence(feature=Value(dtype='string')),
'modified_at': Value(dtype='string'),
},
'special_docs': {},
'youtube': {},
}
def join_features(features, to_join):
"""Recursively join the provided dicts.
`to_join` can either be a dict to be merged, or a list of dicts to merge.
"""
if not to_join:
return Features(features)
if isinstance(to_join, dict):
return Features(dict(features, **to_join))
return join_features(dict(features, **to_join[0]), to_join[1:])
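# Illustrative example:
#   join_features({'id': Value('string')}, [{'karma': Value('int32')}, {'doi': Value('string')}])
#   -> Features({'id': Value('string'), 'karma': Value('int32'), 'doi': Value('string')})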
class AlignmentResearchDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for AlignmentResearchDataset."""

    def __init__(self, sources, features, **kwargs):
        """BuilderConfig for AlignmentResearchDataset.

        :param List[string] sources: the sources which will be used by this config
        :param features: the non-default features of those sources, merged with DEFAULT_FEATURES
        """
super().__init__(version=datasets.Version(_VERSION_), **kwargs)
self.sources = sources
self.features = join_features(DEFAULT_FEATURES, features)
@property
def files(self):
return [f'{source}.jsonl' for source in self.sources]
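# Illustrative example:
#   AlignmentResearchDatasetConfig(name='arxiv', sources=['arxiv'], features=DATASOURCES['arxiv']).files
#   -> ['arxiv.jsonl']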
class AlignmentResearchDataset(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version(_VERSION_)
BUILDER_CONFIGS = [
AlignmentResearchDatasetConfig(
name='all',
description='All data files',
sources=list(DATASOURCES.keys()),
features=list(DATASOURCES.values())
)
] + [
AlignmentResearchDatasetConfig(name=source, sources=[source], features=features) for source, features in DATASOURCES.items()
]
DEFAULT_CONFIG_NAME = 'all'
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=self.config.features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
downloaded_files = dl_manager.download_and_extract(self.config.files)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={'files': downloaded_files}
)
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, files):
        seen = set()

        def is_good(item):
            # Skip items with no id, duplicate ids, or no usable text
            item_id = item and item.get('id')
            if not item_id or item_id in seen:
                return False
            seen.add(item_id)
            return item['text'] not in [None, '', 'n/a']

        def prepare_example(item):
            # Keep only the keys defined in this config's features
            return item['id'], {k: item.get(k) for k in self.config.features}

        lines = (item for filename in files for item in iterate_file(filename))
        yield from map(prepare_example, filter(is_good, lines))
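# Usage sketch (illustrative; the Hub repo id is assumed to mirror the GitHub project name):
#   from datasets import load_dataset
#   ds = load_dataset('StampyAI/alignment-research-dataset', 'alignmentforum')  # a single source
#   ds = load_dataset('StampyAI/alignment-research-dataset', 'all')             # every source
# Depending on the installed `datasets` version, trust_remote_code=True may also be needed
# to run this loading script.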