# The majority of this file was taken and adapted from this file, on 6/3/21:
# https://github.com/huggingface/datasets/blob/master/datasets/snli/snli.py
# License reproduced from the original code:
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The changes to the file include:
# - Changing all of the parameters to reflect CHILDES data and information.
# - Changing the dl_manager to be essentially unused in split_generators and
#   loading from text files instead.
# - Changing generate_examples to load directly from text files and clean the
#   lines of text.
import os
import re

import datasets


class Childes(datasets.GeneratorBasedBuilder):
    """CHILDES child-language transcripts as a HuggingFace language-modeling dataset.

    Adapted from the HuggingFace `snli` dataset builder.  Each example is a
    single line of transcript text with any leading speaker tag removed.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="childes_data_no_tags",
            version=datasets.Version("1.0.0", ""),
            description="Childes language modeling dataset without speaker tags.",
        )
    ]

    # Anchored pattern for a leading speaker tag ("[CGV] " or "[CHI] ").
    # NOTE(fix): the original used str.strip('[CGV] ').strip('[CHI] '), which
    # removes ANY of the characters '[', 'C', 'G', 'V', 'H', 'I', ']', ' ' from
    # BOTH ends of the line (e.g. a line ending in "...CHI" would lose those
    # letters).  The regex removes only an exact tag at the start of the line.
    _SPEAKER_TAG = re.compile(r"^\[(?:CGV|CHI)\] ")

    def _info(self):
        """Return dataset metadata (description, citation, features, homepage)."""
        # 6/3: citation info is directly taken from Google Scholar.
        citation_text = """\
@article{sanchez2019childes,
  title={childes-db: A flexible and reproducible interface to the child language data exchange system},
  author={Sanchez, Alessandro and Meylan, Stephan C and Braginsky, Mika and MacDonald, Kyle E and Yurovsky, Daniel and Frank, Michael C},
  journal={Behavior research methods},
  volume={51},
  number={4},
  pages={1928--1941},
  year={2019},
  publisher={Springer}}
"""
        return datasets.DatasetInfo(
            description="CHILDES data for language modeling",
            citation=citation_text,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                }
            ),
            # No default supervised_keys: this is a plain language-modeling
            # dataset with a single "text" field.
            homepage="https://childes-db.stanford.edu/",
        )

    def _split_generators(self, download_helper):
        """Download the train/validation text files and build one SplitGenerator per phase.

        `download_helper` (the datasets DownloadManager) is used only to fetch
        the two Dropbox-hosted text files; everything else reads local paths.
        """
        paths = download_helper.download_and_extract(
            {
                "train": "https://www.dropbox.com/s/dl/i282barrzlari08/train.txt?dl=1",
                "val": "https://www.dropbox.com/s/gx0rngo3v5mvlcf/validation.txt?dl=1",
            }
        )

        phases = ["train", "val"]
        dataset_names = [
            datasets.Split.TRAIN,
            datasets.Split.VALIDATION,
        ]

        return [
            datasets.SplitGenerator(
                name=phase_name,
                gen_kwargs={"file_path": paths[phase]},
            )
            for phase, phase_name in zip(phases, dataset_names)
        ]

    def _generate_examples(self, file_path):
        """Yield (index, {"text": line}) pairs from a plain-text transcript file.

        Each line may begin with a speaker tag ("[CGV] " or "[CHI] "); the tag
        and the trailing newline are removed before yielding.
        """
        # utf-8 needed: transcripts contain non-ASCII characters.
        # 6/17: https://stackoverflow.com/questions/10406135/unicodedecodeerror-ascii-codec-cant-decode-byte-0xd1-in-position-2-ordinal
        with open(file_path, "r", encoding="utf-8") as f:
            # Iterate the file object directly instead of f.readlines() so the
            # whole file is never materialized in memory at once.
            for idx, line in enumerate(f):
                text = self._SPEAKER_TAG.sub("", line, count=1).strip("\n")
                yield idx, {"text": text}