# The majority of this file was adapted, on 6/3/21, from https://github.com/huggingface/datasets/blob/master/datasets/snli/snli.py


# License reproduced from the original code:

# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# The changes to the original file include:
# Changing all of the parameters to reflect the CHILDES data and metadata.
# Changing _split_generators so that the dl_manager only downloads plain text files (there is no archive to extract).
# Changing _generate_examples to read directly from those text files and clean each line of text.

import datasets

class Childes(datasets.GeneratorBasedBuilder):
    """CHILDES text corpus for language modeling."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="childes_data",
            version=datasets.Version("1.0.0", ""),
            description="Childes language modeling dataset",
        )
    ]

    def _info(self):

        # Citation taken directly from Google Scholar (6/3).
        citation_text = """\
@article{sanchez2019childes,
    title={childes-db: A flexible and reproducible interface to the child language data exchange system},
    author={Sanchez, Alessandro and Meylan, Stephan C and Braginsky, Mika and MacDonald, Kyle E and Yurovsky, Daniel and Frank, Michael C},
    journal={Behavior research methods},
    volume={51},
    number={4},
    pages={1928--1941},
    year={2019},
    publisher={Springer}}
"""

        return datasets.DatasetInfo(
            description="CHILDES data for language modeling",
            citation=citation_text,
            features=datasets.Features(
                {
                    # Each example is a single line of transcript text.
                    "text": datasets.Value("string"),
                }
            ),
            # No supervised_keys: this is plain language modeling text, not
            # the (premise, hypothesis) pairs of the original SNLI script.
            homepage="https://childes-db.stanford.edu/",
        )

    def _split_generators(self, dl_manager):

        # Download (and cache) the raw text file for each split.
        paths = dl_manager.download_and_extract({
            'train': 'https://www.dropbox.com/s/dl/i282barrzlari08/train.txt?dl=1',
            'val': 'https://www.dropbox.com/s/gx0rngo3v5mvlcf/validation.txt?dl=1',
        })

        list_datasets = []

        phases = ['train', 'val']
        dataset_names = [
            datasets.Split.TRAIN,
            datasets.Split.VALIDATION,
        ]

        # Pair each downloaded file with its corresponding datasets.Split.
        for phase, phase_name in zip(phases, dataset_names):
            this_dataset = datasets.SplitGenerator(
                name=phase_name, gen_kwargs={"file_path": paths[phase]}
            )
            list_datasets.append(this_dataset)

        return list_datasets
    
    def _generate_examples(self, file_path):

        # 6/17: https://stackoverflow.com/questions/10406135/unicodedecodeerror-ascii-codec-cant-decode-byte-0xd1-in-position-2-ordinal
        # Iterate over the file lazily rather than calling readlines(), and
        # strip the trailing newline so each example is a clean line of text.
        with open(file_path, 'r', encoding="utf-8") as f:
            for idx, line in enumerate(f):
                yield idx, {"text": line.strip()}
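

# A minimal usage sketch, not part of the original script: assuming this
# file is saved locally as childes.py, the builder can be exercised with
# datasets.load_dataset, which downloads both Dropbox files and returns
# train/validation splits of {"text": ...} examples.
if __name__ == "__main__":
    dataset = datasets.load_dataset("./childes.py", "childes_data")
    print(dataset["train"][0]["text"])
    print(len(dataset["validation"]))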