# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NASA_OSDR dataset"""

import json
import os

import datasets
import pandas as pd

_CITATION = """
@inproceedings{singh2019towards,
    title={},
    author={},
    booktitle={},
    pages={},
    year={}
}
"""

_DESCRIPTION = """
TODO: write description
"""

_HOMEPAGE = "https://"

_LICENSE = ""

_SPLITS = ["train"]

_FIFTYONE_DATASET_URL = ""


class NasaOsdr(datasets.GeneratorBasedBuilder):
    """NASA OSDR dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="NASA_OSDR",
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION,
        )
    ]

    DEFAULT_CONFIG_NAME = "NASA_OSDR"

    def _info(self):
        ASSAY_COLUMNS = ['Sample Name', 'Protocol REF', 'Parameter Value: DNA Fragmentation',
                         'Parameter Value: DNA Fragment Size', 'Extract Name', 'Protocol REF.1',
                         'Parameter Value: Library Strategy',
                         'Parameter Value: Library Selection', 'Parameter Value: Library Layout',
                         'Protocol REF.2', 'Parameter Value: Sequencing Instrument',
                         'Assay Name', 'Parameter Value: Read Length', 'Raw Data File',
                         'Protocol REF.3', 'Parameter Value: Read Depth',
                         'Parameter Value: MultiQC File Names']

        # SAMPLE_COLUMNS = ['Source Name', 'Sample Name', 'Characteristics: Organism',
        #                   'Characteristics: Strain', 'Characteristics: Genotype',
        #                   'Characteristics: Material Type', 'Factor Value: Ionizing Radiation',
        #                   'Factor Value: Generation', 'Protocol REF', 'Protocol REF.1',
        #                   'Parameter Value: ionizing radiation energy',
        #                   'Parameter Value: exposure duration',
        #                   'Parameter Value: absorbed radiation dose',
        #                   'Parameter Value: absorbed radiation dose rate',
        #                   'Parameter Value: ionizing radiation categorized by source',
        #                   'Protocol REF.2', 'Parameter Value: Sample Preservation Method',
        #                   'Parameter Value: Sample Storage Temperature',
        #                   'Parameter Value: Age at time of sample collection',
        #                   'Comment: Animal Source', 'Comment: Parental Treatment']

        features = datasets.Features(
            {
                column_name: datasets.Value("string")
                for column_name in ASSAY_COLUMNS
                # for column_name in sorted(list(set(ASSAY_COLUMNS + SAMPLE_COLUMNS)))
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # No download step: the dataset is expected to already be available locally.
        # dataset_root = dl_manager.download_and_extract(_FIFTYONE_DATASET_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split": "train",
                    # Hardcoded local path; point this at your own copy of the data.
                    "dataset_root": "/Users/anz2/PycharmProjects/NASA/NASA_OSDR/data",
                },
            ),
        ]

    def _generate_examples(self, split: str, dataset_root: str):
        assays = os.path.join(dataset_root, "assays.csv")
        samples = os.path.join(dataset_root, "samples.csv")
        # other metadata tables could be merged in here as well

        assays_df = pd.read_csv(assays)
        samples_df = pd.read_csv(samples)

        # Iterate over assay and sample rows in parallel; currently only the assay
        # columns are emitted (the sample merge is kept as a commented alternative).
        for (idx, assay_row), (_, sample_row) in zip(assays_df.iterrows(), samples_df.iterrows()):
            # Cast every value to str (None for missing cells) so rows match the
            # string-typed features declared in _info().
            _item = {
                key: None if pd.isna(value) else str(value)
                for key, value in assay_row.to_dict().items()
            }
            # _item = {**assay_row.to_dict(), **sample_row.to_dict()}

            yield idx, _item
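

# A minimal usage sketch (assumptions: this script is saved as `nasa_osdr.py` and the
# assays.csv / samples.csv files exist under the `dataset_root` hardcoded above).
# Loading from a local script like this requires a `datasets` version that still
# supports script-based builders:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("nasa_osdr.py", name="NASA_OSDR", split="train")
#     print(ds[0]["Sample Name"], ds[0]["Assay Name"])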