srivarra committed
Commit 53517ab
1 Parent(s): 7e4b160

added fov zip file

Files changed (5)
  1. .gitignore +1 -0
  2. .vscode/settings.json +3 -0
  3. README.md +126 -0
  4. ark_example.py +183 -0
  5. data/fovs.zip +3 -0
.gitignore ADDED
@@ -0,0 +1 @@
+ .DS_Store
.vscode/settings.json ADDED
@@ -0,0 +1,3 @@
+ {
+     "python.formatting.provider": "black"
+ }
README.md ADDED
@@ -0,0 +1,126 @@
+ ---
+ TODO: Add YAML tags here. Copy-paste the tags obtained with the online tagging app: https://huggingface.co/spaces/huggingface/datasets-tagging
+ ---
+
+ # Dataset Card for [Dataset Name]
+
+ ## Table of Contents
+ - [Table of Contents](#table-of-contents)
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+   - [Contributions](#contributions)
+
+ ## Dataset Description
+
+ - **Homepage:**
+ - **Repository:**
+ - **Paper:**
+ - **Leaderboard:**
+ - **Point of Contact:**
+
+ ### Dataset Summary
+
+ [More Information Needed]
+
+ ### Supported Tasks and Leaderboards
+
+ [More Information Needed]
+
+ ### Languages
+
+ [More Information Needed]
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ [More Information Needed]
+
+ ### Data Fields
+
+ [More Information Needed]
+
+ ### Data Splits
+
+ [More Information Needed]
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ [More Information Needed]
+
+ ### Source Data
+
+ #### Initial Data Collection and Normalization
+
+ [More Information Needed]
+
+ #### Who are the source language producers?
+
+ [More Information Needed]
+
+ ### Annotations
+
+ #### Annotation process
+
+ [More Information Needed]
+
+ #### Who are the annotators?
+
+ [More Information Needed]
+
+ ### Personal and Sensitive Information
+
+ [More Information Needed]
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ [More Information Needed]
+
+ ### Discussion of Biases
+
+ [More Information Needed]
+
+ ### Other Known Limitations
+
+ [More Information Needed]
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ [More Information Needed]
+
+ ### Licensing Information
+
+ [More Information Needed]
+
+ ### Citation Information
+
+ [More Information Needed]
+
+ ### Contributions
+
+ Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
ark_example.py ADDED
@@ -0,0 +1,183 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ This dataset contains example data for running through the multiplexed imaging data pipeline in
+ Ark Analysis: https://github.com/angelolab/ark-analysis
+ """
+
+ import pathlib
+
+ import datasets
+ import tifffile
+
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+     title = {Ark Analysis Example Dataset},
+     author = {Angelo Lab},
+     year = {2022}
+ }
+ """
+
+ _DESCRIPTION = """\
+ This dataset contains 11 Fields of View (FOVs), each with 22 channels.
+ """
+
+ _HOMEPAGE = "https://github.com/angelolab/ark-analysis"
+
+ _LICENSE = "https://github.com/angelolab/ark-analysis/blob/main/LICENSE"
+
+ # The Hugging Face Datasets library doesn't host the dataset; it only points to the
+ # original files (see `_split_generators` below).
+ _URL_REPO = "https://huggingface.co/datasets/angelolab/ark_example/resolve/main"
+
+ # NOTE: only `base_dataset` has a download URL so far; the `dev_dataset` configuration
+ # below needs its own entry here before it can be loaded.
+ _URLS = {"base_dataset": f"{_URL_REPO}/data/fovs.zip"}
+
+ # References:
+ # https://huggingface.co/docs/datasets/dataset_script
+ # https://huggingface.co/docs/datasets/share
+ # https://huggingface.co/datasets/allenai/wmt22_african/blob/main/wmt22_african.py
+ # https://huggingface.co/docs/datasets/repository_structure
+
+
+ class ArkExample(datasets.GeneratorBasedBuilder):
+     """The dataset consists of 11 FOVs."""
+
+     VERSION = datasets.Version("0.0.1")
+
+     # This is a dataset with multiple configurations. Load one or the other with:
+     #   data = datasets.load_dataset("angelolab/ark_example", "base_dataset")
+     #   data = datasets.load_dataset("angelolab/ark_example", "dev_dataset")
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="base_dataset",
+             version=VERSION,
+             description="This dataset contains only the 11 FOVs.",
+         ),
+         datasets.BuilderConfig(
+             name="dev_dataset",
+             version=VERSION,
+             description="This dataset is a superset of base_dataset and contains "
+             "intermediate data for all notebooks, so you can start at any notebook "
+             "with it.",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "base_dataset"
+
+     def _info(self):
+         if self.config.name == "base_dataset":
+             # The declared features must match what `_generate_examples` yields:
+             # one channel image per example, plus the path it was read from.
+             features = datasets.Features(
+                 {
+                     "chan": datasets.Image(),
+                     "path": datasets.Value("string"),
+                 }
+             )
+         else:
+             # Placeholder for the dev_dataset configuration, to be filled in once the
+             # intermediate notebook data is added.
+             features = datasets.Features({"path": datasets.Value("string")})
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # `dl_manager` downloads and extracts the URLs; archives are extracted and the
+         # path to the cached folder is returned in place of the archive.
+         urls = _URLS[self.config.name]
+         data_dir = dl_manager.download_and_extract(urls)
+         return [
+             datasets.SplitGenerator(
+                 name="base_dataset",
+                 # These kwargs are passed to `_generate_examples`.
+                 gen_kwargs={"filepath": pathlib.Path(data_dir) / "fovs"},
+             ),
+         ]
+
+     def _generate_examples(self, filepath: pathlib.Path):
+         # Yield (key, example) tuples. The key must be unique per example, so combine
+         # the FOV directory name with the channel file name (one TIFF per channel).
+         for fp in filepath.rglob("*"):
+             if fp.suffix in [".tiff", ".tif"] and self.config.name == "base_dataset":
+                 image_data = tifffile.imread(fp, key=0)
+                 yield f"{fp.parent.stem}/{fp.stem}", {
+                     "chan": image_data,
+                     "path": str(fp),
+                 }
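With this script in place, the dataset can be exercised end to end. Below is a minimal sketch of loading the `base_dataset` configuration, assuming the repository id `angelolab/ark_example` and the `base_dataset` split name defined in `_split_generators` above:

```python
from datasets import load_dataset

# Load the base_dataset configuration defined in BUILDER_CONFIGS.
ds = load_dataset("angelolab/ark_example", "base_dataset", split="base_dataset")

# Each example is one channel TIFF decoded by the datasets.Image() feature,
# plus the path it was read from inside the extracted fovs/ folder.
example = ds[0]
print(example["path"])
print(example["chan"].size)  # PIL image dimensions for this channel
```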
data/fovs.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86e72fb01ea522ffe9e981d7b1f7077cf9f81ffd5f1a405071eb4d2752d0ef5d
+ size 400238028
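The zip itself is not stored in Git; this three-line pointer tells Git LFS to fetch a ~400 MB object by its SHA-256. As a sketch, the archive can also be pulled directly with `huggingface_hub` (the call below assumes the file stays at `data/fovs.zip` in the dataset repo):

```python
from huggingface_hub import hf_hub_download

# Resolve the LFS pointer and download the actual ~400 MB archive into the local cache.
zip_path = hf_hub_download(
    repo_id="angelolab/ark_example",
    filename="data/fovs.zip",
    repo_type="dataset",
)
print(zip_path)  # local cache path to fovs.zip
```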