srivarra committed
Commit 3bf04b3
1 Parent(s): 4767d85

example_dataset_updates (#1)


- added datasets for nb2 (ab31001291f18268508f58b11693a6ec6e348e32)
- updated nb paths (d59b409f97cf74c19f02aaa6771cb33d0a46f934)
- small changes (9fd97a063c20e467c16c92e8087da1a4a62a93d8)
- updates to downloading, dataset updates (d0916a6c4d88b06bb2aff693ae8f4cf126bce1ed)
- removed __MACOSX (b0db6d64361348cdff2d1a590be32a8cf667358f)
- changed key name to reflect relative path (49efbc839f81ee899ac882df8e39f1248d675911)
- reverted key name change (f661869424c07e00bcfe523989132bf35ed7f84a)
- remove dict type hints (1fdc7ac3aab0f254169c0a596d0abc4a1facacd0)
- removed useless comment. (81e248c295fba5332305d1ca7764a7fc97da9cd6)
- Merge branch 'main' into pr/1 (531c2d184aa5658f076c61d5c106357aed840b6d)
- added feature_1 for deepcell_output tifs (9fecc0ccbb8f2cf1b33172b827f51dfdcf11c149)
- added datasets for nb3 (4e8f8581f72fed15fe2f5189aad9a45edc9fd765)
- added nb3 dataset Builder (7f4b0c6f875c866ea51694bce21d100e2457689d)
- URL_DATASET_CONFIGS name fix (d047ac0f69e882a339ce28f5f70fc435aaea6d96)
- added datasets for nb4 (3dd07d96b034fcda17b2dac6d58c5aee724f2b4d)
- updated cell_table (14323a93e417562698a28bcd15481fad2422c878)
- ark_example.py docs (a3374ac5747c1f5fc6b7a2dd21b5017a66ee86e2)
- updated datasets (a3b0db4fa93c194bfcaf5d4daccbe6573c6a6f7c)
- moved cell and pixel datasets to pixie directory (0f5a19ebbb9f9c650e036b6e811b1479812a2f72)
- fixed cell_clustering params (f3b76074a5f9dec69e8ebc6f957c36da4dd7008a)

ark_example.py CHANGED
@@ -14,18 +14,27 @@

 """
 This dataset contains example data for running through the multiplexed imaging data pipeline in
-Ark Analysis: https://github.com/angelolab/ark-analysis
-"""
+Ark Analysis: https://github.com/angelolab/ark-analysis.
+

-import json
-import os
+Dataset Fov renaming:
+
+TMA2_R8C3 -> fov0
+TMA6_R4C5 -> fov1
+TMA7_R5C4 -> fov2
+TMA10_R7C3 -> fov3
+TMA11_R9C6 -> fov4
+TMA13_R8C5 -> fov5
+TMA17_R9C2 -> fov6
+TMA18_R9C2 -> fov7
+TMA21_R2C5 -> fov8
+TMA21_R12C6 -> fov9
+TMA24_R9C1 -> fov10
+
+"""

 import datasets
 import pathlib
-import glob
-import tifffile
-import xarray as xr
-import numpy as np

 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
@@ -46,35 +55,46 @@ _HOMEPAGE = "https://github.com/angelolab/ark-analysis"

 _LICENSE = "https://github.com/angelolab/ark-analysis/blob/main/LICENSE"

-# TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URL_REPO = "https://huggingface.co/datasets/angelolab/ark_example/resolve/main"
+# _URL_REPO = "https://huggingface.co/datasets/angelolab/ark_example"


-_URLS = {"base_dataset": f"{_URL_REPO}/data/image_data.zip"}
+_URL_DATA = {
+    "image_data": "./data/image_data.zip",
+    "cell_table": "./data/segmentation/cell_table.zip",
+    "deepcell_output": "./data/segmentation/deepcell_output.zip",
+    "example_pixel_output_dir": "./data/pixie/example_pixel_output_dir.zip",
+    "example_cell_output_dir": "./data/pixie/example_cell_output_dir.zip",
+}

-"""
-Dataset Fov renaming:
-
-TMA2_R8C3 -> fov0
-TMA6_R4C5 -> fov1
-TMA7_R5C4 -> fov2
-TMA10_R7C3 -> fov3
-TMA11_R9C6 -> fov4
-TMA13_R8C5 -> fov5
-TMA17_R9C2 -> fov6
-TMA18_R9C2 -> fov7
-TMA21_R2C5 -> fov8
-TMA21_R12C6 -> fov9
-TMA24_R9C1 -> fov10
-"""
+_URL_DATASET_CONFIGS = {
+    "segment_image_data": {"image_data": _URL_DATA["image_data"]},
+    "cluster_pixels": {
+        "image_data": _URL_DATA["image_data"],
+        "cell_table": _URL_DATA["cell_table"],
+        "deepcell_output": _URL_DATA["deepcell_output"],
+    },
+    "cluster_cells": {
+        "image_data": _URL_DATA["image_data"],
+        "cell_table": _URL_DATA["cell_table"],
+        "deepcell_output": _URL_DATA["deepcell_output"],
+        "example_pixel_output_dir": _URL_DATA["example_pixel_output_dir"],
+    },
+    "post_clustering": {
+        "image_data": _URL_DATA["image_data"],
+        "cell_table": _URL_DATA["cell_table"],
+        "deepcell_output": _URL_DATA["deepcell_output"],
+        "example_cell_output_dir": _URL_DATA["example_cell_output_dir"],
+    },
+}
+

-# TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
+# Note: Name of the dataset usually match the script name with CamelCase instead of snake_case
 class ArkExample(datasets.GeneratorBasedBuilder):
     """The Dataset consists of 11 FOVs"""

-    VERSION = datasets.Version("0.0.1")
+    VERSION = datasets.Version("0.0.3")

     # This is an example of a dataset with multiple configurations.
     # If you don't want/need to define several sub-sets in your dataset,
@@ -85,45 +105,42 @@ class ArkExample(datasets.GeneratorBasedBuilder):
     # BUILDER_CONFIG_CLASS = MyBuilderConfig

     # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'base_dataset')
-    # data = datasets.load_dataset('my_dataset', 'dev_dataset')
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(
-            name="base_dataset",
+            name="segment_image_data",
+            version=VERSION,
+            description="This configuration contains data used by notebook 1 - Segment Image Data.",
+        ),
+        datasets.BuilderConfig(
+            name="cluster_pixels",
             version=VERSION,
-            description="This dataset contains only the 12 FOVs.",
+            description="This configuration contains data used by notebook 2 - Pixel Clustering (Pixie Pipeline #1).",
         ),
         datasets.BuilderConfig(
-            name="dev_dataset",
+            name="cluster_cells",
             version=VERSION,
-            description="This dataset is a superset of the base_dataset, and contains intermediate data for all notebooks. \
-            Therefore you can start at any notebook with this dataset.",
+            description="This configuration contains data used by notebook 3 - Cell Clustering (Pixie Pipeline #2).",
+        ),
+        datasets.BuilderConfig(
+            name="post_clustering",
+            version=VERSION,
+            description="This configuration contains data used by notebook 4 - Post Clustering.",
         ),
     ]

-    DEFAULT_CONFIG_NAME = (
-        "base_dataset"  # It's not mandatory to have a default configuration. Just use one if it make sense.
-    )
-
     def _info(self):
         # This is the name of the configuration selected in BUILDER_CONFIGS above
-        if self.config.name == "base_dataset":
-            features = datasets.Features(
-                {
-                    "Channel Data": datasets.Sequence(datasets.Image()),
-                    "Channel Names": datasets.Sequence(datasets.Value("string")),
-                    "Data Path": datasets.Value("string"),
-                }
-            )
-        else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
+        if self.config.name in [
+            "segment_image_data",
+            "cluster_pixels",
+            "cluster_cells",
+            "post_clustering",
+        ]:
             features = datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "option2": datasets.Value("string"),
-                    "second_domain_answer": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
-                }
+                {f: datasets.Value("string") for f in _URL_DATASET_CONFIGS[self.config.name].keys()}
             )
+        else:
+            ValueError(f"Dataset name is incorrect, options include {list(_URL_DATASET_CONFIGS.keys())}")
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
@@ -141,50 +158,21 @@ class ArkExample(datasets.GeneratorBasedBuilder):
         )

     def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        urls = _URLS[self.config.name]
-        data_dir = dl_manager.download_and_extract(urls)
+        # This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+        urls = _URL_DATASET_CONFIGS[self.config.name]
+        data_dirs = {}
+        for data_name, url in urls.items():
+            dl_path = pathlib.Path(dl_manager.download_and_extract(url))
+            data_dirs[data_name] = dl_path

         return [
             datasets.SplitGenerator(
-                name="base_dataset",
+                name=self.config.name,
                 # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": pathlib.Path(data_dir)},
+                gen_kwargs={"dataset_paths": data_dirs},
             ),
         ]

     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath: pathlib.Path):
-
-        # Get all TMA paths
-        file_paths = list(pathlib.Path(filepath / "image_data").glob("*"))
-
-        # Loop over all the TMAs
-        for fp in file_paths:
-
-            # Get the TMA FOV Name
-            fov_name = fp.stem
-
-            # Get all channels per TMA FOV
-            channel_paths = fp.glob("*.tiff")
-
-            chan_data = []
-            chan_names = []
-            for chan in channel_paths:
-                chan_name = chan.stem
-                chan_image: np.ndarray = tifffile.imread(chan)
-
-                chan_data.append(chan_image)
-                chan_names.append(chan_name)
-
-            if self.config.name == "base_dataset":
-                yield fov_name, {
-                    "Channel Data": chan_data,
-                    "Channel Names": chan_names,
-                    "Data Path": filepath.as_posix(),
-                }
+    def _generate_examples(self, dataset_paths):
+        yield self.config.name, dataset_paths
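A minimal usage sketch (not part of the commit) for the new per-notebook configurations: the repo id angelolab/ark_example comes from the commented _URL_REPO above, and the split name matches the config name set in _split_generators. Note that in _info the ValueError is constructed but never raised, so an unknown config name would instead surface later as an unbound `features` variable.

import datasets

# Downloads and extracts image_data.zip, cell_table.zip, and deepcell_output.zip;
# any of the four config names from BUILDER_CONFIGS works here.
ds = datasets.load_dataset("angelolab/ark_example", "cluster_pixels")

# _generate_examples yields a single record per config: a mapping from each
# data name to the local path of its downloaded and extracted archive.
paths = ds["cluster_pixels"][0]
print(paths["image_data"], paths["cell_table"], paths["deepcell_output"])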
 
data/image_data.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0a68ba7a78e778830f5c7c937f3228d04e35745b1c51c004cd0d405780a42361
-size 400326580
+oid sha256:8aeaa113900f5067f2cef3d5533c3c0b6ae5538dd86f952193b6271135c0e2a0
+size 400230929
data/pixie/example_cell_output_dir.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4db7ebdd0aa88371ce348a4a5c5f851077cb3bffa1289c014173a0f1492c0006
+size 86425942
data/pixie/example_pixel_output_dir.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b7f4a006fa021d0547592d3e290f122f342b1d6330107b812b0dd6bf5659a14
+size 1494868926
data/segmentation/cell_table.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82b8d88726130899575f97636539f601f0dedcead4b167cfc3a17964a9f4d76c
+size 15162294
data/segmentation/deepcell_output.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54aaebefc23cb41777e24ac85aa23fc495c31591cf0f8309eb8b8751437e5294
+size 1552387
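The archives above are tracked with Git LFS, so each diff touches only the three-line pointer file (spec version, sha256 oid, byte size), not the archive itself. A minimal sketch for checking a downloaded archive against its pointer fields; the helper name and local path are illustrative:

import hashlib
import pathlib

def verify_lfs_object(path: pathlib.Path, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file's byte size and sha256 digest match the LFS pointer."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        # Hash in 1 MiB chunks to avoid loading large archives into memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return size == expected_size and digest.hexdigest() == expected_oid

# Values taken from the new data/image_data.zip pointer above:
ok = verify_lfs_object(
    pathlib.Path("data/image_data.zip"),
    expected_oid="8aeaa113900f5067f2cef3d5533c3c0b6ae5538dd86f952193b6271135c0e2a0",
    expected_size=400230929,
)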