Datasets:

Modalities:
Geospatial
Languages:
English
DOI:
Libraries:
License:
cmalbrec committed on
Commit
97064ef
·
verified ·
1 Parent(s): 80c0b47

one more round of dataset format updates

Browse files
Files changed (1) hide show
  1. dataset.py +23 -23
dataset.py CHANGED
@@ -1,5 +1,4 @@
1
 
2
- import os
3
  import datasets
4
 
5
  class ForestSegmentationDataset(datasets.GeneratorBasedBuilder):
@@ -7,36 +6,37 @@ class ForestSegmentationDataset(datasets.GeneratorBasedBuilder):
7
  return datasets.DatasetInfo(
8
  features=datasets.Features({
9
  "sample_id": datasets.Value("string"),
10
- "image": datasets.Image(), # Path to all_bands.tif
11
- "mask": datasets.Image(), # Path to mask.tif
 
 
 
 
 
 
 
 
 
12
  }),
13
  )
14
 
15
  def _split_generators(self, dl_manager):
16
- # Streaming mode: use iter_files to lazily iterate over image files
17
- image_paths = dl_manager.iter_files("images")
18
  return [
19
  datasets.SplitGenerator(
20
  name=datasets.Split.TRAIN,
21
- gen_kwargs={"image_paths": image_paths}
22
  )
23
  ]
24
 
25
- def _generate_examples(self, image_paths):
26
- for image_path in image_paths:
27
- if not image_path.endswith("all_bands.tif"):
28
- continue
29
-
30
- # Extract sample_id from path: images/0000005/<timestamp>/all_bands.tif
31
- parts = image_path.split("/")
32
- if len(parts) < 3:
33
- continue # Skip malformed paths
34
-
35
- sample_id = parts[-3] # "0000005"
36
- mask_path = f"masks/{sample_id}/mask.tif"
37
-
38
- yield sample_id, {
39
- "sample_id": sample_id,
40
- "image": image_path,
41
- "mask": mask_path,
42
  }
 
1
 
 
2
  import datasets
3
 
4
  class ForestSegmentationDataset(datasets.GeneratorBasedBuilder):
 
6
  return datasets.DatasetInfo(
7
  features=datasets.Features({
8
  "sample_id": datasets.Value("string"),
9
+ "image_paths": datasets.Sequence(datasets.Value("string")), # list of GeoTIFF paths
10
+ "mask": datasets.Value("string"), # path to mask
11
+ "start_timestamps": datasets.Sequence(datasets.Value("string")),
12
+ "end_timestamps": datasets.Sequence(datasets.Value("string")),
13
+ "sentinel_tile_ids": datasets.Sequence(datasets.Value("string")),
14
+ "bboxes": {
15
+ "minx": datasets.Sequence(datasets.Value("float32")),
16
+ "maxx": datasets.Sequence(datasets.Value("float32")),
17
+ "miny": datasets.Sequence(datasets.Value("float32")),
18
+ "maxy": datasets.Sequence(datasets.Value("float32")),
19
+ }
20
  }),
21
  )
22
 
23
  def _split_generators(self, dl_manager):
24
+ sample_stream = dl_manager.iter_jsonl("index.jsonl")
 
25
  return [
26
  datasets.SplitGenerator(
27
  name=datasets.Split.TRAIN,
28
+ gen_kwargs={"samples": sample_stream}
29
  )
30
  ]
31
 
32
+ def _generate_examples(self, samples):
33
+ for sample in samples:
34
+ yield sample["sample_id"], {
35
+ "sample_id": sample["sample_id"],
36
+ "image_paths": sample["image_paths"],
37
+ "mask": sample["mask_path"],
38
+ "start_timestamps": sample["start_timestamps"],
39
+ "end_timestamps": sample["end_timestamps"],
40
+ "sentinel_tile_ids": sample["sentinel_tile_ids"],
41
+ "bboxes": sample["bboxes"],
 
 
 
 
 
 
 
42
  }