File size: 2,707 Bytes
fff2423
 
4b01d0b
f3cd12b
4b01d0b
f3cd12b
4b01d0b
fff2423
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
882edd3
fff2423
 
 
 
 
 
 
 
 
049559c
fff2423
 
 
 
 
 
c13f51a
 
f3cd12b
fff2423
 
 
 
 
4cf87cc
fff2423
 
 
 
 
 
5d66ba4
4cf87cc
fff2423
 
 
 
 
 
 
4cf87cc
fff2423
 
 
 
 
 
 
52706f7
8e87b6a
e54d51d
c13f51a
da1d304
0747b14
c13f51a
8e87b6a
0747b14
da1d304
fff2423
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
import os
import datasets

# Emit verbose library logs while this loader is under development;
# switch to the commented info level for quieter production use.
datasets.logging.set_verbosity_debug()
#datasets.logging.set_verbosity_info()
# Module-level logger, following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)


_DESCRIPTION = """\
A segmentation dataset for [TODO: complete...]
"""


# Landing page of the dataset on the Hugging Face Hub.
_HOMEPAGE = "https://huggingface.co/datasets/alkzar90/cell_benchmark"
# Image file extensions expected in the archives (informational; not
# currently enforced anywhere in this script).
_EXTENSION = [".jpg", ".png"]
# Base URL of the repository's data/ folder on the Hub.
_URL_BASE = "https://huggingface.co/datasets/alkzar90/cell_benchmark/resolve/main/data/"
# One downloadable zip per split, plus a shared archive of segmentation masks.
_SPLIT_URLS = {
  "train":  _URL_BASE + "train.zip",
  "val":    _URL_BASE + "val.zip",
  "test":   _URL_BASE + "test.zip",
  "masks":  _URL_BASE + "masks.zip",
}



class Cellsegmentation(datasets.GeneratorBasedBuilder):
  """Builder for the cell segmentation dataset hosted on the Hugging Face Hub.

  Each example pairs an input image with its segmentation mask. Masks live in
  a separate ``masks`` archive and are matched to images by filename, as
  ``mask_<image-stem>.png``.
  """

  def _info(self):
    """Return dataset metadata: features, supervised keys, and homepage."""
    features = datasets.Features({
         "image": datasets.Image(),           # input image
         "masks": datasets.Image(),           # segmentation mask for "image"
         "path" : datasets.Value("string"),   # original image file path
      })
    return datasets.DatasetInfo(
        description=_DESCRIPTION,
        features=features,  # already a Features instance; re-wrapping was redundant
        supervised_keys=("image", "masks"),
        homepage=_HOMEPAGE,
        citation="",
    )

  def _split_generators(self, dl_manager):
    """Download/extract the split archives and build one generator per split.

    Args:
      dl_manager: `datasets.DownloadManager` supplied by the library.

    Returns:
      list of `datasets.SplitGenerator` for train/validation/test.
    """
    data_files = dl_manager.download_and_extract(_SPLIT_URLS)
    # Resolve the extracted masks directory ONCE, as a plain string.
    # Previously the raw iter_files() generator object was shared by all
    # three splits; the first split's _generate_examples exhausted it, so
    # the validation and test splits crashed with IndexError.
    masks_dir = os.path.dirname(
        next(iter(dl_manager.iter_files([data_files["masks"]])))
    )

    split_specs = [
        (datasets.Split.TRAIN,      "train", "training"),
        (datasets.Split.VALIDATION, "val",   "validation"),
        (datasets.Split.TEST,       "test",  "test"),
    ]
    return [
        datasets.SplitGenerator(
            name=name,
            gen_kwargs={
                "files": dl_manager.iter_files([data_files[key]]),
                "masks_dir": masks_dir,
                "split": split_name,
            },
        )
        for name, key, split_name in split_specs
    ]

  def _generate_examples(self, files, masks_dir, split):
    """Yield ``(key, example)`` pairs, matching each image to its mask.

    Args:
      files: iterable of image file paths for this split.
      masks_dir: directory (string) containing the mask PNGs.
      split: split name string (unused; kept for gen_kwargs compatibility).
    """
    for idx, path in enumerate(files):
      # Masks are named "mask_<image-stem>.png". Swap only the extension
      # via splitext — the old basename.replace("jpg", "png") would also
      # corrupt filenames that merely contain "jpg" elsewhere.
      stem = os.path.splitext(os.path.basename(path))[0]
      yield idx, {
           "image": path,
           "masks": os.path.join(masks_dir, "mask_" + stem + ".png"),
           "path": path,
      }