Modify datasets & transforms script

Files changed:
- rsna_datasets.py: +44 -87
- rsna_transforms.py: +63 -190

rsna_datasets.py (CHANGED)
@@ -15,7 +15,6 @@ class Segmentation3DDataset(IterableDataset):
         split: Literal["train", "test"],
         streaming: bool = True,
         volume_transforms: monai.transforms.Compose = None,
-        mask_transforms: monai.transforms.Compose = None,
         transform_configs: TypedDict(
             "",
             {
@@ -53,13 +52,6 @@ class Segmentation3DDataset(IterableDataset):
             streaming=streaming,
         )
 
-        self.mask_transforms = mask_transforms or rsna_transforms.mask_transforms(
-            crop_strategy=transform_configs["crop_strategy"],
-            voxel_spacing=transform_configs["voxel_spacing"],
-            volume_size=transform_configs["volume_size"],
-            axcodes=transform_configs["axcodes"],
-            streaming=streaming,
-        )
         self.yield_extra_info = True  # For debugging purposes
 
     def __iter__(self):
@@ -78,23 +70,20 @@ class Segmentation3DDataset(IterableDataset):
             yield from self._process_one_sample(data, worker_id=worker_id)
 
     def _process_one_sample(self, data, worker_id):
-        img_data = self.volume_transforms(
-            {…}
-        )
-        seg_data = self.mask_transforms({"seg": data["seg_path"]})
+        data["img"] = data.pop("img_path")
+        data["seg"] = data.pop("seg_path")
+        data = self.volume_transforms(data)
 
-        img_data = [img_data] if not isinstance(img_data, (list, tuple)) else img_data
-        seg_data = [seg_data] if not isinstance(seg_data, (list, tuple)) else seg_data
+        data = [data] if not isinstance(data, (list, tuple)) else data
 
-        for …:
+        for crop in data:
             to_yield = {
-                "img": …,
-                "seg": …,
+                "img": crop["img"],
+                "seg": crop["seg"],
             }
             if self.yield_extra_info:
                 to_yield["worker_id"] = worker_id
-                to_yield["series_id"] = data["metadata"]["series_id"]
-
+                to_yield["series_id"] = data[0]["metadata"]["series_id"]
             yield to_yield
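The rewritten `_process_one_sample` leans on one MONAI behaviour: a `Compose` that ends in `RandSpatialCropSamplesd` returns a list of crop dicts, while the other crop strategies return a single dict. A minimal sketch of that normalization idiom under those assumptions (`iter_crops` and `pipeline` are illustrative names, not code from this repo):

```python
# Sketch: fold both paths into one dict, run the single pipeline,
# normalize the output to a list, then yield one sample per crop.
def iter_crops(sample: dict, pipeline):
    sample["img"] = sample.pop("img_path")
    sample["seg"] = sample.pop("seg_path")
    out = pipeline(sample)
    # RandSpatialCropSamplesd(num_samples=N) -> list of N dicts;
    # center/random/none strategies -> one dict.
    out = [out] if not isinstance(out, (list, tuple)) else out
    for crop in out:
        yield {"img": crop["img"], "seg": crop["seg"]}
```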
@@ -188,7 +177,6 @@ class MaskedClassification3DDataset(IterableDataset):
         split: Literal["train", "test"],
         streaming: bool = True,
         volume_transforms: monai.transforms.Compose = None,
-        mask_transforms: monai.transforms.Compose = None,
         transform_configs: TypedDict(
             "",
             {
@@ -225,13 +213,6 @@ class MaskedClassification3DDataset(IterableDataset):
             axcodes=transform_configs["axcodes"],
             streaming=streaming,
         )
-        self.mask_transforms = mask_transforms or rsna_transforms.mask_transforms(
-            crop_strategy=transform_configs["crop_strategy"],
-            voxel_spacing=transform_configs["voxel_spacing"],
-            volume_size=transform_configs["volume_size"],
-            axcodes=transform_configs["axcodes"],
-            streaming=streaming,
-        )
 
         self.yield_extra_info = True
 
@@ -251,17 +232,23 @@ class MaskedClassification3DDataset(IterableDataset):
             yield from self._process_one_sample(data, worker_id=worker_id)
 
     def _process_one_sample(self, data, worker_id):
-        img_data = self.volume_transforms(
-            {…}
-        )
-        seg_data = self.mask_transforms({"seg": data["seg_path"]})
-        img_data = [img_data] if not isinstance(img_data, (list, tuple)) else img_data
-        seg_data = [seg_data] if not isinstance(seg_data, (list, tuple)) else seg_data
+        img_seg_data = self.volume_transforms(
+            {
+                "img": data["img_path"],
+                "seg": data["seg_path"],
+                "metadata": data["metadata"],
+            }
+        )
+        img_seg_data = (
+            [img_seg_data]
+            if not isinstance(img_seg_data, (list, tuple))
+            else img_seg_data
+        )
 
-        for …:
+        for img_seg in img_seg_data:
             to_yield = {
-                "img": …,
-                "seg": …,
+                "img": img_seg["img"],
+                "seg": img_seg["seg"],
                 "bowel": data["bowel"],
                 "extravasation": data["extravasation"],
                 "kidney": data["kidney"],
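Every crop sampled from one series is paired with the same study-level targets. A hedged sketch of that pairing, using only the label keys visible in this hunk (`attach_labels` is a hypothetical helper, not part of the repo):

```python
def attach_labels(crops: list, data: dict) -> list:
    # Each spatial crop of a series shares the series' study-level labels.
    return [
        {
            "img": crop["img"],
            "seg": crop["seg"],
            "bowel": data["bowel"],
            "extravasation": data["extravasation"],
            "kidney": data["kidney"],
        }
        for crop in crops
    ]
```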
@@ -283,7 +270,6 @@ class Segmentation2DDataset(IterableDataset):
         split: Literal["train", "test"],
         streaming: bool = True,
         volume_transforms: monai.transforms.Compose = None,
-        mask_transforms: monai.transforms.Compose = None,
         slice_transforms: torchvision.transforms.Compose = None,
         volume_transform_configs: TypedDict(
             "",
@@ -333,13 +319,6 @@ class Segmentation2DDataset(IterableDataset):
             axcodes=volume_transform_configs["axcodes"],
             streaming=streaming,
         )
-        self.mask_transforms = mask_transforms or rsna_transforms.mask_transforms(
-            crop_strategy=volume_transform_configs["crop_strategy"],
-            voxel_spacing=volume_transform_configs["voxel_spacing"],
-            volume_size=volume_transform_configs["volume_size"],
-            axcodes=volume_transform_configs["axcodes"],
-            streaming=streaming,
-        )
         self.slice_transforms = slice_transforms or rsna_transforms.slice_transforms(
             crop_strategy=slice_transform_configs["crop_strategy"],
             shorter_edge_length=slice_transform_configs["shorter_edge_length"],
@@ -363,26 +342,20 @@ class Segmentation2DDataset(IterableDataset):
             yield from self._process_one_sample(data, worker_id=worker_id)
 
     def _process_one_sample(self, data, worker_id):
-        vol_img_data = self.volume_transforms(
-            {…}
-        )
-        vol_seg_data = self.mask_transforms({"seg": data["seg_path"]})
-        vol_img_data = (
-            [vol_img_data]
-            if not isinstance(vol_img_data, (list, tuple))
-            else vol_img_data
-        )
-        vol_seg_data = (
-            [vol_seg_data]
-            if not isinstance(vol_seg_data, (list, tuple))
-            else vol_seg_data
-        )
+        vol_data = self.volume_transforms(
+            {
+                "img": data["img_path"],
+                "seg": data["seg_path"],
+                "metadata": data["metadata"],
+            }
+        )
+        vol_data = [vol_data] if not isinstance(vol_data, (list, tuple)) else vol_data
 
-        for …:
-            slice_len = …
+        for vol in vol_data:
+            slice_len = vol["img"].size()[-1]
             for i in range(slice_len):
-                slice_img_data = self.slice_transforms(…)
-                slice_seg_data = self.slice_transforms(…)
+                slice_img_data = self.slice_transforms(vol["img"][..., i])
+                slice_seg_data = self.slice_transforms(vol["seg"][..., i])
 
                 slice_img_data = (
                     [slice_img_data]
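The 2D datasets cut each transformed volume into slices by indexing the last spatial axis: with a channel-first `(C, H, W, D)` tensor, `vol["img"][..., i]` yields a `(C, H, W)` slice the torchvision pipeline can consume, and image and mask are sliced at the same index so they stay paired. A small standalone sketch of that loop (illustrative, no repo code):

```python
import torch

def iter_slices(vol: dict):
    # vol["img"] / vol["seg"]: channel-first (C, H, W, D) volumes.
    slice_len = vol["img"].size()[-1]
    for i in range(slice_len):
        yield vol["img"][..., i], vol["seg"][..., i]  # each (C, H, W)

# e.g. a (1, 96, 96, 96) pair yields 96 aligned image/mask slices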
@@ -528,7 +501,6 @@ class MaskedClassification2DDataset(IterableDataset):
         split: Literal["train", "test"],
         streaming: bool = True,
         volume_transforms: monai.transforms.Compose = None,
-        mask_transforms: monai.transforms.Compose = None,
         slice_transforms: torchvision.transforms.Compose = None,
         volume_transform_configs: TypedDict(
             "",
@@ -579,14 +551,6 @@ class MaskedClassification2DDataset(IterableDataset):
             streaming=streaming,
         )
 
-        self.mask_transforms = mask_transforms or rsna_transforms.mask_transforms(
-            crop_strategy=volume_transform_configs["crop_strategy"],
-            voxel_spacing=volume_transform_configs["voxel_spacing"],
-            volume_size=volume_transform_configs["volume_size"],
-            axcodes=volume_transform_configs["axcodes"],
-            streaming=streaming,
-        )
-
         self.slice_transforms = slice_transforms or rsna_transforms.slice_transforms(
             crop_strategy=slice_transform_configs["crop_strategy"],
             shorter_edge_length=slice_transform_configs["shorter_edge_length"],
@@ -610,26 +574,20 @@ class MaskedClassification2DDataset(IterableDataset):
             yield from self._process_one_sample(data, worker_id=worker_id)
 
     def _process_one_sample(self, data, worker_id):
-        vol_img_data = self.volume_transforms(
-            {…}
-        )
-        vol_seg_data = self.mask_transforms({"seg": data["seg_path"]})
-        vol_img_data = (
-            [vol_img_data]
-            if not isinstance(vol_img_data, (list, tuple))
-            else vol_img_data
-        )
-        vol_seg_data = (
-            [vol_seg_data]
-            if not isinstance(vol_seg_data, (list, tuple))
-            else vol_seg_data
-        )
+        vol_data = self.volume_transforms(
+            {
+                "img": data["img_path"],
+                "seg": data["seg_path"],
+                "metadata": data["metadata"],
+            }
+        )
+        vol_data = [vol_data] if not isinstance(vol_data, (list, tuple)) else vol_data
 
-        for …:
-            slice_len = …
+        for vol in vol_data:
+            slice_len = vol["img"].size()[-1]
             for i in range(slice_len):
-                slice_img_data = self.slice_transforms(…)
-                slice_seg_data = self.slice_transforms(…)
+                slice_img_data = self.slice_transforms(vol["img"][..., i])
+                slice_seg_data = self.slice_transforms(vol["seg"][..., i])
 
                 slice_img_data = (
                     [slice_img_data]
@@ -658,4 +616,3 @@ class MaskedClassification2DDataset(IterableDataset):
                 to_yield["series_id"] = data["metadata"]["series_id"]
 
             yield to_yield
-
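Net effect on the dataset API: `mask_transforms` disappears from all four constructors, and the single `volume_transforms` pipeline now carries both the `img` and `seg` keys. Running the pair through one dict pipeline means random operations such as `RandSpatialCropSamplesd` draw one set of crop coordinates for both tensors, which keeps image and mask spatially aligned; with two independent `Compose` pipelines, random crops could land in different places. A hypothetical before/after call (the import path and argument values are illustrative, and whether the remaining constructor arguments have defaults is not shown in this diff):

```python
from rsna_datasets import Segmentation3DDataset  # assumed import path

# Before this commit (two pipelines, independently random):
# ds = Segmentation3DDataset(split="train", volume_transforms=..., mask_transforms=...)

# After: one dict-based pipeline transforms "img" and "seg" together.
ds = Segmentation3DDataset(split="train", streaming=True)
```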
rsna_transforms.py (CHANGED)
@@ -1,4 +1,4 @@
-from typing import Optional, Literal
+from typing import Optional, Literal, Union
 
 from io import BytesIO
 import numpy as np
@@ -386,206 +386,79 @@ def volume_transforms(
     volume_size: tuple[int, int, int] = (96, 96, 96),
     axcodes: str = "RAS",
     streaming: bool = False,
-) -> monai.transforms.Compose:
-    if crop_strategy == "oversample":
-        return monai.transforms.Compose(
-            [
-                LoadNIfTIFromHFHubd(keys=["img"])
-                if streaming
-                else LoadNIfTIFromLocalCached(keys=["img"]),
-                monai.transforms.EnsureTyped(
-                    keys=["img"], data_type="tensor", dtype=torch.float32
-                ),
-                UnifyUnusualNIfTI(
-                    x_key="img",
-                    metadata_key="metadata",
-                    meta_pixel_representation_key="pixel_representation",
-                    meta_bits_allocated_key="bits_allocated",
-                    meta_bits_stored_key="bits_stored",
-                ),
-                monai.transforms.EnsureChannelFirstd(keys=["img"]),
-                monai.transforms.Orientationd(keys=["img"], axcodes=axcodes),
-                monai.transforms.Spacingd(
-                    keys=["img"], pixdim=voxel_spacing, mode=["bilinear"]
-                ),
-                monai.transforms.NormalizeIntensityd(keys=["img"], nonzero=False),
-                monai.transforms.ScaleIntensityd(keys=["img"], minv=-1.0, maxv=1.0),
-                monai.transforms.SpatialPadd(keys=["img"], spatial_size=volume_size),
-                monai.transforms.RandSpatialCropSamplesd(
-                    keys=["img"],
-                    roi_size=volume_size,
-                    num_samples=3,
-                    random_center=True,
-                    random_size=False,
-                ),
-            ]
-        )
-
-    elif crop_strategy == "center":
-        return monai.transforms.Compose(
-            [
-                LoadNIfTIFromHFHubd(keys=["img"])
-                if streaming
-                else LoadNIfTIFromLocalCached(keys=["img"]),
-                monai.transforms.EnsureTyped(
-                    keys=["img"], data_type="tensor", dtype=torch.float32
-                ),
-                UnifyUnusualNIfTI(
-                    x_key="img",
-                    metadata_key="metadata",
-                    meta_pixel_representation_key="pixel_representation",
-                    meta_bits_allocated_key="bits_allocated",
-                    meta_bits_stored_key="bits_stored",
-                ),
-                monai.transforms.EnsureChannelFirstd(keys=["img"]),
-                monai.transforms.Orientationd(keys=["img"], axcodes=axcodes),
-                monai.transforms.Spacingd(
-                    keys=["img"], pixdim=voxel_spacing, mode=["bilinear"]
-                ),
-                monai.transforms.NormalizeIntensityd(keys=["img"], nonzero=False),
-                monai.transforms.ScaleIntensityd(keys=["img"], minv=-1.0, maxv=1.0),
-                monai.transforms.SpatialPadd(keys=["img"], spatial_size=volume_size),
-                monai.transforms.CenterSpatialCropd(keys=["img"], roi_size=volume_size),
-            ]
-        )
-
-    elif crop_strategy == "random":
-        return monai.transforms.Compose(
-            [
-                LoadNIfTIFromHFHubd(keys=["img"])
-                if streaming
-                else LoadNIfTIFromLocalCached(keys=["img"]),
-                monai.transforms.EnsureTyped(
-                    keys=["img"], data_type="tensor", dtype=torch.float32
-                ),
-                UnifyUnusualNIfTI(
-                    x_key="img",
-                    metadata_key="metadata",
-                    meta_pixel_representation_key="pixel_representation",
-                    meta_bits_allocated_key="bits_allocated",
-                    meta_bits_stored_key="bits_stored",
-                ),
-                monai.transforms.EnsureChannelFirstd(keys=["img"]),
-                monai.transforms.Orientationd(keys=["img"], axcodes=axcodes),
-                monai.transforms.Spacingd(
-                    keys=["img"], pixdim=voxel_spacing, mode=["bilinear"]
-                ),
-                monai.transforms.NormalizeIntensityd(keys=["img"], nonzero=False),
-                monai.transforms.ScaleIntensityd(keys=["img"], minv=-1.0, maxv=1.0),
-                monai.transforms.SpatialPadd(keys=["img"], spatial_size=volume_size),
-                monai.transforms.RandSpatialCropd(
-                    keys=["img"],
-                    roi_size=volume_size,
-                    random_center=True,
-                    random_size=False,
-                ),
-            ]
-        )
-
-    elif crop_strategy == "none" or crop_strategy is None:
-        return monai.transforms.Compose(
-            [
-                LoadNIfTIFromHFHubd(keys=["img"])
-                if streaming
-                else LoadNIfTIFromLocalCached(keys=["img"]),
-                monai.transforms.EnsureTyped(
-                    keys=["img"], data_type="tensor", dtype=torch.float32
-                ),
-                UnifyUnusualNIfTI(
-                    x_key="img",
-                    metadata_key="metadata",
-                    meta_pixel_representation_key="pixel_representation",
-                    meta_bits_allocated_key="bits_allocated",
-                    meta_bits_stored_key="bits_stored",
-                ),
-                monai.transforms.EnsureChannelFirstd(keys=["img"]),
-                monai.transforms.Orientationd(keys=["img"], axcodes=axcodes),
-                monai.transforms.Spacingd(
-                    keys=["img"], pixdim=voxel_spacing, mode=["bilinear"]
-                ),
-                monai.transforms.NormalizeIntensityd(keys=["img"], nonzero=False),
-                monai.transforms.ScaleIntensityd(keys=["img"], minv=-1.0, maxv=1.0),
-            ]
-        )
-
-    else:
-        raise ValueError(
-            f"crop_strategy must be one of ['oversample', 'center', 'random', 'none'], got {crop_strategy}."
-        )
-
-
-def mask_transforms(
-    crop_strategy: Optional[Literal["oversample", "center", "none"]] = "oversample",
-    voxel_spacing: tuple[float, float, float] = (3.0, 3.0, 3.0),
-    volume_size: tuple[int, int, int] = (96, 96, 96),
-    axcodes: str = "RAS",
-    streaming: bool = False,
-) -> monai.transforms.Compose:
-    if crop_strategy == "oversample":
-        return monai.transforms.Compose(
-            [
-                LoadNIfTIFromHFHubd(keys=["seg"])
-                if streaming
-                else LoadNIfTIFromLocalCached(keys=["seg"]),
-                monai.transforms.EnsureTyped(
-                    keys=["seg"], data_type="tensor", dtype=torch.float32
-                ),
-                monai.transforms.EnsureChannelFirstd(keys=["seg"]),
-                monai.transforms.Orientationd(keys=["seg"], axcodes=axcodes),
-                monai.transforms.Spacingd(
-                    keys=["seg"], pixdim=voxel_spacing, mode=["nearest"]
-                ),
-                monai.transforms.SpatialPadd(keys=["seg"], spatial_size=volume_size),
-                monai.transforms.RandSpatialCropSamplesd(
-                    keys=["seg"],
-                    roi_size=volume_size,
-                    num_samples=3,
-                    random_center=True,
-                    random_size=False,
-                ),
-            ]
-        )
-
-    elif crop_strategy == "center":
-        return monai.transforms.Compose(
-            [
-                LoadNIfTIFromHFHubd(keys=["seg"])
-                if streaming
-                else LoadNIfTIFromLocalCached(keys=["seg"]),
-                monai.transforms.EnsureTyped(
-                    keys=["seg"], data_type="tensor", dtype=torch.float32
-                ),
-                monai.transforms.EnsureChannelFirstd(keys=["seg"]),
-                monai.transforms.Orientationd(keys=["seg"], axcodes=axcodes),
-                monai.transforms.Spacingd(
-                    keys=["seg"], pixdim=voxel_spacing, mode=["nearest"]
-                ),
-                monai.transforms.SpatialPadd(keys=["seg"], spatial_size=volume_size),
-                monai.transforms.CenterSpatialCropd(keys=["seg"], roi_size=volume_size),
-            ]
-        )
-
-    elif crop_strategy == "none" or crop_strategy is None:
-        return monai.transforms.Compose(
-            [
-                LoadNIfTIFromHFHubd(keys=["seg"])
-                if streaming
-                else LoadNIfTIFromLocalCached(keys=["seg"]),
-                monai.transforms.EnsureTyped(
-                    keys=["seg"], data_type="tensor", dtype=torch.float32
-                ),
-                monai.transforms.EnsureChannelFirstd(keys=["seg"]),
-                monai.transforms.Orientationd(keys=["seg"], axcodes=axcodes),
-                monai.transforms.Spacingd(
-                    keys=["seg"], pixdim=voxel_spacing, mode=["nearest"]
-                ),
-            ]
-        )
-    else:
-        raise ValueError(
-            f"crop_strategy must be one of ['oversample', 'center', 'none'], got {crop_strategy}."
-        )
+):
+    transform_steps = [
+        LoadNIfTIFromHFHubd(keys=["img", "seg"], allow_missing_keys=True)
+        if streaming
+        else LoadNIfTIFromLocalCached(keys=["img", "seg"], allow_missing_keys=True),
+        monai.transforms.EnsureTyped(
+            keys=["img", "seg"],
+            data_type="tensor",
+            dtype=torch.float32,
+            allow_missing_keys=True,
+        ),
+        UnifyUnusualNIfTI(
+            x_key="img",
+            metadata_key="metadata",
+            meta_pixel_representation_key="pixel_representation",
+            meta_bits_allocated_key="bits_allocated",
+            meta_bits_stored_key="bits_stored",
+        ),
+        monai.transforms.EnsureChannelFirstd(
+            keys=["img", "seg"], allow_missing_keys=True
+        ),
+        monai.transforms.Orientationd(
+            keys=["img", "seg"], axcodes=axcodes, allow_missing_keys=True
+        ),
+        monai.transforms.Spacingd(
+            keys=["img", "seg"],
+            pixdim=voxel_spacing,
+            mode=["bilinear", "nearest"],
+            allow_missing_keys=True,
+        ),
+        monai.transforms.NormalizeIntensityd(keys=["img"], nonzero=False),
+        monai.transforms.ScaleIntensityd(keys=["img"], minv=-1.0, maxv=1.0),
+        monai.transforms.SpatialPadd(
+            keys=["img", "seg"], spatial_size=volume_size, allow_missing_keys=True
+        ),
+    ]
 
+    if crop_strategy == "oversample":
+        transform_steps.append(
+            monai.transforms.RandSpatialCropSamplesd(
+                keys=["img", "seg"],
+                roi_size=volume_size,
+                num_samples=3,
+                random_center=True,
+                random_size=False,
+                allow_missing_keys=True,
+            )
+        )
+    elif crop_strategy == "random":
+        transform_steps.append(
+            monai.transforms.RandSpatialCropd(
+                keys=["img", "seg"],
+                roi_size=volume_size,
+                random_center=True,
+                random_size=False,
+                allow_missing_keys=True,
+            )
+        )
+    elif crop_strategy == "center":
+        transform_steps.append(
+            monai.transforms.CenterSpatialCropd(
+                keys=["img", "seg"], roi_size=volume_size, allow_missing_keys=True
+            )
+        )
+    elif crop_strategy == "none" or crop_strategy is None:
+        pass
+    else:
+        raise ValueError(
+            f"crop_strategy must be one of ['oversample', 'center', 'random', 'none'], got {crop_strategy}."
+        )
 
+    return monai.transforms.Compose(transform_steps)
+
 
 def slice_transforms(
     crop_strategy: Literal["ten", "five", "center", "random"] = "ten",
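Two MONAI details carry this refactor: `allow_missing_keys=True` lets the same dict pipeline run on samples that lack a `"seg"` entry (e.g. an unannotated test split), and passing `mode=["bilinear", "nearest"]` to `Spacingd` pairs one interpolation mode per key, so images are resampled bilinearly while label masks stay nearest-neighbour. A minimal, standalone demonstration of the missing-key behaviour on one transform from the new pipeline (not code from this repo):

```python
import torch
import monai.transforms as T

pad = T.SpatialPadd(
    keys=["img", "seg"], spatial_size=(8, 8, 8), allow_missing_keys=True
)

# With both keys present, both tensors are padded identically.
both = pad({"img": torch.zeros(1, 4, 4, 4), "seg": torch.zeros(1, 4, 4, 4)})

# With "seg" absent, the transform silently skips it instead of raising.
img_only = pad({"img": torch.zeros(1, 4, 4, 4)})
```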