| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| import logging |
| import os |
| import sys |
| import tempfile |
| from glob import glob |
|
|
| import nibabel as nib |
| import numpy as np |
| import torch |
| from ignite.metrics import Accuracy |
|
|
| import monai |
| from monai.data import create_test_image_3d |
| from monai.engines import SupervisedEvaluator |
| from monai.handlers import CheckpointLoader, MeanDice, SegmentationSaver, StatsHandler |
| from monai.inferers import SlidingWindowInferer |
| from monai.transforms import ( |
| Activationsd, |
| AsChannelFirstd, |
| AsDiscreted, |
| Compose, |
| KeepLargestConnectedComponentd, |
| LoadNiftid, |
| ScaleIntensityd, |
| ToTensord, |
| ) |
|
|
|
|
def main(tempdir):
    """Evaluate a 3D UNet on synthetic Nifti data with sliding-window inference.

    Generates 5 random image/segmentation pairs in ``tempdir``, restores the
    best saved checkpoint from ``./runs/``, and runs a MONAI
    ``SupervisedEvaluator`` that reports mean Dice and accuracy and writes the
    predicted segmentations back to ``./runs/``.

    Args:
        tempdir: directory into which the synthetic ``im*.nii.gz`` /
            ``seg*.nii.gz`` files are written.

    Raises:
        FileNotFoundError: if no ``./runs/net_key_metric*`` checkpoint exists
            (a model must have been trained and saved beforehand).
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # Create 5 random (image, mask) Nifti pairs as the validation set.
    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(5):
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"im{i:d}.nii.gz"))
        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

    # Sort both lists so image i is zipped with segmentation i.
    images = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
    val_files = [{"image": img, "label": seg} for img, seg in zip(images, segs)]

    # glob() ordering is platform-dependent, so sort for a deterministic
    # choice, and fail with a clear message instead of an opaque IndexError
    # when no checkpoint has been produced yet. Checkpoints are named with
    # the key metric value, so the lexicographically last entry corresponds
    # to the best metric — TODO confirm against the training script's
    # CheckpointSaver configuration.
    model_files = sorted(glob("./runs/net_key_metric*"))
    if not model_files:
        raise FileNotFoundError(
            "no checkpoint matching './runs/net_key_metric*' found; run the training workflow first"
        )
    model_file = model_files[-1]

    # Pre-processing: load Nifti, move channel dim first, scale intensities,
    # and convert to tensors for the network.
    val_transforms = Compose(
        [
            LoadNiftid(keys=["image", "label"]),
            AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
            ScaleIntensityd(keys="image"),
            ToTensord(keys=["image", "label"]),
        ]
    )

    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=4)

    # Network must match the architecture used at training time so the
    # checkpoint's state dict loads cleanly.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)

    # Post-processing on the model output: sigmoid -> binarize -> keep the
    # largest connected component of label 1.
    val_post_transforms = Compose(
        [
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold_values=True),
            KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
        ]
    )
    val_handlers = [
        # No per-iteration output logging; StatsHandler still prints metrics.
        StatsHandler(output_transform=lambda x: None),
        CheckpointLoader(load_path=model_file, load_dict={"net": net}),
        SegmentationSaver(
            output_dir="./runs/",
            batch_transform=lambda batch: batch["image_meta_dict"],
            output_transform=lambda output: output["pred"],
        ),
    ]

    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
        post_transform=val_post_transforms,
        key_val_metric={
            "val_mean_dice": MeanDice(include_background=True, output_transform=lambda x: (x["pred"], x["label"]))
        },
        additional_metrics={"val_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]))},
        val_handlers=val_handlers,
        # Native AMP requires PyTorch >= 1.6; the comparison is already a bool.
        amp=monai.config.get_torch_version_tuple() >= (1, 6),
    )
    evaluator.run()
|
|
|
|
if __name__ == "__main__":
    # Synthetic inputs are written to a temp dir that is removed on exit;
    # evaluation outputs still go to ./runs/ (see main()).
    with tempfile.TemporaryDirectory() as tempdir:
        main(tempdir)
|
|