# Dataset settings — coronary artery dual-branch (LCX/LAD) segmentation.
# Expected layout under ``data_root`` (matching the ``data_prefix`` values
# configured in the dataloaders below):
# dataset/
# ├── images_concat/
# │   ├── train/              # training images
# │   └── val/                # validation images
# ├── LCX_mask/
# │   ├── train/              # LCX (left circumflex) training labels
# │   └── val/                # LCX validation labels
# └── LAD_mask/
#     ├── train/              # LAD (left anterior descending) training labels
#     └── val/                # LAD validation labels

# Dataset class registered elsewhere in the project; loads one image together
# with multiple per-vessel label maps (LCX and LAD).
dataset_type = 'MainVesselDataset'
# Absolute path to the dataset root on the training machine.
data_root = '/media/yw/SDA3/nnUnet_dataset/origin_data_1031/main_segment_data/dataset/'
# data_root = rf'D:\workspace\vessel_segment\dataset'  # alternative Windows path for local runs
# Target image size. NOTE(review): currently only referenced by the
# commented-out Resize transform in ``test_pipeline`` — confirm still needed.
img_scale = (512, 512)
# Training pipeline: read the image, load the per-branch (LCX/LAD) vessel
# masks with the project-specific loader, then pack everything into the
# multi-head segmentation sample format.
train_pipeline = [
    {'type': 'LoadImageFromFile'},
    {'type': 'LoadMultiVesselAnnotations'},  # project-specific multi-label loader
    # {'type': 'RandomFlip', 'prob': 0.5},   # flip augmentation currently disabled
    {'type': 'PackMultiSegInputs'},
]

# Validation pipeline: identical to training (no augmentation is active there
# either) — image, multi-vessel annotations, multi-head packing.
val_pipeline = [
    {'type': 'LoadImageFromFile'},
    {'type': 'LoadMultiVesselAnnotations'},  # project-specific multi-label loader
    {'type': 'PackMultiSegInputs'},
]

# Inference-only pipeline. Annotation loading and Resize are intentionally
# left out (ground-truth maps must not be resized; annotations would be
# loaded after ``Resize`` if re-enabled). NOTE(review): this pipeline uses the
# single-head ``PackSegInputs`` and is not referenced by ``test_dataloader``
# (which reuses ``val_dataloader``) — confirm whether it is still needed.
test_pipeline = [
    {'type': 'LoadImageFromFile'},
    # {'type': 'Resize', 'scale': img_scale, 'keep_ratio': True},
    # {'type': 'LoadMultiVesselAnnotations'},
    {'type': 'PackSegInputs'},
]
# Multi-scale + horizontal-flip test-time augmentation (TTA).
img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
tta_pipeline = [
    {'type': 'LoadImageFromFile', 'backend_args': None},
    {
        'type': 'TestTimeAug',
        'transforms': [
            # One branch per scale factor.
            [{'type': 'Resize', 'scale_factor': ratio, 'keep_ratio': True}
             for ratio in img_ratios],
            # Identity branch (prob=0) and always-flipped branch (prob=1).
            [{'type': 'RandomFlip', 'prob': 0.0, 'direction': 'horizontal'},
             {'type': 'RandomFlip', 'prob': 1.0, 'direction': 'horizontal'}],
            # NOTE(review): single-head ``LoadAnnotations``/``PackSegInputs``
            # here, unlike the multi-vessel train/val pipelines — confirm
            # this is intended for TTA.
            [{'type': 'LoadAnnotations'}],
            [{'type': 'PackSegInputs'}],
        ],
    },
]
# Training loader. The conservative DataLoader settings (one worker, no
# pinned or persistent memory, minimal prefetch) were chosen to work around
# cuDNN / shared-memory issues on the training machine.
train_dataloader = {
    'batch_size': 8,
    'num_workers': 1,             # single worker to avoid memory contention
    'persistent_workers': False,  # do not keep workers alive between epochs
    'prefetch_factor': 1,         # minimal prefetching
    'pin_memory': False,          # avoid page-locked memory / mapping conflicts
    'drop_last': True,            # drop the trailing incomplete batch
    'sampler': {'type': 'DefaultSampler', 'shuffle': True},
    'dataset': {
        'type': dataset_type,
        'data_root': data_root,
        'data_prefix': {
            'img_path': 'images_concat/train',
            # One mask directory per vessel branch: LCX first, LAD second.
            'seg_map_path': ['LCX_mask/train', 'LAD_mask/train'],
        },
        'pipeline': train_pipeline,
    },
}

# Validation loader: smaller batches, two workers, otherwise the same
# conservative memory settings as training. No shuffling for evaluation.
val_dataloader = {
    'batch_size': 4,
    'num_workers': 2,
    'persistent_workers': False,  # do not keep workers alive between epochs
    'prefetch_factor': 1,         # minimal prefetching
    'pin_memory': False,          # page-locked memory disabled for now
    'sampler': {'type': 'DefaultSampler', 'shuffle': False},
    'dataset': {
        'type': dataset_type,
        'data_root': data_root,
        'data_prefix': {
            'img_path': 'images_concat/val',
            # One mask directory per vessel branch: LCX first, LAD second.
            'seg_map_path': ['LCX_mask/val', 'LAD_mask/val'],
        },
        'pipeline': val_pipeline,
    },
}
# Testing reuses the validation loader (and therefore ``val_pipeline``).
test_dataloader = val_dataloader

# Dual-head evaluator: computes Dice/IoU per prediction head, matching each
# head's output to the corresponding ground-truth map (head0 -> LCX,
# head1 -> LAD) and reporting metrics under the given prefixes.
val_evaluator = {
    'type': 'MultiHeadSegMetric',
    'iou_metrics': ['mDice', 'mIoU'],   # metrics to report
    'ignore_index': 0,                  # background label excluded from scoring
    'label_keys': ['gt_seg_map_lcx', 'gt_seg_map_lad'],
    'pred_keys': ['pred_seg_map_head0', 'pred_seg_map_head1'],
    'prefixes': ['lcx', 'lad'],
    'print_background': False,          # suppress background scores by default
}

# Testing reuses the validation evaluator.
test_evaluator = val_evaluator