from mmengine.config import read_base
with read_base():
    from ..base_dataset_aitrox import *

import numpy as np
from torch.optim.adamw import AdamW
from torch.distributed.fsdp.api import ShardingStrategy

# mmengine
from mmengine.model.base_model import BaseDataPreprocessor
from mmengine.hooks import DistSamplerSeedHook, IterTimerHook, ParamSchedulerHook, CheckpointHook
from mmengine.runner import IterBasedTrainLoop, ValLoop
from mmengine.optim.scheduler import LinearLR, PolyLR
from mmengine.optim import OptimWrapper, AmpOptimWrapper
from mmengine.model.wrappers import MMFullyShardedDataParallel
from mmengine._strategy.deepspeed import DeepSpeedOptimWrapper, DeepSpeedStrategy
from mmengine.dataset import InfiniteSampler
from mmpretrain.datasets import PackInputs

# customize
from mgamdata.mm.mmeng_PlugIn import (
    RemasteredDDP, RemasteredFSDP, LoggerJSON, RuntimeInfoHook, 
    RatioSampler, multi_sample_collate, RemasteredFSDP_Strategy,
    mgam_LocalVisBackend, mgam_TensorboardVisBackend, GeneralVisHook)
from mgamdata.dataset.base import mgam_concat_dataset
from mgamdata.process.GeneralPreProcess import (
    TypeConvert, NewAxis, AutoPad, SampleAugment, WindowSet,
    RandomAxis, RandomFlip3D, RandomRotate3D, CenterCrop3D)
from mgamdata.process.LoadBiomedicalData import LoadImageFromMHA
from mgamdata.models.FastSlow import (
    RandomVolumeView, NormalizeCoord, RelSim_Viser, RelSim_Metric, ParseCoords)


# --------------------PARAMETERS-------------------- #
debug = False   # debug mode: shrinks workloads, disables dist/compile
use_AMP = True  # automatic mixed precision
dist = not debug     # enable distributed training
MP_mode = "ddp"      # parallelism mode, Literal["ddp", "fsdp", "deepspeed"]
Compile = not debug  # torch.dynamo compilation
workers = 4 if not debug else 0  # DataLoader workers

# Resume / checkpoint loading
resume = True
load_from = None
resume_optimizer = True
resume_param_scheduler = True

# Network hyper-parameters
lr = 1e-4
batch_size = 1 if not debug else 2
sample_in_time_augment = 4
grad_accumulation = 2
embed_dims = 128
in_channels = 1
size = (48, 48, 48)  # resolution per forward pass; inference is not restricted
block_counts = [2, 2, 2, 2, 2, 2, 2, 2, 2]
sub_view_size = [s // 2 for s in size]
num_views = 5
min_erase_ratio = 0.7
max_erase_ratio = 0.99
deep_supervision = True
nir_checkpoint = True
MedNeXt_checkpoint = False  # torch.checkpoint (activation checkpointing)

# Schedule control
iters = 200000 if not debug else 3
logger_interval = 100 if not debug else 1
val_interval = 5000 if not debug else 2
vis_interval = logger_interval * 10 if not debug else 1
save_interval = 5000 if not debug else 2
dynamic_intervals = None
# dynamic_intervals = [  # dynamic validation intervals
#     (10, 10),
#     (val_interval, 20)
# ]

# Dataset
wl = 200  # window location, consumed by WindowSet
ww = 800  # window width, consumed by WindowSet
pad_val = 0
seg_pad_val = 0
coord_norm = [256, 256, 256]

# --------------------PARAMETERS-------------------- #
# ////////////////////////////////////////////////// #
# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ #
# --------------------COMPONENTS-------------------- #

# Data loading and pre-processing pipelines.
# Volume-wise augmentations — use with CAUTION.
volume_augment = (
    [dict(type=RandomAxis, axis=pair) for pair in ((0, 1), (1, 2), (0, 2))]
    + [dict(type=RandomFlip3D, axis=ax) for ax in (0, 1, 2)]
    + [dict(type=RandomRotate3D, degree=180, keys=['img'])]
)

# In-time augmentation: draws several multi-view samples from one volume.
MultiView_SelfSup_Pipeline = dict(
    type=SampleAugment,
    num_samples=sample_in_time_augment,
    pipeline=[
        dict(type=RandomVolumeView, num_views=num_views, dim='3d', size=size),
        dict(type=NormalizeCoord, div=coord_norm),
        dict(type=ParseCoords, view_size=size, sub_view_size=sub_view_size),
        dict(type=TypeConvert, key=['img'], dtype=np.float16),
        dict(
            type=PackInputs,
            algorithm_keys=[
                "view_coords", "normed_view_coords", "normed_abs_gap",
                "abs_gap", "sim_pair_indices", "sim_pair_centers", "volume",
                "normed_sim_pair_indices", "normed_sim_pair_centers",
            ],
        ),
    ],
)

train_pipeline = [
    dict(type=LoadImageFromMHA),
    dict(type=WindowSet, location=wl, width=ww),
    # Pad to 4/3 of the patch size so that patches can be sampled from more
    # than one position inside the volume; without the padding there would be
    # only a single possible patch placement.
    dict(
        type=AutoPad,
        size=[s * 4 // 3 for s in size],
        dim='3d',
        pad_val=pad_val,
        pad_label_val=seg_pad_val,
    ),
    dict(type=NewAxis, axis=0),
    MultiView_SelfSup_Pipeline,
]

val_pipeline = [
    dict(type=LoadImageFromMHA),
    dict(type=WindowSet, location=wl, width=ww),
    dict(
        type=AutoPad,
        size=size,
        dim='3d',
        pad_val=pad_val,
        pad_label_val=seg_pad_val,
    ),
    dict(type=CenterCrop3D, size=size, keys=['img']),
    dict(type=NewAxis, axis=0),
    MultiView_SelfSup_Pipeline,
]

multi_dataset = [
    SarcopeniaDataset,
    TsDataset,
    FLARE2023Dataset,
    KiTS23_Dataset,
    AbdomenCT1K_Dataset,
    LUNA16_Dataset,
]

if debug:
    multi_dataset = multi_dataset[0:1]

# Build separate train/val dataset-config lists.
# BUG FIX: the original `multi_dataset_Tr = multi_dataset_Ts = []` bound both
# names to the SAME list object, so every dataset config was appended twice
# (train AND val variants) to each dataloader. Two distinct lists fix that.
multi_dataset_Tr = []
multi_dataset_Ts = []
for d in multi_dataset:
    d.update(split="train", pipeline=train_pipeline, debug=debug)
    multi_dataset_Tr.append(d.copy())  # shallow copy before mutating for val
    d.update(split="val", pipeline=val_pipeline, debug=debug)
    multi_dataset_Ts.append(d.copy())

# Training dataloader: infinite shuffled sampling over the concatenated set.
train_dataloader = dict(
    batch_size=batch_size,
    num_workers=workers,
    drop_last=not debug,
    pin_memory=True,
    persistent_workers=workers > 0,
    sampler=dict(type=InfiniteSampler, shuffle=not debug),
    collate_fn=dict(type=multi_sample_collate),
    dataset=dict(type=mgam_concat_dataset, datasets=multi_dataset_Tr),
)

# Validation dataloader: draws only a 2.5% ratio of samples per pass.
val_dataloader = dict(
    batch_size=batch_size,
    num_workers=workers,
    drop_last=not debug,
    pin_memory=True,
    persistent_workers=workers > 0,
    sampler=dict(
        type=RatioSampler,
        use_sample_ratio=0.025,
        shuffle=not debug,
    ),
    collate_fn=dict(type=multi_sample_collate),
    dataset=dict(type=mgam_concat_dataset, datasets=multi_dataset_Ts),
)

# Plain preprocessor: moves batches to device, no normalization here.
data_preprocessor = dict(
    type=BaseDataPreprocessor,
    non_blocking=False,
)

val_evaluator = dict(
    type=RelSim_Metric,
    prefix='Perf',
)

# Training strategy: iteration-based loop with periodic validation.
train_cfg = dict(
    type=IterBasedTrainLoop,
    max_iters=iters,
    val_interval=val_interval,
    dynamic_intervals=dynamic_intervals,
)
val_cfg = dict(type=ValLoop, fp16=True)

# Optimizer wrapper. DeepSpeed gets its own wrapper type; otherwise choose
# AMP/plain wrapper and attach gradient clipping.
if MP_mode == "deepspeed" and dist:
    optim_wrapper = dict(
        type=DeepSpeedOptimWrapper,
        optimizer=dict(type=AdamW, lr=lr, weight_decay=1e-2),
        accumulative_counts=grad_accumulation,
    )
else:
    wrapper_cls = AmpOptimWrapper if use_AMP else OptimWrapper
    optim_wrapper = dict(
        type=wrapper_cls,
        accumulative_counts=grad_accumulation,
        optimizer=dict(type=AdamW, lr=lr, weight_decay=1e-2),
        clip_grad=dict(max_norm=5, norm_type=2, error_if_nonfinite=False),
    )
if use_AMP and dist and MP_mode == 'fsdp':
    optim_wrapper["use_fsdp"] = True

# Learning-rate schedule: linear warm-up over the first 1% of iterations,
# then polynomial decay between 30% and 95% of training.
# FIX: scheduler `begin`/`end` are iteration indices; the original passed
# floats (`iters * 0.01`, `0.3 * iters`, ...) — cast to int so they are valid
# step boundaries for mmengine's parameter schedulers.
param_scheduler = [
    dict(
        type=LinearLR,
        start_factor=1e-2,
        end=int(iters * 0.01),
        by_epoch=False,
    ),
    dict(
        type=PolyLR,
        eta_min=lr * 5e-2,
        power=0.6,
        begin=int(0.3 * iters),
        end=int(0.95 * iters),
        by_epoch=False,
    ),
] if not debug else []

default_hooks = dict(
    runtime_info=dict(type=RuntimeInfoHook),
    timer=dict(type=IterTimerHook),
    logger=dict(
        type=LoggerJSON,
        interval=logger_interval,
        log_metric_by_epoch=False,
    ),
    param_scheduler=dict(type=ParamSchedulerHook),
    # Keep only the latest checkpoint plus the best one by "Perf/all_loss"
    # (lower is better); interval -1 disables periodic saving in debug mode.
    checkpoint=dict(
        type=CheckpointHook,
        by_epoch=False,
        max_keep_ckpts=1,
        interval=save_interval if not debug else -1,
        save_best="Perf/all_loss",
        rule="less" if not debug else None,
        save_last=True,
    ),
    sampler_seed=dict(type=DistSamplerSeedHook),
    visualization=dict(type=GeneralVisHook, interval=vis_interval),
)

# torch.dynamo compile settings; fully disabled when Compile is False.
compile = dict(fullgraph=False, dynamic=False, disable=not Compile)

# Distributed training setup: picks the launcher and the per-mode
# strategy / model-wrapper config based on MP_mode.
runner_type = "mgam_Runner"
if dist:
    launcher = "pytorch"
    if MP_mode == "deepspeed":
        # DeepSpeed with fp16 dynamic loss scaling and ZeRO stage-3
        # (parameters, gradients and optimizer states all sharded).
        strategy = dict(
            type=DeepSpeedStrategy,
            fp16=dict(
                enabled=True,
                auto_cast=True,
                fp16_master_weights_and_grads=False,
                loss_scale=0,  # 0 selects dynamic loss scaling
                loss_scale_window=500,
                hysteresis=2,
                min_loss_scale=1,
                initial_scale_power=15,
            ),
            inputs_to_half=None,
            zero_optimization=dict(
                stage=3,
                allgather_partitions=True,
                reduce_scatter=True,
                allgather_bucket_size=5e7,
                reduce_bucket_size=5e7, # 1e6 available
                overlap_comm=True,
                contiguous_gradients=True,
                cpu_offload=False,
                ignore_unused_parameters=True,
                stage3_gather_16bit_weights_on_model_save=True),
        )
    elif MP_mode == "ddp":
        # Plain DDP only needs a model wrapper, no dedicated strategy.
        model_wrapper_cfg = dict(type=RemasteredDDP)
    elif MP_mode == "fsdp":
        strategy = dict(
            type=RemasteredFSDP_Strategy,
            model_wrapper=dict(
                type=MMFullyShardedDataParallel, 
                use_orig_params=True, 
                sharding_strategy=ShardingStrategy.FULL_SHARD,
            ),
        )

else:
    launcher = "none"

# Runtime environment.
env_cfg = dict(
    # Using CUDA inside worker subprocesses requires the "spawn" start method
    # (and then every argument must be picklable); "fork" is generally fine
    # otherwise and shares the parent's memory space.
    mp_cfg=dict(mp_start_method="fork", opencv_num_threads=1),
    dist_cfg=dict(backend="nccl"),
    allow_tf32=True,
    benchmark=True,
    allow_fp16_reduced_precision_reduction=True,
    allow_bf16_reduced_precision_reduction=True,
    dynamo_cache_size=2,
    # NOTE(review): "supress" looks misspelled ("suppress") — confirm the key
    # name expected by the project runner before renaming.
    dynamo_supress_errors=False,
    dynamo_logging_level="ERROR",
    torch_logging_level="ERROR",
)

vis_backends = [dict(type=mgam_LocalVisBackend), 
                dict(type=mgam_TensorboardVisBackend)]
visualizer = dict(type=RelSim_Viser, 
                  vis_backends=vis_backends, 
                  coord_norm=coord_norm)
log_processor = dict(by_epoch=False)
log_level = "INFO"
tta_model = None