from mmengine.config import read_base
with read_base():
    from ..base_dataset_aitrox import *

from torch.optim.adamw import AdamW
from torch.distributed.fsdp.api import ShardingStrategy

# mmengine
from mmengine.model.base_model import BaseDataPreprocessor
from mmengine.hooks import (
    DistSamplerSeedHook, IterTimerHook, ParamSchedulerHook, 
    CheckpointHook, RuntimeInfoHook)
from mmengine.runner import IterBasedTrainLoop, ValLoop
from mmengine.optim.scheduler import LinearLR, PolyLR
from mmengine.optim import OptimWrapper, AmpOptimWrapper
from mmengine.model.wrappers import MMFullyShardedDataParallel
from mmengine._strategy.deepspeed import DeepSpeedOptimWrapper, DeepSpeedStrategy
from mmengine.dataset import InfiniteSampler

# customize
from mgamdata.mm.mmeng_PlugIn import (
    RemasteredDDP, LoggerJSON, GeneralVisHook, 
    RatioSampler, multi_sample_collate, RemasteredFSDP_Strategy, 
    mgam_LocalVisBackend, mgam_TensorboardVisBackend)
from mgamdata.dataset.base import mgam_concat_dataset
from mgamdata.process.GeneralPreProcess import (
    NewAxis, WindowSet, RandomDiscreteErase, RandomAxis, RandomFlip3D)
from mgamdata.process.LoadBiomedicalData import LoadCTPreCroppedSampleFromNpz
from mgamdata.models.ReconSelfSup import PackReconInput, ReconMetric, ReconViser


# --------------------PARAMETERS-------------------- #
debug = False   # debug mode: tiny datasets, no dist, no compile
use_AMP = True  # automatic mixed precision
dist = not debug     # enable distributed training
MP_mode = "ddp"      # parallelism mode, Literal["ddp", "fsdp", "deepspeed"]
Compile = not debug  # torch.dynamo compilation
workers = 0 if debug else 4  # DataLoader workers (0 = load in main process)

# Starting
resume = True
load_from = None
resume_optimizer = True
resume_param_scheduler = False

# Network hyper-parameters
lr = 2e-4
batch_size = 3 if debug else 8
grad_accumulation = 1
size = (48,48,48)  # resolution of a single forward pass; inference is not limited
min_erase_ratio = 0.9
max_erase_ratio = 0.99

# Schedule control
iters = 3 if debug else 1000000
logger_interval = 1 if debug else 50
val_interval = 2 if debug else 10000
vis_interval = 1 if debug else logger_interval * 10
save_interval = 2 if debug else val_interval
dynamic_intervals = None
# dynamic_intervals = [  # dynamic validation intervals
#     (10, 10),
#     (val_interval, 20)
# ]

# Dataset
wl = 200  # CT window level (used by WindowSet below)
ww = 800  # CT window width
pad_val = 0
seg_pad_val = 0

# --------------------PARAMETERS-------------------- #
# ////////////////////////////////////////////////// #
# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ #
# --------------------COMPONENTS-------------------- #
# --------------------COMPONENTS-------------------- #

# Data loading and pre-processing pipeline (training).
train_pipeline = [
    # Load a pre-cropped CT sample from .npz; only the image is loaded
    # (self-supervised reconstruction, no labels).
    dict(type=LoadCTPreCroppedSampleFromNpz, load_type=['img']), 
    # CT intensity windowing at level `wl`, width `ww`.
    dict(type=WindowSet, location=wl, width=ww), 
    # Random axis swaps over all three plane pairs — presumably an
    # orientation augmentation; verify exact semantics in RandomAxis.
    dict(type=RandomAxis, axis=(0,1)),
    dict(type=RandomAxis, axis=(1,2)),
    dict(type=RandomAxis, axis=(0,2)),
    # Random flips along each spatial axis.
    dict(type=RandomFlip3D, axis=0),
    dict(type=RandomFlip3D, axis=1),
    dict(type=RandomFlip3D, axis=2),
    # Erase a random 90–99% portion of the image; erased voxels are
    # filled with `pad_val`.
    dict(type=RandomDiscreteErase,
         min_ratio=min_erase_ratio,
         max_ratio=max_erase_ratio,
         keys_pad_vals=[('img', pad_val)]),
    # Add a leading channel axis to image, original image and erase mask.
    # NOTE(review): 'ori_img'/'erase_mask' are presumably produced by the
    # erase step — confirm in RandomDiscreteErase.
    dict(type=NewAxis, 
         axis=0, 
         keys=['img', 'ori_img', 'erase_mask']), 
    dict(type=PackReconInput)
]

# Validation pipeline: same load/window/erase/pack steps as training,
# but without the geometric (axis-swap / flip) augmentations.
val_pipeline = [
    dict(type=LoadCTPreCroppedSampleFromNpz, load_type=['img']), 
    dict(type=WindowSet, location=wl, width=ww), 
    # Same random erase as training, so validation measures reconstruction
    # under the same masking regime (note: the erase itself is stochastic).
    dict(type=RandomDiscreteErase,
         min_ratio=min_erase_ratio,
         max_ratio=max_erase_ratio,
         keys_pad_vals=[('img', pad_val)]),
    dict(type=NewAxis, 
         axis=0, 
         keys=['img', 'ori_img', 'erase_mask']), 
    dict(type=PackReconInput)
]

# Crop-48 dataset base configs (names come from the read_base() import).
ds = [ 
    Sar_Crop48, 
    Ts_Crop48, 
    FLARE23_Crop48, 
    KiTS23_Crop48, 
    AbCT1K_Crop48, 
    LUNA16_Crop48, 
]
if debug:
    # Keep only the first dataset for quick debug runs.
    ds = ds[0:1]
multi_dataset_Tr = []
for d in ds:
    # NOTE(review): update() mutates the shared base-config dicts in
    # place, so keys set here (notably mode="unsup") remain present when
    # the validation configs are later built from the same dicts — confirm
    # this carry-over is intended.
    d.update(split="train",
             mode="unsup",
             pipeline=train_pipeline,
             debug=debug)
    # Shallow copy freezes the top-level keys for the train split before
    # the validation loop overwrites them.
    multi_dataset_Tr.append(d.copy())

# Training dataloader: infinite sampling over the concatenated datasets.
train_dataloader = dict(
    batch_size=batch_size,
    num_workers=workers,
    drop_last=not debug,
    pin_memory=True,
    persistent_workers=workers > 0,
    sampler=dict(type=InfiniteSampler, shuffle=not debug),
    collate_fn=dict(type=multi_sample_collate),
    dataset=dict(type=mgam_concat_dataset, datasets=multi_dataset_Tr),
)

# Validation dataset configs: reuse the (already train-updated) dicts,
# overriding split and pipeline in place before snapshotting each one.
multi_dataset_Ts = []
for d in ds:
    d.update(split="val", pipeline=val_pipeline, debug=debug)
    multi_dataset_Ts.append(d.copy())

# Validation dataloader; RatioSampler evaluates on a 2.5% subsample.
val_dataloader = dict(
    batch_size=batch_size,
    num_workers=workers,
    drop_last=not debug,
    pin_memory=True,
    persistent_workers=workers > 0,
    sampler=dict(type=RatioSampler,
                 use_sample_ratio=0.025,
                 shuffle=not debug),
    collate_fn=dict(type=multi_sample_collate),
    dataset=dict(type=mgam_concat_dataset, datasets=multi_dataset_Ts),
)

# Data preprocessor: plain device transfer, no normalization configured here.
data_preprocessor = dict(type=BaseDataPreprocessor, non_blocking=False)

# Reconstruction metrics, reported under the "Perf/" prefix.
val_evaluator = dict(type=ReconMetric, prefix='Perf')

# Training strategy: iteration-based training loop.
train_cfg = dict(
    type=IterBasedTrainLoop,
    max_iters=iters,
    val_interval=val_interval,
    dynamic_intervals=dynamic_intervals,
)
val_cfg = dict(type=ValLoop, fp16=True)

# Optimizer. DeepSpeed manages AMP/clipping itself; every other mode uses
# mmengine's OptimWrapper (AMP variant when use_AMP is on).
if MP_mode == "deepspeed" and dist:
    optim_wrapper = dict(
        type=DeepSpeedOptimWrapper,
        optimizer=dict(type=AdamW, lr=lr, weight_decay=1e-2),
        accumulative_counts=grad_accumulation,
    )
else:
    optim_wrapper = dict(
        type=AmpOptimWrapper if use_AMP else OptimWrapper,
        accumulative_counts=grad_accumulation,
        optimizer=dict(type=AdamW, lr=lr, weight_decay=1e-2),
        # Gradient clipping by global L2 norm.
        clip_grad=dict(max_norm=5, norm_type=2, error_if_nonfinite=False),
    )
if use_AMP and dist and MP_mode=='fsdp':
    # AmpOptimWrapper(use_fsdp=True) switches to the FSDP-aware
    # ShardedGradScaler.
    optim_wrapper["use_fsdp"] = True

# Learning-rate schedule: linear warm-up over the first 1% of iterations,
# then polynomial decay between 30% and 95%. Disabled entirely in debug.
param_scheduler = [
    dict(
        type=LinearLR,
        start_factor=1e-2,
        # mmengine scheduler begin/end are iteration indices (ints);
        # the ratio arithmetic yields floats, so cast explicitly.
        end=int(iters * 0.01),
        by_epoch=False,
    ),
    dict(
        type=PolyLR,
        eta_min=lr * 5e-2,  # LR floor at 5% of the base rate
        power=0.6,
        begin=int(0.3 * iters),
        end=int(0.95 * iters),
        by_epoch=False,
    ),
] if not debug else []

# Default runner hooks.
default_hooks = dict(
    runtime_info=dict(type=RuntimeInfoHook), 
    timer=dict(type=IterTimerHook), 
    # JSON logger, iteration-based.
    logger=dict(type=LoggerJSON, interval=logger_interval, log_metric_by_epoch=False), 
    param_scheduler=dict(type=ParamSchedulerHook), 
    checkpoint=dict(
        type=CheckpointHook, 
        by_epoch=False, 
        max_keep_ckpts=1, 
        # interval=-1 disables periodic checkpointing in debug runs.
        interval=save_interval if not debug else -1, 
        # Track the best checkpoint by "Perf/mae"; lower is better.
        save_best="Perf/mae" if not debug else None, 
        rule="less" if not debug else None, 
        save_last=True, 
    ),
    sampler_seed=dict(type=DistSamplerSeedHook), 
    visualization=dict(type=GeneralVisHook, 
                       interval=vis_interval), 
)

# torch.dynamo compilation options ("compile" is the key name mmengine's
# runner expects, hence the shadowed builtin).
compile = dict(
    fullgraph=False,  # allow graph breaks
    dynamic=False,    # assume static shapes
    disable=not Compile,
)

# Distributed training setup.
runner_type = "mgam_Runner"
if dist:
    launcher = "pytorch"
    if MP_mode == "deepspeed":
        strategy = dict(
            type=DeepSpeedStrategy,
            fp16=dict(
                enabled=True,
                auto_cast=True,
                fp16_master_weights_and_grads=False,
                # loss_scale=0 enables DeepSpeed's dynamic loss scaling,
                # tuned by the window/hysteresis/scale-power knobs below.
                loss_scale=0,
                loss_scale_window=500,
                hysteresis=2,
                min_loss_scale=1,
                initial_scale_power=15,
            ),
            inputs_to_half=None,
            # ZeRO stage 3: parameters, gradients and optimizer states are
            # all sharded across ranks.
            zero_optimization=dict(
                stage=3,
                allgather_partitions=True,
                reduce_scatter=True,
                allgather_bucket_size=5e7,
                reduce_bucket_size=5e7, # 1e6 available
                overlap_comm=True,
                contiguous_gradients=True,
                cpu_offload=False,
                ignore_unused_parameters=True,
                # Gather full fp16 weights on save so checkpoints are
                # directly loadable outside DeepSpeed.
                stage3_gather_16bit_weights_on_model_save=True),
        )
    elif MP_mode == "ddp":
        model_wrapper_cfg = dict(type=RemasteredDDP)
    elif MP_mode == "fsdp":
        strategy = dict(
            type=RemasteredFSDP_Strategy,
            model_wrapper=dict(
                type=MMFullyShardedDataParallel, 
                use_orig_params=True, 
                sharding_strategy=ShardingStrategy.FULL_SHARD,
            ),
        )

else:
    launcher = "none"

# Runtime environment.
env_cfg = dict(
    # Using CUDA in worker subprocesses requires "spawn" (all arguments
    # must be picklable); "fork" is fine otherwise and shares the parent's
    # memory space.
    mp_cfg=dict(mp_start_method="fork", opencv_num_threads=1),
    dist_cfg=dict(backend="nccl"),
    allow_tf32=True,
    benchmark=True,
    allow_fp16_reduced_precision_reduction=True,
    allow_bf16_reduced_precision_reduction=True,
    dynamo_cache_size=2,
    # NOTE(review): "supress" is misspelled, but the key is presumably
    # consumed verbatim by mgam_Runner — do not rename without updating
    # the consumer.
    dynamo_supress_errors=False,
    dynamo_logging_level="ERROR",
    torch_logging_level="ERROR",
)

# Visualization: local file backend plus TensorBoard.
vis_backends = [
    dict(type=mgam_LocalVisBackend),
    dict(type=mgam_TensorboardVisBackend),
]
visualizer = dict(type=ReconViser, vis_backends=vis_backends)

# Logging: iteration-based log processing at INFO level.
log_processor = dict(by_epoch=False)
log_level = "INFO"

# No test-time augmentation model.
tta_model = None
