import argparse
import inspect

from . import diffusion
from .respace import SpacedDiffusion, space_timesteps
from .UNetModel import UNetModel

def diffusion_defaults():
    """
    Default hyperparameters for Hi-C diffusion training.

    Strict DDPM configuration:
    - predict the noise epsilon (predict_xstart=False)
    - fixed large variance (learn_sigma=False -> FIXED_LARGE)
    - linear beta schedule
    - 1000 diffusion steps
    """
    return {
        "learn_sigma": False,
        "diffusion_steps": 1000,
        "noise_schedule": "linear",
        "timestep_respacing": "",
        "use_kl": False,
        "predict_xstart": False,  # standard DDPM: the model predicts noise
        "rescale_timesteps": False,
        "rescale_learned_sigmas": False,
    }

def model_and_diffusion_defaults():
    """
    Default hyperparameters for the Hi-C UNet model, merged with the
    diffusion defaults from :func:`diffusion_defaults`.
    """
    defaults = {
        "image_size": 128,             # Hi-C window size (overridden by window_size)
        "in_channels": 1,              # single-channel Hi-C matrix
        "model_channels": 128,         # base number of channels
        "out_channels": 1,             # single-channel output
        "num_res_blocks": 2,           # residual blocks per resolution
        "attention_resolutions": "32,16,8",  # resolutions where attention is applied
        "dropout": 0.0,
        "channel_mult": "1,2,4,8",     # channel multiplier per resolution
        "conv_resample": True,
        "dims": 2,                     # 2D convolutions
        "num_classes": None,           # no class conditioning by default
        "use_checkpoint": False,
        "use_fp16": False,
        "num_heads": 4,
        "num_head_channels": -1,
        "num_heads_upsample": -1,
        "use_scale_shift_norm": True,
        "resblock_updown": False,
        "use_new_attention_order": False,
        "use_spatial_transformer": False,
        "transformer_depth": 1,
        "context_dim": None,
        "time_cond_dim": 128,          # biological time conditioning dimension
        "use_transition_anchors": True,    # learn early/late anchors, interpolate by stage
        "cond_drop_prob": 0.0,         # classifier-free conditioning dropout prob
        "sem_cond_dim": 256,           # semantic conditioning dimension for zsem
        "use_semantic_encoder": True,  # whether to use the built-in semantic encoder
        "sem_drop_prob": 0.1,          # classifier-free dropout for semantic conditioning
        "use_vae_encoder": False,      # whether to use a VAE encoder for semantic encoding
        "vae_encoder_path": '',        # path to a pre-trained VAE encoder
        "freeze_vae_encoder": True,    # whether to freeze VAE encoder weights
    }
    defaults.update(diffusion_defaults())
    return defaults

def hic_data_defaults():
    """
    Default hyperparameters for Hi-C data loading and preprocessing.
    """
    return {
        "data_dir": "",
        "timepoints": "G1,late_S1",
        "file_format": "npz",          # 'cool', 'txt', 'npy', 'npz'
        "txt_format": "auto",          # 'auto', 'pairs', 'matrix' for txt files
        "chromosome": "chr1",          # specific chromosome to load (e.g. 'chr1')
        "chromosomes": None,           # chromosome list for the pairs format
        "resolution": 50000,           # Hi-C resolution in bp
        "window_size": 128,            # window size for sampling
        "matrix_size": 128,            # deprecated; use window_size instead
        "normalization": "ice",        # 'ice', 'zscore', 'log', 'none'
        "log_transform": True,
        "interpolation_prob": 0.3,     # probability of generating interpolated samples
        "augmentation": False,         # data augmentation
        "random_window": True,         # randomly sample window positions
        "use_grid_windows": True,      # iterate all windows deterministically
        "window_stride": 128,          # stride for grid windows (defaults to window_size)
        "band_limit_bp": 5_000_000,    # near-diagonal limit in base pairs (default 5Mb)
        "band_limit_bins": None,       # override in bins; if set, ignores band_limit_bp
        "output_range": "0,1",         # output range: "0,1" or "-1,1"
    }

def training_defaults():
    """
    Default hyperparameters for the training loop.
    """
    return {
        "batch_size": 4,
        "microbatch": -1,              # -1 disables microbatching
        "lr": 1e-4,
        "weight_decay": 0.0,
        "lr_anneal_steps": 0,
        "ema_rate": "0.9999",
        "log_interval": 100,
        "save_interval": 1000,
        "resume_checkpoint": "",
        "use_fp16": False,
        "fp16_scale_growth": 1e-3,
        "schedule_sampler": "uniform",
        "logger_path": "",
        # Multi-timestep training: accumulate losses over several t per batch.
        "multi_t_training": True,      # enabled by default for better timestep coverage
        "num_t_per_batch": 11,         # timesteps covered per microbatch
        "t_multi_strategy": "linspace",  # 'linspace' or 'random'
        "t_stride": 100,               # if >0, prefer t in {0, t_stride, 2*t_stride, ...}
    }

def create_model_and_diffusion(
    image_size,
    in_channels,
    model_channels,
    out_channels,
    num_res_blocks,
    attention_resolutions,
    dropout,
    channel_mult,
    conv_resample,
    dims,
    num_classes,
    use_checkpoint,
    use_fp16,
    num_heads,
    num_head_channels,
    num_heads_upsample,
    use_scale_shift_norm,
    resblock_updown,
    use_new_attention_order,
    use_spatial_transformer,
    transformer_depth,
    context_dim,
    time_cond_dim,
    use_transition_anchors,
    cond_drop_prob,
    sem_cond_dim,
    use_semantic_encoder,
    sem_drop_prob,
    use_vae_encoder,
    vae_encoder_path,
    freeze_vae_encoder,
    learn_sigma,
    diffusion_steps,
    noise_schedule,
    timestep_respacing,
    use_kl,
    predict_xstart,
    rescale_timesteps,
    rescale_learned_sigmas,
):
    """
    Construct the Hi-C UNet model and its Gaussian diffusion process.

    Accepts the flat union of model and diffusion hyperparameters (see
    model_and_diffusion_defaults) and routes each argument to the right
    factory.  Returns a ``(model, diffusion)`` tuple.
    """
    # Snapshot every parameter before any other local names are bound.
    all_args = dict(locals())
    # Arguments consumed only by the diffusion factory; note learn_sigma is
    # shared by both factories and therefore kept out of this set.
    diffusion_only = (
        "diffusion_steps",
        "noise_schedule",
        "timestep_respacing",
        "use_kl",
        "predict_xstart",
        "rescale_timesteps",
        "rescale_learned_sigmas",
    )
    model = create_model(
        **{k: v for k, v in all_args.items() if k not in diffusion_only}
    )
    gaussian = create_gaussian_diffusion(
        steps=diffusion_steps,
        learn_sigma=learn_sigma,
        noise_schedule=noise_schedule,
        use_kl=use_kl,
        predict_xstart=predict_xstart,
        rescale_timesteps=rescale_timesteps,
        rescale_learned_sigmas=rescale_learned_sigmas,
        timestep_respacing=timestep_respacing,
    )
    return model, gaussian

def create_model(
    image_size,
    in_channels,
    model_channels,
    out_channels,
    num_res_blocks,
    attention_resolutions,
    dropout=0,
    channel_mult=(1, 2, 4, 8),
    conv_resample=True,
    dims=2,
    num_classes=None,
    use_checkpoint=False,
    use_fp16=False,
    num_heads=4,
    num_head_channels=-1,
    num_heads_upsample=-1,
    use_scale_shift_norm=False,
    resblock_updown=False,
    use_new_attention_order=False,
    use_spatial_transformer=False,
    transformer_depth=1,
    context_dim=None,
    time_cond_dim=128,
    learn_sigma=False,
    use_transition_anchors=True,
    cond_drop_prob=0.0,
    sem_cond_dim=256,
    use_semantic_encoder=True,
    use_vae_encoder=False,
    vae_encoder_path='',
    freeze_vae_encoder=True,
    sem_drop_prob=0.1,
):
    """
    Build a UNetModel for Hi-C matrices.

    ``channel_mult`` may be "" (a preset is chosen based on ``image_size``),
    a comma-separated string such as "1,2,4,8", or an already-parsed
    sequence of numbers (the declared default is a tuple).
    ``attention_resolutions`` may likewise be a comma-separated string or a
    sequence; each entry is a spatial resolution that is converted to a
    downsample factor (``image_size // res``) before being handed to the
    UNet.

    When ``learn_sigma`` is set, the output channel count is doubled so the
    network can predict a variance term alongside the mean.

    Returns the constructed UNetModel.
    Raises ValueError if ``channel_mult`` is "" and ``image_size`` has no
    preset.
    """
    if channel_mult == "":
        # Choose a depth preset appropriate to the input resolution.
        if image_size == 512:
            channel_mult = (0.5, 1, 1, 2, 2, 4, 4)
        elif image_size == 256:
            channel_mult = (1, 1, 2, 2, 4, 4)
        elif image_size == 128:
            channel_mult = (1, 1, 2, 3, 4)
        elif image_size == 64:
            channel_mult = (1, 2, 3, 4)
        else:
            raise ValueError(f"unsupported image size: {image_size}")
    elif isinstance(channel_mult, str):
        channel_mult = tuple(int(ch_mult) for ch_mult in channel_mult.split(","))
    else:
        # Bug fix: the declared default is a tuple, but the old code called
        # .split(",") unconditionally and crashed on any non-string value.
        channel_mult = tuple(channel_mult)

    # Accept either a "32,16,8"-style string or a pre-parsed sequence.
    if isinstance(attention_resolutions, str):
        attention_resolutions = attention_resolutions.split(",")
    attention_ds = tuple(image_size // int(res) for res in attention_resolutions)

    return UNetModel(
        image_size=image_size,
        in_channels=in_channels,
        model_channels=model_channels,
        out_channels=(out_channels if not learn_sigma else out_channels * 2),
        num_res_blocks=num_res_blocks,
        attention_resolutions=attention_ds,
        dropout=dropout,
        channel_mult=channel_mult,
        conv_resample=conv_resample,
        dims=dims,
        num_classes=num_classes,
        use_checkpoint=use_checkpoint,
        use_fp16=use_fp16,
        num_heads=num_heads,
        num_head_channels=num_head_channels,
        num_heads_upsample=num_heads_upsample,
        use_scale_shift_norm=use_scale_shift_norm,
        resblock_updown=resblock_updown,
        use_new_attention_order=use_new_attention_order,
        use_spatial_transformer=use_spatial_transformer,
        transformer_depth=transformer_depth,
        context_dim=context_dim,
        time_cond_dim=time_cond_dim,
        use_transition_anchors=use_transition_anchors,
        cond_drop_prob=cond_drop_prob,
        sem_cond_dim=sem_cond_dim,
        use_semantic_encoder=use_semantic_encoder,
        sem_drop_prob=sem_drop_prob,
        use_vae_encoder=use_vae_encoder,
        vae_encoder_path=vae_encoder_path,
        freeze_vae_encoder=freeze_vae_encoder,
    )

def create_gaussian_diffusion(
    *,
    steps=1000,
    learn_sigma=False,
    sigma_small=False,
    noise_schedule="linear",
    use_kl=False,
    predict_xstart=False,
    rescale_timesteps=False,
    rescale_learned_sigmas=False,
    timestep_respacing="",
):
    """
    Assemble a SpacedDiffusion process from high-level flags.

    The flags select the loss type (KL vs. MSE variants), whether the model
    predicts x_0 or the noise epsilon, and whether the variance is learned
    or fixed.  An empty ``timestep_respacing`` keeps the full ``steps``
    schedule.
    """
    # Loss selection: KL takes precedence, then rescaled MSE, then plain MSE.
    if use_kl:
        loss_type = diffusion.LossType.RESCALED_KL
    elif rescale_learned_sigmas:
        loss_type = diffusion.LossType.RESCALED_MSE
    else:
        loss_type = diffusion.LossType.MSE

    # The network output is interpreted as x_0 or as the added noise.
    if predict_xstart:
        mean_type = diffusion.ModelMeanType.START_X
    else:
        mean_type = diffusion.ModelMeanType.EPSILON

    # Variance: learned range beats the two fixed options.
    if learn_sigma:
        var_type = diffusion.ModelVarType.LEARNED_RANGE
    elif sigma_small:
        var_type = diffusion.ModelVarType.FIXED_SMALL
    else:
        var_type = diffusion.ModelVarType.FIXED_LARGE

    # Empty respacing means "use every step".
    spacing = timestep_respacing if timestep_respacing else [steps]

    return SpacedDiffusion(
        use_timesteps=space_timesteps(steps, spacing),
        betas=diffusion.get_named_beta_schedule(noise_schedule, steps),
        model_mean_type=mean_type,
        model_var_type=var_type,
        loss_type=loss_type,
        rescale_timesteps=rescale_timesteps,
    )

def add_dict_to_argparser(parser, default_dict):
    """
    Register one ``--<key>`` option per dictionary entry on *parser*.

    The argparse ``type`` is inferred from the default value: ``None``
    defaults map to ``str``, booleans go through :func:`str2bool`, and
    everything else uses the value's own type.
    """
    for key, default in default_dict.items():
        if default is None:
            arg_type = str
        elif isinstance(default, bool):
            arg_type = str2bool
        else:
            arg_type = type(default)
        parser.add_argument(f"--{key}", default=default, type=arg_type)

def args_to_dict(args, keys):
    """
    Extract the requested *keys* from *args*, silently skipping missing ones.

    *args* may be a plain dict or an argparse-style namespace object.
    """
    if not isinstance(args, dict):
        return {key: getattr(args, key) for key in keys if hasattr(args, key)}
    return {key: args[key] for key in keys if key in args}

def str2bool(v):
    """
    Parse a boolean command-line value; actual booleans pass through as-is.

    Raises argparse.ArgumentTypeError for unrecognized strings.
    https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in {"yes", "true", "t", "y", "1"}:
        return True
    if lowered in {"no", "false", "f", "n", "0"}:
        return False
    raise argparse.ArgumentTypeError("boolean value expected")

def get_all_defaults():
    """
    Return the merged default arguments for Hi-C model training.

    Later groups win on key collisions: data defaults override
    model/diffusion defaults, and training defaults override both.
    """
    return {
        **model_and_diffusion_defaults(),
        **hic_data_defaults(),
        **training_defaults(),
    }
