"""
Hi-C diffusion model inference and temporal interpolation
"""
import os
import argparse
import numpy as np
import torch as th
import matplotlib.pyplot as plt
from datetime import datetime
import glob

from Squidiff import dist_util, logger
from Squidiff.hic_script_util import (
    get_all_defaults,
    create_model_and_diffusion,
    args_to_dict,
    add_dict_to_argparser,
)
from Squidiff.hic_evaluation import (
    evaluate_hic_quality,
    visualize_hic_comparison,
    evaluate_temporal_interpolation
)

def _parse_checkpoint_filename(filename):
    """
    Extract checkpoint metadata from a checkpoint filename.

    Recognizes the two naming schemes used by training:
      - EMA checkpoints:   ema_<rate>_<step>.pt  (e.g. ema_0.9999_012345.pt)
      - model checkpoints: model<step>.pt        (e.g. model012345.pt)

    Returns:
        dict with at least {'type', 'step'}; falls back to
        {'type': 'unknown', 'step': 0} when the name matches neither scheme
        or its numeric fields fail to parse.
    """
    stem = filename.replace('.pt', '')
    if 'ema_' in filename:
        parts = stem.split('_')
        if len(parts) >= 3:
            try:
                return {'type': 'ema', 'rate': float(parts[1]), 'step': int(parts[2])}
            except ValueError:
                pass  # malformed rate/step: treat as unknown rather than crash
    elif 'model' in filename:
        step_str = stem.replace('model', '')
        if step_str.isdigit():
            return {'type': 'model', 'step': int(step_str)}
    return {'type': 'unknown', 'step': 0}

def load_model_from_checkpoint(checkpoint_path, args_dict, use_ema=True, device=None):
    """
    Load trained model from checkpoint with automatic EMA/model selection

    Args:
        checkpoint_path: path to model checkpoint file or directory
        args_dict: model configuration dictionary (mutated in place when a
            legacy checkpoint forces conditioning features off)
        use_ema: whether to prefer EMA weights over regular model weights
        device: device to load model on (defaults to dist_util.dev())

    Returns:
        model: loaded model (moved to device, in eval mode)
        diffusion: diffusion process
        checkpoint_info: metadata parsed from the checkpoint filename

    Raises:
        FileNotFoundError: if checkpoint_path does not exist, or a directory
            contains no recognizable checkpoint files
    """
    if device is None:
        device = dist_util.dev()

    checkpoint_file = None
    checkpoint_info = {'type': 'unknown', 'step': 0}

    if os.path.isdir(checkpoint_path):
        logger.log(f"Searching for checkpoints in directory: {checkpoint_path}")

        # Prefer the latest EMA checkpoint when requested; zero-padded step
        # numbers make lexicographic order match step order.
        if use_ema:
            ema_files = sorted(glob.glob(os.path.join(checkpoint_path, "ema_*.pt")))
            if ema_files:
                checkpoint_file = ema_files[-1]  # Latest EMA checkpoint

        # Fall back to regular model checkpoints
        if checkpoint_file is None:
            model_files = sorted(glob.glob(os.path.join(checkpoint_path, "model*.pt")))
            if model_files:
                checkpoint_file = model_files[-1]  # Latest model checkpoint

        if checkpoint_file is None:
            raise FileNotFoundError(f"No checkpoint files found in {checkpoint_path}")

        checkpoint_info = _parse_checkpoint_filename(os.path.basename(checkpoint_file))

    elif os.path.isfile(checkpoint_path):
        checkpoint_file = checkpoint_path
        checkpoint_info = _parse_checkpoint_filename(os.path.basename(checkpoint_file))
    else:
        raise FileNotFoundError(f"Checkpoint path does not exist: {checkpoint_path}")

    logger.log(f"Loading checkpoint: {checkpoint_file}")
    logger.log(f"Checkpoint info: {checkpoint_info}")

    # Load the state dict first so its keys can be inspected before the model
    # is constructed.
    # NOTE(review): th.load unpickles arbitrary objects; if the installed torch
    # supports it and checkpoints are plain state dicts, pass weights_only=True.
    state_dict = th.load(checkpoint_file, map_location='cpu')

    # Auto-detect legacy checkpoints (missing new features) and adjust args
    has_bio_time = 'bio_time_embed.0.weight' in state_dict
    has_semantic = 'semantic_encoder.0.weight' in state_dict or 'sem_cond_proj.0.weight' in state_dict

    if not has_bio_time:
        logger.log("[AUTO-DETECT] Checkpoint lacks bio_time conditioning. Setting time_cond_dim=0, use_transition_anchors=False")
        args_dict['time_cond_dim'] = 0
        args_dict['use_transition_anchors'] = False

    if not has_semantic:
        logger.log("[AUTO-DETECT] Checkpoint lacks semantic encoder. Setting use_semantic_encoder=False, sem_cond_dim=0")
        args_dict['use_semantic_encoder'] = False
        args_dict['sem_cond_dim'] = 0

    # Create model and diffusion from the (possibly adjusted) configuration
    model, diffusion = create_model_and_diffusion(
        **args_to_dict(args_dict, [
            'image_size', 'in_channels', 'model_channels', 'out_channels',
            'num_res_blocks', 'attention_resolutions', 'dropout', 'channel_mult',
            'conv_resample', 'dims', 'num_classes', 'use_checkpoint', 'use_fp16',
            'num_heads', 'num_head_channels', 'num_heads_upsample',
            'use_scale_shift_norm', 'resblock_updown', 'use_new_attention_order',
            'use_spatial_transformer', 'transformer_depth', 'context_dim',
            'time_cond_dim', 'use_transition_anchors', 'cond_drop_prob',
            'sem_cond_dim', 'use_semantic_encoder', 'sem_drop_prob',
            'use_vae_encoder', 'vae_encoder_path', 'freeze_vae_encoder',
            'learn_sigma', 'diffusion_steps', 'noise_schedule',
            'timestep_respacing', 'use_kl', 'predict_xstart', 'rescale_timesteps',
            'rescale_learned_sigmas'
        ])
    )

    # Load checkpoint (state_dict already loaded above for inspection)
    model.load_state_dict(state_dict)
    model.to(device)
    model.eval()

    logger.log("Model loaded successfully")
    return model, diffusion, checkpoint_info

def sample_hic_batch(model, diffusion, bio_times, matrix_size=256, 
                    ddim_steps=50, eta=0.0, clip_mode='nonneg', 
                    use_fp16=False, device=None, sem_cond=None):
    """
    Sample Hi-C matrices for multiple biological timepoints in one batch

    Args:
        model: trained diffusion model
        diffusion: diffusion process
        bio_times: list/array of biological times (0=early, 1=late)
        matrix_size: size of Hi-C matrix (output is matrix_size x matrix_size)
        ddim_steps: number of DDIM sampling steps (e.g., 20, 50, 100, 1000)
                   Set to diffusion.num_timesteps for full ancestral sampling
        eta: DDIM eta parameter (0=deterministic, >0 adds stochasticity)
        clip_mode: clipping mode ('none', 'nonneg', 'unit'), applied to the
                   denoised prediction at every sampling step
        use_fp16: whether to use mixed precision (CUDA autocast)
        device: device for computation (defaults to dist_util.dev())
        sem_cond: semantic conditioning features [batch_size, sem_cond_dim] or None

    Returns:
        samples: generated Hi-C matrices [len(bio_times), 1, H, W] as a CPU
                 float32 tensor
    """
    # BUGFIX: the original used th.nullcontext(), which does not exist in
    # torch and raised AttributeError on every non-fp16 / CPU run.
    from contextlib import nullcontext

    if device is None:
        device = dist_util.dev()
    
    model.eval()
    batch_size = len(bio_times)
    
    # Setup clipping function applied to the denoised estimate each step
    if clip_mode == 'nonneg':
        clip_fn = lambda x: x.clamp(min=0)
    elif clip_mode == 'unit':
        clip_fn = lambda x: x.clamp(-1, 1)
    else:
        clip_fn = lambda x: x
    
    # Create respaced diffusion with custom steps if needed
    if ddim_steps < diffusion.num_timesteps:
        # NOTE(review): relative import assumes this module lives inside the
        # Squidiff package, while the top-level imports are absolute — confirm.
        from .respace import SpacedDiffusion, space_timesteps
        
        # Create a new diffusion with respaced timesteps
        use_timesteps = space_timesteps(diffusion.num_timesteps, [ddim_steps])
        respaced_diffusion = SpacedDiffusion(
            use_timesteps=use_timesteps,
            betas=diffusion.betas,
            model_mean_type=diffusion.model_mean_type,
            model_var_type=diffusion.model_var_type,
            loss_type=diffusion.loss_type,
            rescale_timesteps=diffusion.rescale_timesteps,
        )
        logger.log(f"Using DDIM with {ddim_steps} steps (respaced from {diffusion.num_timesteps})")
    else:
        respaced_diffusion = diffusion
        logger.log(f"Using full ancestral sampling with {diffusion.num_timesteps} steps")
    
    # Inference-only context (slightly faster than no_grad)
    with th.inference_mode():
        # Create biological time conditioning tensor
        bio_time_tensor = th.tensor(bio_times, dtype=th.float32, device=device)

        # Prepare additional model kwargs
        model_kwargs = {"bio_time": bio_time_tensor}
        if sem_cond is not None:
            if isinstance(sem_cond, np.ndarray):
                sem_cond = th.tensor(sem_cond, dtype=th.float32, device=device)
            model_kwargs["sem_cond"] = sem_cond

        # Sample initial Gaussian noise
        shape = (batch_size, 1, matrix_size, matrix_size)
        noise = th.randn(shape, device=device)

        # Mixed-precision autocast only when requested AND CUDA is available;
        # otherwise a no-op context.
        autocast_context = (
            th.cuda.amp.autocast() if use_fp16 and th.cuda.is_available() else nullcontext()
        )

        with autocast_context:
            # Use DDIM sampling (works for both respaced and full)
            sample = respaced_diffusion.ddim_sample_loop(
                model,
                shape,
                noise=noise,
                clip_denoised=False,
                denoised_fn=clip_fn,
                model_kwargs=model_kwargs,
                device=device,
                progress=False,
                eta=eta,
            )

    # Return CPU float32 tensor for downstream NumPy compatibility
    return sample.float().cpu()

def compute_semantic_embedding(model, hic_matrix, device=None, use_adaptive_strategy=True):
    """
    Compute the semantic embedding (zsem) of a Hi-C matrix via the model's encoder.

    Matrices larger than 512x512 can be handled adaptively: moderate sizes
    (<=1024) are downsampled to 128x128, larger ones are summarized by the
    mean embedding of up to 16 overlapping 128x128 patches.

    Args:
        model: trained diffusion model exposing encode_semantic()
        hic_matrix: Hi-C matrix [H, W] or [C, H, W]; numpy array or tensor
        device: device for computation (defaults to dist_util.dev())
        use_adaptive_strategy: enable the large-matrix handling described above

    Returns:
        zsem: semantic embedding as a numpy array [sem_cond_dim]
    """
    if device is None:
        device = dist_util.dev()

    model.eval()

    # Normalize input to a float32 tensor of shape [1, C, H, W]
    if isinstance(hic_matrix, np.ndarray):
        hic_matrix = th.tensor(hic_matrix, dtype=th.float32)
    if hic_matrix.dim() == 2:
        hic_matrix = hic_matrix[None, None]  # [1, 1, H, W]
    elif hic_matrix.dim() == 3:
        hic_matrix = hic_matrix[None]        # [1, C, H, W]

    hic_matrix = hic_matrix.to(device)
    H, W = hic_matrix.shape[2], hic_matrix.shape[3]

    with th.no_grad():
        if not (use_adaptive_strategy and (H > 512 or W > 512)):
            # Standard-size matrices are encoded directly
            zsem = model.encode_semantic(hic_matrix)  # [1, sem_cond_dim]
        else:
            logger.log(f"Using adaptive encoding strategy for large matrix: {H}x{W}")

            if H <= 1024 and W <= 1024:
                # Strategy 1: downsample to 128x128 and encode once
                shrunk = th.nn.functional.interpolate(
                    hic_matrix, size=(128, 128), mode='bilinear', align_corners=False
                )
                zsem = model.encode_semantic(shrunk)
            else:
                # Strategy 2: average embeddings of overlapping patches
                patch_size = 128
                stride = patch_size // 2  # 50% overlap
                # Row-major patch origins, capped at 16 to bound memory use
                origins = [
                    (r, c)
                    for r in range(0, H - patch_size + 1, stride)
                    for c in range(0, W - patch_size + 1, stride)
                ][:16]
                patches = [
                    hic_matrix[:, :, r:r + patch_size, c:c + patch_size]
                    for r, c in origins
                ]

                if patches:
                    embeddings = [model.encode_semantic(p) for p in patches]
                    zsem = th.stack(embeddings).mean(dim=0)
                else:
                    # Fallback: encode a 128x128 center crop
                    top = (H - 128) // 2
                    left = (W - 128) // 2
                    zsem = model.encode_semantic(
                        hic_matrix[:, :, top:top + 128, left:left + 128]
                    )

    return zsem.squeeze(0).cpu().numpy()  # [sem_cond_dim]

def interpolate_semantic_embeddings(zsem_early, zsem_late, alphas):
    """
    Linearly blend early and late semantic embeddings.

    Args:
        zsem_early: early semantic embedding [sem_cond_dim]
        zsem_late: late semantic embedding [sem_cond_dim]
        alphas: interpolation factors, 0=early, 1=late

    Returns:
        interpolated_zsems: [len(alphas), sem_cond_dim]
    """
    zsem_early = np.array(zsem_early)
    zsem_late = np.array(zsem_late)

    # One convex combination per requested alpha
    return np.array([
        (1 - a) * zsem_early + a * zsem_late
        for a in np.array(alphas)
    ])

def spherical_interpolate_semantic_embeddings(zsem_early, zsem_late, alphas):
    """
    Spherical interpolation (SLERP) between semantic embeddings.

    Both inputs are unit-normalized first; SLERP is more stable than linear
    interpolation for normalized embeddings. Nearly-parallel vectors fall
    back to plain linear interpolation.

    Args:
        zsem_early: early semantic embedding [sem_cond_dim]
        zsem_late: late semantic embedding [sem_cond_dim]
        alphas: interpolation factors, 0=early, 1=late

    Returns:
        interpolated_zsems: [len(alphas), sem_cond_dim]
    """
    u = np.array(zsem_early)
    v = np.array(zsem_late)
    alphas = np.array(alphas)

    # Unit-normalize (epsilon guards against zero vectors)
    u = u / (np.linalg.norm(u) + 1e-8)
    v = v / (np.linalg.norm(v) + 1e-8)

    # Angle between the normalized vectors; omega is constant for all alphas
    omega = np.arccos(np.clip(np.dot(u, v), -1.0, 1.0))

    if abs(omega) < 1e-6:  # Nearly parallel: SLERP degenerates to lerp
        return np.array([(1 - a) * u + a * v for a in alphas])

    sin_omega = np.sin(omega)
    return np.array([
        (np.sin((1 - a) * omega) / sin_omega) * u
        + (np.sin(a * omega) / sin_omega) * v
        for a in alphas
    ])

def sample_with_semantic_guidance(model, diffusion, early_matrix, late_matrix,
                                  target_timepoints, matrix_size=128, num_samples=1,
                                  ddim_steps=50, use_slerp=False, clip_mode='nonneg',
                                  use_fp16=False, device=None):
    """
    Generate Hi-C matrices at target timepoints using semantic guidance.

    The early and late matrices are encoded to semantic embeddings, the
    embeddings are interpolated (linearly or via SLERP) at each target
    timepoint, and a batch of samples is drawn per timepoint conditioned on
    both the biological time and the interpolated embedding.

    Args:
        model: trained diffusion model with semantic encoder
        diffusion: diffusion process
        early_matrix: Hi-C matrix at early timepoint [H, W]
        late_matrix: Hi-C matrix at late timepoint [H, W]
        target_timepoints: biological timepoints to generate (list of floats 0-1)
        matrix_size: size of generated matrices
        num_samples: number of samples per timepoint
        ddim_steps: number of DDIM sampling steps
        use_slerp: whether to use spherical interpolation
        clip_mode: clipping mode for samples
        use_fp16: whether to use mixed precision
        device: device for computation

    Returns:
        results: dict with timepoints as keys and generated samples as values
        zsems: dict with zsem vectors for debugging
    """
    if device is None:
        device = dist_util.dev()

    logger.log("Computing semantic embeddings for early and late matrices...")

    zsem_early = compute_semantic_embedding(model, early_matrix, device)
    zsem_late = compute_semantic_embedding(model, late_matrix, device)

    logger.log(f"Early zsem shape: {zsem_early.shape}, norm: {np.linalg.norm(zsem_early):.4f}")
    logger.log(f"Late zsem shape: {zsem_late.shape}, norm: {np.linalg.norm(zsem_late):.4f}")

    # Choose the interpolation scheme once, then apply it
    if use_slerp:
        logger.log("Using spherical interpolation (SLERP)")
        interp_fn = spherical_interpolate_semantic_embeddings
    else:
        logger.log("Using linear interpolation")
        interp_fn = interpolate_semantic_embeddings
    zsems_interp = interp_fn(zsem_early, zsem_late, target_timepoints)

    results = {}
    zsems = {'early': zsem_early, 'late': zsem_late, 'interpolated': {}}

    for idx, bio_time in enumerate(target_timepoints):
        logger.log(f"Generating samples for timepoint {bio_time:.3f}...")

        # Replicate the interpolated embedding across the sample batch
        sem_batch = np.tile(zsems_interp[idx:idx + 1], (num_samples, 1))

        batch = sample_hic_batch(
            model, diffusion, [bio_time] * num_samples, matrix_size,
            ddim_steps, eta=0.0, clip_mode=clip_mode,
            use_fp16=use_fp16, device=device, sem_cond=sem_batch
        )

        # Drop the channel dimension: [num_samples, H, W]
        matrices = batch.numpy()[:, 0]
        results[bio_time] = matrices
        zsems['interpolated'][bio_time] = zsems_interp[idx]

        logger.log(f"Generated {num_samples} samples for t={bio_time:.3f}, "
                  f"mean_value={matrices.mean():.4f}, max_value={matrices.max():.4f}")

    return results, zsems

def evaluate_zsem_consistency(model, generated_samples, true_samples, device=None):
    """
    Evaluate consistency between generated and true samples in zsem space.

    Encodes every generated sample and (when available) the matching true
    matrix, then compares the mean generated embedding against the true one
    per timepoint (cosine similarity, L2 distance, MSE) plus overall stats.

    Args:
        model: trained diffusion model with semantic encoder
        generated_samples: dict with timepoints as keys, samples as values
        true_samples: dict with timepoints as keys, true matrices as values
        device: device for computation

    Returns:
        evaluation_results: dict with various consistency metrics
    """
    if device is None:
        device = dist_util.dev()

    logger.log("Evaluating zsem consistency...")

    results = {}

    # Encode all generated samples per timepoint -> [num_samples, sem_cond_dim]
    generated_zsems = {
        t: np.array([compute_semantic_embedding(model, s, device) for s in samples])
        for t, samples in generated_samples.items()
    }
    # Encode the true matrix wherever a matching timepoint exists
    true_zsems = {
        t: compute_semantic_embedding(model, true_samples[t], device)
        for t in generated_samples if t in true_samples
    }

    # Per-timepoint statistics and (when possible) agreement metrics
    for t, gen in generated_zsems.items():
        mean_vec = np.mean(gen, axis=0)  # [sem_cond_dim]
        std_vec = np.std(gen, axis=0)    # [sem_cond_dim]

        entry = {
            'generated_mean': mean_vec,
            'generated_std': std_vec,
            'generated_norm_mean': np.linalg.norm(mean_vec),
            'generated_norm_std': np.linalg.norm(std_vec),
        }

        if t in true_zsems:
            ref = true_zsems[t]

            cosine_sim = np.dot(mean_vec, ref) / (
                np.linalg.norm(mean_vec) * np.linalg.norm(ref) + 1e-8
            )
            l2_distance = np.linalg.norm(mean_vec - ref)
            mse = np.mean((mean_vec - ref) ** 2)

            entry.update({
                'true_zsem': ref,
                'cosine_similarity': cosine_sim,
                'l2_distance': l2_distance,
                'mse': mse,
            })

            logger.log(f"Timepoint {t:.3f}: cosine_sim={cosine_sim:.4f}, "
                      f"l2_dist={l2_distance:.4f}, mse={mse:.6f}")

        results[f'timepoint_{t}'] = entry

    # Aggregate statistics across all timepoints with ground truth
    if len(true_zsems) > 0:
        cos_vals = [results[f'timepoint_{t}']['cosine_similarity'] for t in true_zsems.keys()]
        l2_vals = [results[f'timepoint_{t}']['l2_distance'] for t in true_zsems.keys()]

        results['overall'] = {
            'mean_cosine_similarity': np.mean(cos_vals),
            'std_cosine_similarity': np.std(cos_vals),
            'mean_l2_distance': np.mean(l2_vals),
            'std_l2_distance': np.std(l2_vals),
        }

        logger.log(f"Overall: mean_cosine_sim={np.mean(cos_vals):.4f}±{np.std(cos_vals):.4f}")

    return results

def generate_temporal_series(model, diffusion, timepoints, output_dir, 
                           matrix_size=128, num_samples=1, ddim_steps=50,
                           batch_chunk=16, use_fp16=False, clip_mode='nonneg',
                           early_matrix=None, late_matrix=None, use_semantic_guidance=False,
                           use_slerp=False):
    """
    Generate Hi-C matrices for a series of timepoints using batch processing
    Now supports semantic guidance if early/late matrices are provided
    
    Args:
        model: trained diffusion model
        diffusion: diffusion process
        timepoints: list of biological timepoints
        output_dir: directory to save results (created if missing)
        matrix_size: size of Hi-C matrices
        num_samples: number of samples per timepoint
        ddim_steps: number of DDIM sampling steps
        batch_chunk: maximum batch size for GPU memory
        use_fp16: whether to use mixed precision
        clip_mode: clipping mode for generated samples
        early_matrix: early timepoint matrix for semantic guidance [H, W]
        late_matrix: late timepoint matrix for semantic guidance [H, W]
        use_semantic_guidance: whether to use semantic guidance
        use_slerp: whether to use spherical interpolation for zsem
    
    Returns:
        results: dictionary with timepoints as keys; each value is the
            per-timepoint AVERAGE matrix [H, W] (per-sample stacks are
            written to disk as hic_t{t}_fp32.npy before averaging)
        zsems: semantic embeddings (None unless semantic guidance was used)
    
    Side effects:
        Writes .npy arrays (samples, std maps, zsem vectors) and .png
        previews into output_dir, and logs progress via logger.
    """
    os.makedirs(output_dir, exist_ok=True)
    
    # Use semantic guidance if early/late matrices provided
    if use_semantic_guidance and early_matrix is not None and late_matrix is not None:
        logger.log("Using semantic guidance for temporal interpolation")
        results, zsems = sample_with_semantic_guidance(
            model, diffusion, early_matrix, late_matrix, timepoints,
            matrix_size, num_samples, ddim_steps, use_slerp, clip_mode, use_fp16
        )
        
        # Save zsem information for reproducibility/debugging
        np.save(os.path.join(output_dir, 'zsem_early.npy'), zsems['early'].astype(np.float32))
        np.save(os.path.join(output_dir, 'zsem_late.npy'), zsems['late'].astype(np.float32))
        
        for timepoint, zsem in zsems['interpolated'].items():
            np.save(os.path.join(output_dir, f'zsem_t{timepoint:.3f}.npy'), 
                   zsem.astype(np.float32))
        
    else:
        logger.log("Using standard temporal interpolation (no semantic guidance)")
        zsems = None
        
        # Create all combinations of (timepoint, sample_idx)
        all_jobs = []
        for bio_time in timepoints:
            for sample_idx in range(num_samples):
                all_jobs.append((bio_time, sample_idx))
        
        logger.log(f"Generating {len(all_jobs)} samples across {len(timepoints)} timepoints")
        logger.log(f"Using batch_chunk={batch_chunk}, fp16={use_fp16}, clip_mode={clip_mode}")
        
        # Process in chunks so each batch fits in GPU memory
        all_samples = []
        job_indices = []
        
        for chunk_start in range(0, len(all_jobs), batch_chunk):
            chunk_end = min(chunk_start + batch_chunk, len(all_jobs))
            chunk_jobs = all_jobs[chunk_start:chunk_end]
            
            # Extract bio_times for this chunk
            chunk_bio_times = [job[0] for job in chunk_jobs]
            
            logger.log(f"Processing chunk {chunk_start//batch_chunk + 1}/{(len(all_jobs)-1)//batch_chunk + 1}: "
                      f"samples {chunk_start}-{chunk_end-1}")
            
            # Generate batch
            chunk_samples = sample_hic_batch(
                model, diffusion, chunk_bio_times, matrix_size, 
                ddim_steps, eta=0.0, clip_mode=clip_mode, 
                use_fp16=use_fp16
            )
            
            all_samples.append(chunk_samples)
            job_indices.extend(chunk_jobs)
        
        # Concatenate all samples
        all_samples = th.cat(all_samples, dim=0).numpy()
        
        # Organize results by timepoint (jobs were emitted in timepoint order,
        # job_indices preserves the mapping back)
        timepoint_samples = {t: [] for t in timepoints}
        
        for i, (bio_time, sample_idx) in enumerate(job_indices):
            sample = all_samples[i, 0]  # Remove channel dimension [H, W]
            timepoint_samples[bio_time].append(sample)
        
        # Convert to final format
        results = {}
        for bio_time in timepoints:
            samples = np.array(timepoint_samples[bio_time])  # [num_samples, H, W]
            results[bio_time] = samples
    
    # Process and save results for each timepoint
    for bio_time in timepoints:
        samples = results[bio_time]  # [num_samples, H, W]
        
        # Save raw samples (float32)
        np.save(os.path.join(output_dir, f'hic_t{bio_time:.3f}_fp32.npy'), 
               samples.astype(np.float32))
        
        # Average samples if multiple
        if num_samples > 1:
            avg_matrix = samples.mean(axis=0)  # [H, W]
            std_matrix = samples.std(axis=0)   # [H, W] for uncertainty
            
            # Save std as well for uncertainty quantification
            np.save(os.path.join(output_dir, f'hic_t{bio_time:.3f}_std_fp32.npy'),
                   std_matrix.astype(np.float32))
        else:
            avg_matrix = samples[0]  # [H, W]
        
        # NOTE: replaces the stacked samples with the average in place, so
        # callers see only the [H, W] mean matrix for each timepoint.
        results[bio_time] = avg_matrix
        
        # Save visualization (one panel, or two when a std map exists)
        fig, axes = plt.subplots(1, 2 if num_samples > 1 else 1, figsize=(12 if num_samples > 1 else 8, 8))
        if num_samples == 1:
            axes = [axes]  # Make it iterable
        
        # Plot average
        im1 = axes[0].imshow(avg_matrix, cmap='Reds', origin='lower')
        axes[0].set_title(f'Hi-C Matrix at t={bio_time:.3f}')
        axes[0].set_xlabel('Genomic Position')
        axes[0].set_ylabel('Genomic Position')
        plt.colorbar(im1, ax=axes[0], shrink=0.8)
        
        # Plot uncertainty if multiple samples
        if num_samples > 1:
            im2 = axes[1].imshow(std_matrix, cmap='Blues', origin='lower')
            axes[1].set_title(f'Std Dev at t={bio_time:.3f}')
            axes[1].set_xlabel('Genomic Position')
            axes[1].set_ylabel('Genomic Position')
            plt.colorbar(im2, ax=axes[1], shrink=0.8)
        
        plt.tight_layout()
        plt.savefig(os.path.join(output_dir, f'hic_t{bio_time:.3f}.png'), 
                   dpi=150, bbox_inches='tight')
        plt.close()
        
        logger.log(f"Completed timepoint {bio_time:.3f}: "
                  f"mean_value={avg_matrix.mean():.4f}, "
                  f"max_value={avg_matrix.max():.4f}")
    
    return results, zsems

def sample_hic_at_timepoint(model, diffusion, bio_time, matrix_size=128, 
                           num_samples=1, ddim_steps=50, eta=0.0, 
                           clip_denoised=True, device=None):
    """
    Sample Hi-C matrices at one biological timepoint (legacy wrapper).

    Kept for backward compatibility; delegates to sample_hic_batch(), which
    should be preferred for new code.

    Args:
        model: trained diffusion model
        diffusion: diffusion process
        bio_time: biological time (0=early, 1=late)
        matrix_size: size of Hi-C matrix
        num_samples: number of samples to generate
        ddim_steps: number of DDIM sampling steps
        eta: DDIM eta parameter (0=deterministic)
        clip_denoised: True maps to clip_mode 'unit', False to 'nonneg'
        device: device for computation

    Returns:
        samples: generated Hi-C matrices as numpy array [num_samples, 1, H, W]
    """
    mode = 'unit' if clip_denoised else 'nonneg'
    batch = sample_hic_batch(
        model, diffusion, [bio_time] * num_samples, matrix_size,
        ddim_steps, eta, mode, use_fp16=False, device=device
    )
    return batch.numpy()

def create_temporal_animation(matrices_dict, output_path, fps=5, vmax=None):
    """
    Create an animation showing temporal evolution of Hi-C matrices.

    Args:
        matrices_dict: dictionary with timepoints as keys and matrices as values
        output_path: path to save animation (written via the pillow writer)
        fps: frames per second
        vmax: maximum value for color scale (defaults to the global max)
    """
    try:
        import matplotlib.animation as animation
    except ImportError:
        logger.log("matplotlib.animation not available. Skipping animation creation.")
        return

    # Frames ordered by ascending timepoint
    times = sorted(matrices_dict.keys())
    frames = [matrices_dict[t] for t in times]

    if vmax is None:
        vmax = max(m.max() for m in frames)

    fig, ax = plt.subplots(figsize=(8, 8))

    # First frame establishes the image, labels, and colorbar
    image = ax.imshow(frames[0], cmap='Reds', vmin=0, vmax=vmax, origin='lower')
    ax.set_xlabel('Genomic Position')
    ax.set_ylabel('Genomic Position')
    title = ax.set_title(f'Hi-C Evolution: t={times[0]:.3f}')
    plt.colorbar(image, ax=ax, shrink=0.8)

    def _update(frame_idx):
        # Swap in the matrix and timepoint label for this frame
        image.set_array(frames[frame_idx])
        title.set_text(f'Hi-C Evolution: t={times[frame_idx]:.3f}')
        return [image, title]

    anim = animation.FuncAnimation(
        fig, _update, frames=len(frames),
        interval=1000//fps, blit=False, repeat=True
    )

    anim.save(output_path, writer='pillow', fps=fps)
    plt.close()
    logger.log(f"Animation saved to {output_path}")

def interpolate_between_real_data(early_matrix, late_matrix, num_steps=10):
    """
    Create a linear interpolation between two real Hi-C matrices.

    Args:
        early_matrix: Hi-C matrix at early timepoint
        late_matrix: Hi-C matrix at late timepoint
        num_steps: number of interpolation steps (endpoints included)

    Returns:
        interpolated_matrices: list of interpolated matrices
        timepoints: corresponding timepoints in [0, 1]
    """
    timepoints = np.linspace(0, 1, num_steps)

    # Convex combination at each timepoint: t=0 is early, t=1 is late
    interpolated_matrices = [
        (1 - t) * early_matrix + t * late_matrix for t in timepoints
    ]

    return interpolated_matrices, timepoints

def parse_inference_args():
    """
    Parse command-line arguments for inference.

    Returns:
        dict mapping argument names to parsed values; model defaults from
        get_all_defaults() are merged in via add_dict_to_argparser.
    """
    defaults = get_all_defaults()
    
    parser = argparse.ArgumentParser(description='Hi-C Diffusion Model Inference with Semantic Guidance')
    
    # Add inference-specific arguments
    parser.add_argument('--checkpoint_path', type=str, required=True,
                       help='Path to model checkpoint file or directory')
    parser.add_argument('--output_dir', type=str, required=True,
                       help='Output directory for results')
    parser.add_argument('--timepoints', type=str, default='0.0,0.25,0.5,0.75,1.0',
                       help='Comma-separated biological timepoints')
    parser.add_argument('--num_samples', type=int, default=1,
                       help='Number of samples per timepoint')
    parser.add_argument('--ddim_steps', type=int, default=50,
                       help='Number of DDIM sampling steps')
    parser.add_argument('--batch_chunk', type=int, default=16,
                       help='Maximum batch size for GPU memory')
    parser.add_argument('--use_fp16', action='store_true',
                       help='Use mixed precision (fp16) for inference')
    parser.add_argument('--clip_mode', type=str, choices=['none', 'nonneg', 'unit'], 
                       default='nonneg', help='Clipping mode for generated samples')
    # BUG FIX: '--use_ema' was previously declared as store_true with
    # default=True, which made the flag a no-op and left no way to disable
    # EMA weights from the command line. Keep '--use_ema' for backward
    # compatibility and add '--no_use_ema' to opt out explicitly.
    parser.add_argument('--use_ema', dest='use_ema', action='store_true',
                       help='Prefer EMA weights over regular model weights (default)')
    parser.add_argument('--no_use_ema', dest='use_ema', action='store_false',
                       help='Use regular model weights instead of EMA weights')
    parser.set_defaults(use_ema=True)
    
    # Semantic guidance arguments
    parser.add_argument('--use_semantic_guidance', action='store_true',
                       help='Use semantic guidance for interpolation')
    parser.add_argument('--early_matrix_path', type=str, default='',
                       help='Path to early timepoint matrix for semantic guidance')
    parser.add_argument('--late_matrix_path', type=str, default='',
                       help='Path to late timepoint matrix for semantic guidance')
    parser.add_argument('--use_slerp', action='store_true',
                       help='Use spherical interpolation (SLERP) for zsem')
    parser.add_argument('--evaluate_zsem', action='store_true',
                       help='Evaluate zsem consistency with true samples')
    parser.add_argument('--true_samples_dir', type=str, default='',
                       help='Directory containing true samples for zsem evaluation')
    
    # Animation and comparison arguments
    parser.add_argument('--create_animation', action='store_true',
                       help='Create temporal animation')
    parser.add_argument('--compare_with_linear', action='store_true',
                       help='Compare with linear interpolation')
    
    # Add model arguments (shared defaults used by training and inference)
    add_dict_to_argparser(parser, defaults)
    
    args = parser.parse_args()
    return vars(args)

def _log_configuration(args, checkpoint_info, timepoints):
    """Log the resolved inference settings."""
    logger.log("=== Inference Configuration ===")
    logger.log(f"Checkpoint: {checkpoint_info}")
    logger.log(f"Timepoints: {timepoints}")
    logger.log(f"Samples per timepoint: {args['num_samples']}")
    logger.log(f"DDIM steps: {args['ddim_steps']}")
    logger.log(f"Batch chunk size: {args['batch_chunk']}")
    logger.log(f"Mixed precision (fp16): {args['use_fp16']}")
    logger.log(f"Clipping mode: {args['clip_mode']}")
    logger.log(f"Matrix size: {args['window_size']}x{args['window_size']}")
    logger.log(f"Semantic guidance: {args['use_semantic_guidance']}")

def _load_guidance_matrices(args):
    """Load early/late matrices for semantic guidance.

    Disables semantic guidance in-place on `args` when either path is missing.
    Returns (early_matrix, late_matrix), each possibly None.
    """
    early_matrix = None
    late_matrix = None
    if args['use_semantic_guidance']:
        if args['early_matrix_path'] and args['late_matrix_path']:
            logger.log("Loading early and late matrices for semantic guidance...")
            early_matrix = np.load(args['early_matrix_path'])
            late_matrix = np.load(args['late_matrix_path'])
            logger.log(f"Early matrix shape: {early_matrix.shape}")
            logger.log(f"Late matrix shape: {late_matrix.shape}")
        else:
            logger.log("Warning: Semantic guidance requested but early/late matrix paths not provided")
            args['use_semantic_guidance'] = False
    return early_matrix, late_matrix

def _run_zsem_evaluation(model, results, timepoints, args):
    """Evaluate zsem consistency and save a JSON report; returns the raw results dict."""
    import json

    logger.log("Evaluating zsem consistency...")

    # Load true samples if a directory was provided (missing files are skipped)
    true_samples = {}
    if args['true_samples_dir'] and os.path.exists(args['true_samples_dir']):
        for timepoint in timepoints:
            true_file = os.path.join(args['true_samples_dir'], f'true_t{timepoint:.3f}.npy')
            if os.path.exists(true_file):
                true_samples[timepoint] = np.load(true_file)
                logger.log(f"Loaded true sample for t={timepoint:.3f}")

    # Normalize generated matrices to [num_samples, H, W]
    generated_samples = {}
    for timepoint in results:
        if len(results[timepoint].shape) == 3:  # [num_samples, H, W]
            generated_samples[timepoint] = results[timepoint]
        else:  # [H, W] -> [1, H, W]
            generated_samples[timepoint] = results[timepoint][np.newaxis, :]

    # evaluate_zsem_consistency is defined elsewhere in this module
    zsem_results = evaluate_zsem_consistency(model, generated_samples, true_samples)

    # Convert numpy values so the results are JSON-serializable
    zsem_results_serializable = {}
    for key, value in zsem_results.items():
        if isinstance(value, dict):
            zsem_results_serializable[key] = {}
            for k, v in value.items():
                if isinstance(v, np.ndarray):
                    zsem_results_serializable[key][k] = v.tolist()
                else:
                    zsem_results_serializable[key][k] = float(v) if np.isscalar(v) else v
        else:
            zsem_results_serializable[key] = value

    with open(os.path.join(args['output_dir'], 'zsem_evaluation.json'), 'w') as f:
        json.dump(zsem_results_serializable, f, indent=2)

    return zsem_results

def _compare_with_linear_baseline(results, timepoints, early_matrix, late_matrix, output_dir):
    """Compare generated matrices against a linear-interpolation baseline; save metrics and plots."""
    logger.log("Comparing with linear interpolation...")

    # Generate the linear baseline at the same number of timepoints
    linear_matrices, linear_timepoints = interpolate_between_real_data(
        early_matrix, late_matrix, len(timepoints)
    )

    comparison_dir = os.path.join(output_dir, 'linear_comparison')
    os.makedirs(comparison_dir, exist_ok=True)

    for i, t in enumerate(timepoints):
        if t not in results:
            continue
        generated = results[t]
        linear = linear_matrices[i]

        # evaluate_hic_quality / visualize_hic_comparison are imported at module top
        metrics = evaluate_hic_quality(generated, linear, return_all_metrics=True)

        with open(os.path.join(comparison_dir, f'metrics_t{t:.3f}.txt'), 'w') as f:
            for key, value in metrics.items():
                f.write(f'{key}: {value}\n')

        fig = visualize_hic_comparison(
            generated, linear,
            save_path=os.path.join(comparison_dir, f'comparison_t{t:.3f}.png')
        )
        plt.close(fig)

def _write_summary(args, checkpoint_info, timepoints, total_samples,
                   temporal_results, zsem_results):
    """Write the human-readable inference summary text file."""
    summary_path = os.path.join(args['output_dir'], 'inference_summary.txt')
    with open(summary_path, 'w') as f:
        f.write(f"Hi-C Diffusion Inference Summary with Semantic Guidance\n")
        f.write(f"Generated at: {datetime.now()}\n")
        f.write(f"Checkpoint: {args['checkpoint_path']}\n")
        f.write(f"Checkpoint info: {checkpoint_info}\n")
        f.write(f"Timepoints: {timepoints}\n")
        f.write(f"Matrix size: {args['window_size']}\n")
        f.write(f"Number of samples per timepoint: {args['num_samples']}\n")
        f.write(f"Total samples generated: {total_samples}\n")
        f.write(f"DDIM steps: {args['ddim_steps']}\n")
        f.write(f"Batch chunk size: {args['batch_chunk']}\n")
        f.write(f"Mixed precision: {args['use_fp16']}\n")
        f.write(f"Clipping mode: {args['clip_mode']}\n")
        f.write(f"Semantic guidance: {args['use_semantic_guidance']}\n")
        f.write(f"Use SLERP: {args['use_slerp']}\n\n")

        f.write("Features:\n")
        f.write("- Semantic guidance with zsem interpolation\n")
        f.write("- Batch processing for multiple timepoints\n")
        f.write("- Mixed precision training support\n")
        f.write("- Automatic EMA checkpoint detection\n")
        f.write("- Memory-efficient chunking\n")
        f.write("- Uncertainty quantification (std dev)\n")
        f.write("- Float32 output format\n")
        f.write("- zsem consistency evaluation\n\n")

        f.write("Temporal consistency results:\n")
        for key, value in temporal_results.items():
            f.write(f"{key}: {value}\n")

        if zsem_results and 'overall' in zsem_results:
            f.write("\nzsem consistency results:\n")
            for key, value in zsem_results['overall'].items():
                f.write(f"{key}: {value}\n")

def _log_completion(args, total_samples, zsem_results):
    """Log final status and the feature set that was used."""
    logger.log(f"Inference completed. Results saved to {args['output_dir']}")
    logger.log(f"Total samples generated: {total_samples}")
    logger.log("Features used:")
    logger.log(f"  - Semantic guidance: {args['use_semantic_guidance']}")
    if args['use_semantic_guidance']:
        logger.log(f"  - Interpolation method: {'SLERP' if args['use_slerp'] else 'Linear'}")
    logger.log(f"  - Batch processing: {args['batch_chunk']} samples/chunk")
    logger.log(f"  - Mixed precision: {args['use_fp16']}")
    logger.log(f"  - EMA weights: {args['use_ema']}")
    logger.log(f"  - Clipping mode: {args['clip_mode']}")

    if zsem_results and 'overall' in zsem_results:
        logger.log(f"zsem consistency: {zsem_results['overall']['mean_cosine_similarity']:.4f}±{zsem_results['overall']['std_cosine_similarity']:.4f}")

def main():
    """
    Run Hi-C diffusion inference end-to-end.

    Steps: parse args, load the checkpoint, generate a temporal series of
    Hi-C matrices, optionally evaluate zsem consistency, optionally create
    an animation and a linear-interpolation comparison, evaluate temporal
    consistency, and write a text summary.
    """
    args = parse_inference_args()

    # Setup distributed backend and logging directory
    dist_util.setup_dist()
    logger.configure(dir=args['output_dir'])

    # Parse comma-separated biological timepoints
    timepoints = [float(t) for t in args['timepoints'].split(',')]

    # Load model (prefers EMA weights when requested)
    model, diffusion, checkpoint_info = load_model_from_checkpoint(
        args['checkpoint_path'], args, use_ema=args['use_ema']
    )

    _log_configuration(args, checkpoint_info, timepoints)

    # May disable semantic guidance in-place when matrix paths are missing
    early_matrix, late_matrix = _load_guidance_matrices(args)

    # Generate temporal series (generate_temporal_series is defined elsewhere
    # in this module)
    logger.log("Generating Hi-C temporal series...")
    results, zsems = generate_temporal_series(
        model, diffusion, timepoints, args['output_dir'],
        matrix_size=args['window_size'],
        num_samples=args['num_samples'],
        ddim_steps=args['ddim_steps'],
        batch_chunk=args['batch_chunk'],
        use_fp16=args['use_fp16'],
        clip_mode=args['clip_mode'],
        early_matrix=early_matrix,
        late_matrix=late_matrix,
        use_semantic_guidance=args['use_semantic_guidance'],
        use_slerp=args['use_slerp']
    )

    # Evaluate zsem consistency if requested and embeddings were produced
    zsem_results = None
    if args['evaluate_zsem'] and zsems is not None:
        zsem_results = _run_zsem_evaluation(model, results, timepoints, args)

    # Create animation if requested
    if args['create_animation']:
        logger.log("Creating temporal animation...")
        animation_path = os.path.join(args['output_dir'], 'hic_evolution.gif')
        create_temporal_animation(results, animation_path)

    # Compare with linear interpolation if baseline matrices are available
    if args['compare_with_linear'] and early_matrix is not None and late_matrix is not None:
        _compare_with_linear_baseline(
            results, timepoints, early_matrix, late_matrix, args['output_dir']
        )

    # Evaluate temporal consistency (evaluate_temporal_interpolation is
    # imported at module top)
    logger.log("Evaluating temporal consistency...")
    temporal_results = evaluate_temporal_interpolation(results, timepoints, args['output_dir'])

    # Calculate performance stats
    total_samples = len(timepoints) * args['num_samples']

    _write_summary(args, checkpoint_info, timepoints, total_samples,
                   temporal_results, zsem_results)

    _log_completion(args, total_samples, zsem_results)

if __name__ == "__main__":
    main()