import torch
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
import h5py
import os
import pickle
import cooler
from scipy import sparse
from scipy.stats import zscore
import warnings
import re
from collections import defaultdict
warnings.filterwarnings('ignore')

def _robust_minmax_01(x):
    """Rescale an array to [0, 1] using robust percentiles (1st / 99.5th).

    Percentiles are computed over positive entries when any exist, so the
    scaling is not dominated by the zero background of sparse Hi-C
    matrices; values outside the percentile range are clipped.
    """
    flat = x.flatten()
    pos = flat[flat > 0]
    ref = pos if pos.size > 0 else flat
    lo = np.percentile(ref, 1.0)
    hi = np.percentile(ref, 99.5)
    if hi <= lo:
        # Degenerate (constant) input: avoid division by zero.
        hi = lo + 1e-6
    return (np.clip(x, lo, hi) - lo) / (hi - lo)

def normalize_hic_matrix(matrix, method='ice', log_transform=True):
    """
    Normalize Hi-C contact matrix

    Args:
        matrix: Hi-C contact matrix (numpy array)
        method: normalization method ('ice', 'zscore', 'log', 'oe', 'minmax', 'none')
        log_transform: whether to apply log(1+x) transformation (applied before method-specific scaling when relevant)

    Returns:
        normalized matrix as float32

    Raises:
        ValueError: if `method` is not one of the supported names
    """
    if method == 'none':
        normalized = matrix.copy()
    elif method == 'log':
        normalized = np.log1p(matrix)
    elif method == 'minmax':
        # Robust min-max scaling with optional log1p
        x = np.log1p(matrix) if log_transform else matrix.copy()
        normalized = _robust_minmax_01(x)
    elif method == 'zscore':
        if log_transform:
            matrix = np.log1p(matrix)
        # Use positive entries to decide whether there is any variance worth
        # standardizing; the size guard avoids .max() crashing on an empty
        # matrix (the original called .max() unconditionally).
        matrix_flat = matrix.flatten()
        if matrix_flat.size > 0 and matrix_flat.max() > 0:
            matrix_flat = matrix_flat[matrix_flat > 0]
        if len(matrix_flat) > 1 and matrix_flat.std() > 0:
            normalized = zscore(matrix, axis=None, nan_policy='omit')
            # BUGFIX: pass the fill value as `nan=`; the second positional
            # argument of np.nan_to_num is `copy`, not the replacement value.
            normalized = np.nan_to_num(normalized, nan=0.0)
        else:
            normalized = matrix
    elif method == 'oe':
        # Observed/Expected by genomic distance to flatten distance decay
        # Then optional log1p and robust rescaling to [0,1]
        x = matrix.astype(np.float64, copy=True)
        n = x.shape[0]
        # |i - j| distance (in bins) for every matrix entry
        dists = np.abs(np.subtract.outer(np.arange(n), np.arange(n)))
        # Expected contact per diagonal offset, ignoring zeros; only the
        # upper diagonal is scanned (symmetry of Hi-C matrices is assumed).
        expected = np.zeros(n, dtype=np.float64)
        for d in range(n):
            diag_vals = np.diagonal(x, offset=d)
            nz = diag_vals[diag_vals > 0]
            expected[d] = nz.mean() if nz.size > 0 else 0.0
        # Avoid division by zero for entirely-empty diagonals
        expected[expected == 0] = 1.0
        # Vectorized O/E
        normalized = x / expected[dists]
        normalized = np.nan_to_num(normalized, nan=0.0, posinf=0.0, neginf=0.0)
        if log_transform:
            normalized = np.log1p(normalized)
        # Robust min-max to [0,1]
        normalized = _robust_minmax_01(normalized)
    elif method == 'ice':
        # Simple ICE-like normalization (single pass of row/column balancing)
        if log_transform:
            matrix = np.log1p(matrix)
        row_sums = matrix.sum(axis=1)
        col_sums = matrix.sum(axis=0)
        # Avoid division by zero for empty rows/columns
        row_sums[row_sums == 0] = 1
        col_sums[col_sums == 0] = 1
        normalized = matrix / np.sqrt(np.outer(row_sums, col_sums))
        normalized = np.nan_to_num(normalized, nan=0.0)
    else:
        raise ValueError(f"Unknown normalization method: {method}")

    return normalized.astype(np.float32)

def load_hic_from_cool(cool_path, chromosome=None, resolution=None, start=None, end=None):
    """
    Load a raw (unbalanced) Hi-C contact matrix from a cooler file.

    Args:
        cool_path: path to .cool file
        chromosome: chromosome name (e.g., 'chr1'); when None, the whole
            genome-wide matrix is returned
        resolution: resolution in bp (accepted for API symmetry; the cooler
            file's own binning is what gets read)
        start, end: optional genomic coordinates restricting the region

    Returns:
        contact matrix as numpy array, or None when loading fails
    """
    try:
        clr = cooler.Cooler(cool_path)
        selector = clr.matrix(balance=False)
        if chromosome is None:
            # Whole-genome matrix
            return selector[:]
        if start is None or end is None:
            # Entire chromosome
            return selector.fetch(chromosome)
        # Sub-region of one chromosome
        return selector.fetch(f"{chromosome}:{start}-{end}")
    except Exception as e:
        print(f"Error loading {cool_path}: {e}")
        return None

def create_genomic_bin_map(data, resolution, chromosomes=None):
    """
    Create mapping from genomic coordinates to matrix bins.

    Bins are laid out chromosome after chromosome, so the returned indices
    are global across all included chromosomes.

    Args:
        data: pandas DataFrame with chr1, pos1, chr2, pos2, count columns
        resolution: Hi-C resolution in bp
        chromosomes: list of chromosomes to include (if None, use all)

    Returns:
        dict with keys:
            'bin_map': {(chrom, bin_start_pos): global_bin_index}
            'chr_bins': per-chromosome {'start_bin', 'end_bin', 'num_bins', 'max_pos'}
            'total_bins': total number of bins across all chromosomes
            'chromosomes': ordered list of chromosomes used
            'resolution': the resolution passed in
    """
    if chromosomes is None:
        # Extract unique chromosomes; sort by (length, name) so that
        # e.g. '2' comes before '10'.
        chrs1 = set(data['chr1'].unique())
        chrs2 = set(data['chr2'].unique())
        chromosomes = sorted(chrs1.union(chrs2), key=lambda x: (len(x), x))

    bin_map = {}
    chr_bins = {}
    current_bin = 0

    for chrom in chromosomes:
        # Max observed position on either side of the contact pairs
        max_pos1 = data[data['chr1'] == chrom]['pos1'].max() if chrom in data['chr1'].values else 0
        max_pos2 = data[data['chr2'] == chrom]['pos2'].max() if chrom in data['chr2'].values else 0
        max_pos = max(max_pos1, max_pos2, 0)

        # Number of bins needed to cover [0, max_pos]
        num_bins = (max_pos // resolution) + 1

        chr_bins[chrom] = {
            'start_bin': current_bin,
            'end_bin': current_bin + num_bins - 1,
            'num_bins': num_bins,
            'max_pos': max_pos
        }

        # BUGFIX: the original advanced `current_bin` inside the position
        # loop AND added `num_bins` afterwards, double-counting every
        # chromosome and leaving bin_map inconsistent with chr_bins; the
        # position loop also emitted one extra entry whenever max_pos was
        # not a multiple of resolution. Emit exactly num_bins entries and
        # advance once.
        for b in range(num_bins):
            bin_map[(chrom, b * resolution)] = current_bin + b
        current_bin += num_bins

    return {
        'bin_map': bin_map,
        'chr_bins': chr_bins,
        'total_bins': current_bin,
        'chromosomes': chromosomes,
        'resolution': resolution
    }

def genomic_pos_to_bin(chrom, pos, chr_bins, resolution):
    """
    Convert a genomic position to its global matrix bin index.

    Args:
        chrom: chromosome name
        pos: genomic position in bp
        chr_bins: per-chromosome bin layout (as built by create_genomic_bin_map)
        resolution: Hi-C resolution in bp

    Returns:
        global bin index, or None for an unknown chromosome
    """
    info = chr_bins.get(chrom)
    if info is None:
        return None
    # Offset within the chromosome plus the chromosome's global start bin
    return info['start_bin'] + pos // resolution

def load_hic_from_txt(txt_path, size=None, format_type='auto', resolution=None, chromosomes=None):
    """
    Load Hi-C data from a tab-separated text file.

    Args:
        txt_path: path to text file
        size: matrix size (if None, infer from data)
        format_type: 'auto', 'pairs' (chr1 pos1 chr2 pos2 count), or 'matrix' (i j count)
        resolution: Hi-C resolution in bp (required for 'pairs' format)
        chromosomes: list of chromosomes to include (if None, use all)

    Returns:
        contact matrix as numpy array, or None on failure
    """
    try:
        # Peek at a few rows to auto-detect the file layout
        preview = pd.read_csv(txt_path, sep='\t', header=None, nrows=5)

        if format_type == 'auto':
            head = preview.iloc[0]
            # Pairs format has >= 5 columns and a chromosome-like first column
            looks_like_pairs = (
                len(head) >= 5
                and isinstance(head.iloc[0], str)
                and ('chr' in str(head.iloc[0]).lower() or str(head.iloc[0]).isdigit())
            )
            format_type = 'pairs' if looks_like_pairs else 'matrix'

        if format_type == 'pairs':
            table = pd.read_csv(txt_path, sep='\t', header=None,
                                names=['chr1', 'pos1', 'chr2', 'pos2', 'count'])
            return _load_from_pairs_format(table, size, resolution, chromosomes)

        table = pd.read_csv(txt_path, sep='\t', header=None, names=['i', 'j', 'count'])
        return _load_from_matrix_format(table, size)

    except Exception as e:
        print(f"Error loading {txt_path}: {e}")
        return None

def _load_from_pairs_format(data, size, resolution, chromosomes):
    """Build a symmetric contact matrix from pairs rows (chr1 pos1 chr2 pos2 count)."""
    if resolution is None:
        raise ValueError("Resolution must be specified for pairs format")

    # Strip any 'chr' prefix so chromosome names are directly comparable
    for col in ('chr1', 'chr2'):
        data[col] = data[col].astype(str).str.replace('chr', '', regex=False)

    # Map genomic coordinates onto a global bin layout
    bin_info = create_genomic_bin_map(data, resolution, chromosomes)
    chr_bins = bin_info['chr_bins']
    total_bins = bin_info['total_bins']

    # Cap the matrix at a manageable size unless one was requested
    if size is None:
        size = min(total_bins, 10000)

    matrix = np.zeros((size, size), dtype=np.float32)

    for _, row in data.iterrows():
        # Translate both endpoints of the contact into global bin indices
        bin1 = genomic_pos_to_bin(row['chr1'], row['pos1'], chr_bins, resolution)
        bin2 = genomic_pos_to_bin(row['chr2'], row['pos2'], chr_bins, resolution)
        if bin1 is None or bin2 is None or bin1 >= size or bin2 >= size:
            continue
        value = float(row['count'])
        matrix[bin1, bin2] = value
        # Mirror off-diagonal entries to keep the matrix symmetric
        if bin1 != bin2:
            matrix[bin2, bin1] = value

    return matrix

def _load_from_matrix_format(data, size):
    """Load Hi-C data from matrix format (i j count)"""
    if size is None:
        size = max(data['i'].max(), data['j'].max()) + 1
        
    matrix = np.zeros((size, size), dtype=np.float32)
    
    # Fill matrix (assuming 0-based indexing)
    for _, row in data.iterrows():
        i, j, count = int(row['i']), int(row['j']), float(row['count'])
        if i < size and j < size:
            matrix[i, j] = count
            if i != j:  # Make symmetric
                matrix[j, i] = count
                
    return matrix

def interpolate_timepoints(matrix_early, matrix_late, alpha):
    """
    Linearly blend two Hi-C matrices.

    Args:
        matrix_early: Hi-C matrix at the early timepoint
        matrix_late: Hi-C matrix at the late timepoint
        alpha: interpolation factor (0 = early, 1 = late)

    Returns:
        interpolated matrix
    """
    early_weight = 1 - alpha
    return early_weight * matrix_early + alpha * matrix_late

def load_hic_from_npz(npz_path, chromosome=None, window_size=128, random_sample=True, start_h=None, start_w=None, band_limit_bins=None):
    """
    Load Hi-C data from an .npz file and extract one window.

    Supports two layouts:
      1) sparse NPZ (written by scipy.sparse.save_npz, containing
         data/indices/indptr/shape) — densified before windowing
      2) dense NPZ (written by numpy.savez, keyed by e.g. 'chr1'/'chrX') —
         the matching key is read directly

    Args:
        npz_path: path to .npz file
        chromosome: preferred chromosome key when using a dense NPZ
        window_size: output window size
        random_sample: kept for compatibility (ignored in grid mode)
        start_h/start_w: deterministic top-left indices
        band_limit_bins: near-diagonal band limit in bins (only used for
            random sampling; deterministic extraction assumes the caller
            enforced the band at window-selection time)

    Returns:
        (window_size, window_size) float32 array, or None on failure
    """
    try:
        # BUGFIX: use the NpzFile as a context manager so the underlying
        # zip/file handle is closed promptly instead of leaking until GC.
        with np.load(npz_path) as data:
            keys = set(data.keys())
            if {"data", "indices", "indptr", "shape"}.issubset(keys):
                # scipy.sparse archive: densify
                matrix = sparse.load_npz(npz_path).toarray()
            elif chromosome is not None:
                # Dense NPZ: try a few conventional key spellings
                chr_name = str(chromosome).replace('chr', '')
                possible_keys = [
                    chromosome,
                    f'chr{chr_name}',
                    chr_name,
                    f'matrix_{chromosome}',
                    f'matrix_chr{chr_name}',
                    f'matrix_{chr_name}'
                ]
                for key in possible_keys:
                    if key in data:
                        matrix = data[key]
                        break
                else:
                    # Fall back to the first stored array
                    matrix = data[list(data.keys())[0]]
            else:
                matrix = data[list(data.keys())[0]]

        # Ensure matrix is 2D and square-like
        if matrix.ndim != 2:
            raise ValueError(f"Matrix must be 2D, got shape {matrix.shape}")

        # Deterministic window extraction if indices provided
        if start_h is not None and start_w is not None:
            return extract_window_at(matrix, start_h, start_w, window_size)

        # Otherwise sample a window from the matrix
        return sample_window_from_matrix(matrix, window_size, random_sample, band_limit_bins=band_limit_bins)

    except Exception as e:
        print(f"Error loading {npz_path}: {e}")
        return None

def sample_window_from_matrix(matrix, window_size, random_sample=True, min_contact_threshold=0.1, band_limit_bins=None):
    """
    Extract a square window from a Hi-C matrix.

    - random_sample=False: deterministic center crop
    - random_sample=True: sample near the main diagonal so that
      |row_start - col_start| <= band_limit_bins; when band_limit_bins is
      not given the band defaults to window_size (same order of magnitude
      as the diagonal bandwidth)

    Args:
        matrix: Hi-C contact matrix
        window_size: size of the square window
        random_sample: whether to sample a random window
        min_contact_threshold: kept for signature compatibility (unused)
        band_limit_bins: near-diagonal constraint in bins (abs(sh-sw) <= band_limit_bins)

    Returns:
        float32 window of shape (window_size, window_size)
    """
    rows, cols = matrix.shape

    # Zero-pad matrices smaller than the requested window
    if rows < window_size or cols < window_size:
        matrix = np.pad(
            matrix,
            ((0, max(0, window_size - rows)), (0, max(0, window_size - cols))),
            mode='constant',
            constant_values=0,
        )
        rows, cols = matrix.shape

    # Nothing to choose when the matrix already matches the window
    if rows == window_size and cols == window_size:
        return matrix.astype(np.float32)

    limit_h = max(0, rows - window_size)
    limit_w = max(0, cols - window_size)

    if not random_sample:
        # Deterministic center crop
        top = (rows - window_size) // 2
        left = (cols - window_size) // 2
        crop = matrix[top:top + window_size, left:left + window_size]
        return crop.astype(np.float32)

    # Random near-diagonal sampling
    band = int(band_limit_bins) if band_limit_bins is not None else int(window_size)
    rng = np.random.default_rng()
    for _ in range(64):
        top = int(rng.integers(0, limit_h + 1))
        left = top + int(rng.integers(-band, band + 1))
        left = min(max(left, 0), limit_w)
        if abs(top - left) <= band:
            crop = matrix[top:top + window_size, left:left + window_size]
            return crop.astype(np.float32)

    # Fallback: align to the diagonal as closely as possible
    top = int(rng.integers(0, limit_h + 1))
    left = max(0, min(top, limit_w))
    crop = matrix[top:top + window_size, left:left + window_size]
    return crop.astype(np.float32)

def extract_window_at(matrix, start_h, start_w, window_size):
    """
    Extract the (window_size, window_size) window whose top-left corner is
    (start_h, start_w). Matrices smaller than the window are zero-padded,
    and out-of-range starts are clamped so that a full window of size
    (window_size, window_size) is always returned.
    """
    rows, cols = matrix.shape

    # Zero-pad undersized matrices on the bottom/right
    if rows < window_size or cols < window_size:
        matrix = np.pad(
            matrix,
            ((0, max(0, window_size - rows)), (0, max(0, window_size - cols))),
            mode='constant',
            constant_values=0,
        )
        rows, cols = matrix.shape

    # Clamp the requested corner into the valid range
    top = int(min(max(start_h, 0), max(0, rows - window_size)))
    left = int(min(max(start_w, 0), max(0, cols - window_size)))

    return matrix[top:top + window_size, left:left + window_size].astype(np.float32)


class HiCDataset(Dataset):
    def __init__(self, 
                 data_dir, 
                 timepoints=('G1', 'late_S1'),
                 file_format='npz',  # 'cool', 'txt', 'npy', 'npz'
                 txt_format='auto',   # 'auto', 'pairs', 'matrix' for txt files
                 chromosome='chr1',   # specific chromosome to load
                 chromosomes=None,    # list of chromosomes to include (for pairs format)
                 resolution=100000,
                 window_size=128,     # window size for sampling
                 matrix_size=None,    # deprecated, use window_size instead
                 normalization='ice',
                 log_transform=True,
                 interpolation_prob=0.3,  # probability of using interpolated samples
                 augmentation=False,
                 random_window=True,  # whether to randomly sample windows
                 use_grid_windows=False,   # iterate over all windows per matrix
                 window_stride=None,      # stride for grid windows (default: window_size)
                 band_limit_bp=5_000_000, # near-diagonal limit in base pairs (default 5Mb)
                 band_limit_bins=None,    # override in bins; if set, ignores band_limit_bp
                 output_range="0,1"):    # output value range: "0,1" or "-1,1"
        """
        Hi-C Dataset for temporal interpolation with windowed sampling
        
        Args:
            data_dir: directory containing Hi-C files
            timepoints: list of timepoint names
            file_format: format of Hi-C files ('cool', 'txt', 'npy', 'npz')
            txt_format: format for txt files ('auto', 'pairs', 'matrix')
            chromosome: specific chromosome to load (e.g., 'chr1')
            chromosomes: list of chromosomes to include (for pairs format)
            resolution: Hi-C resolution in bp
            window_size: size of the square window to extract
            matrix_size: deprecated, kept for backwards compatibility; only
                honored when explicitly provided
            normalization: normalization method
            log_transform: apply log transformation
            interpolation_prob: probability of generating interpolated samples
            augmentation: apply data augmentation
            random_window: whether to randomly sample window positions
            use_grid_windows: if True, iterate over all windows (overrides random_window)
            window_stride: stride for grid windows; default equals window_size (non-overlap)
            band_limit_bp: only consider windows within this genomic distance from diagonal (bp)
            band_limit_bins: same as above but directly in bins; if set, overrides band_limit_bp
        """
        self.data_dir = data_dir
        self.timepoints = timepoints
        self.file_format = file_format
        self.txt_format = txt_format
        self.chromosome = chromosome
        self.chromosomes = chromosomes
        self.resolution = resolution
        
        # BUGFIX: only honor the deprecated matrix_size when it was actually
        # supplied (default is now None). The old code compared the two
        # values, so any explicit window_size != 128 was silently overridden
        # by matrix_size's 128 default.
        self.window_size = int(matrix_size) if matrix_size is not None else int(window_size)
        self.matrix_size = self.window_size  # For backwards compatibility
        
        self.normalization = normalization
        self.log_transform = log_transform
        self.interpolation_prob = interpolation_prob
        self.augmentation = augmentation
        self.random_window = random_window
        self.use_grid_windows = use_grid_windows
        self.output_range = str(output_range)
        if window_stride is not None:
            # Cast to int in case window_stride is provided as a string (e.g., from CLI arguments)
            self.window_stride = int(window_stride)
        else:
            self.window_stride = self.window_size
        
        # Band-limit in bins
        if band_limit_bins is not None:
            self.band_limit_bins = int(band_limit_bins)
        else:
            self.band_limit_bins = int(round(band_limit_bp / self.resolution)) if band_limit_bp is not None else None
        
        self.file_list = self._scan_files()
        print(f"Found {len(self.file_list)} Hi-C files")
        
        if self.chromosome:
            print(f"Loading data for chromosome: {self.chromosome}")
        print(f"Window size: {self.window_size}x{self.window_size}")
        if self.use_grid_windows:
            print(f"Using grid windows with stride {self.window_stride}")
        if self.band_limit_bins is not None:
            print(f"Near-diagonal band limit: {self.band_limit_bins} bins (~{self.band_limit_bins * self.resolution} bp)")
        
        # If using grid windows, precompute window indices per file
        self.grid_index = None
        if self.use_grid_windows:
            self.grid_index = self._build_grid_index()
        
    def _scan_files(self):
        """Scan the data directory for Hi-C files, one subdirectory per timepoint.

        Returns:
            list of dicts with 'path', 'timepoint', 'cell_id' (and, for npz
            files, 'chromosome' inferred from the filename when possible).
        """
        file_list = []
        # Expected filename suffix for each supported format
        suffix = {'cool': '.cool', 'txt': '.txt', 'npy': '.npy', 'npz': '.npz'}.get(self.file_format)
        
        for timepoint in self.timepoints:
            timepoint_dir = os.path.join(self.data_dir, timepoint)
            if not os.path.exists(timepoint_dir):
                print(f"Warning: {timepoint_dir} does not exist")
                continue
            
            # BUGFIX: sort for a deterministic file (and therefore sample)
            # order across platforms — os.listdir order is filesystem-dependent.
            for filename in sorted(os.listdir(timepoint_dir)):
                if suffix is None or not filename.endswith(suffix):
                    continue
                entry = {
                    'path': os.path.join(timepoint_dir, filename),
                    'timepoint': timepoint,
                    'cell_id': filename,
                }
                if self.file_format == 'npz':
                    # Try to infer chromosome from filename, e.g., *_chr1.npz or chr2_*.npz
                    entry['chromosome'] = None
                    m = re.search(r'(chr[0-9XYM]+)', filename, re.IGNORECASE)
                    if m:
                        # The match always starts with 'chr' (case-insensitive),
                        # so lowercasing alone normalizes the prefix.
                        entry['chromosome'] = m.group(1).lower()
                file_list.append(entry)
        
        return file_list
    
    def _load_matrix(self, file_info):
        """Load one normalized Hi-C window for a single file.

        Dispatches on ``self.file_format``, extracts/samples a
        ``window_size`` x ``window_size`` window, normalizes it, and
        optionally rescales it to [-1, 1].

        Args:
            file_info: dict produced by ``_scan_files`` with at least 'path'
                (npz entries may also carry a 'chromosome' key).

        Returns:
            normalized float32 window, or None when loading failed.
        """
        file_path = file_info['path']
        
        if self.file_format == 'cool':
            # For cooler format, load full chromosome then sample window
            full_matrix = load_hic_from_cool(file_path, self.chromosome, self.resolution)
            if full_matrix is not None:
                matrix = sample_window_from_matrix(full_matrix, self.window_size, self.random_window, band_limit_bins=self.band_limit_bins)
            else:
                matrix = None
                
        elif self.file_format == 'txt':
            # For text format, this will create full matrix then sample window
            full_matrix = load_hic_from_txt(
                file_path, 
                size=None,  # Let it infer size
                format_type=self.txt_format,
                resolution=self.resolution,
                chromosomes=self.chromosomes
            )
            if full_matrix is not None:
                matrix = sample_window_from_matrix(full_matrix, self.window_size, self.random_window, band_limit_bins=self.band_limit_bins)
            else:
                matrix = None
                
        elif self.file_format == 'npy':
            # For npy format, assume it's already a matrix
            full_matrix = np.load(file_path)
            matrix = sample_window_from_matrix(full_matrix, self.window_size, self.random_window, band_limit_bins=self.band_limit_bins)
            
        elif self.file_format == 'npz':
            # For npz format, load chromosome-specific data with windowing.
            # Prefer chromosome parsed from filename if available
            # (may be None when the filename carried no chr token).
            chrom_use = file_info.get('chromosome', self.chromosome)
            matrix = load_hic_from_npz(
                file_path, 
                chromosome=chrom_use,
                window_size=self.window_size,
                random_sample=self.random_window,
                band_limit_bins=self.band_limit_bins
            )
        else:
            raise ValueError(f"Unsupported file format: {self.file_format}")
            
        if matrix is None:
            # Propagate loader failure; callers must handle None.
            return None
        
        # Ensure matrix is the right size
        if matrix.shape != (self.window_size, self.window_size):
            # Resize if needed
            matrix = self._resize_matrix(matrix)
        
        # Normalize, then optionally map [0,1]-style output to [-1,1]
        matrix = normalize_hic_matrix(matrix, self.normalization, self.log_transform)
        if self.output_range.strip().replace(' ', '') == '-1,1':
            matrix = (matrix * 2.0) - 1.0
        
        return matrix
    
    def _resize_matrix(self, matrix):
        """Center-crop or zero-pad a square matrix to the target window size.

        Args:
            matrix: square 2D numpy array.

        Returns:
            (window_size, window_size) numpy array.
        """
        current_size = matrix.shape[0]
        
        if current_size == self.window_size:
            return matrix
        if current_size > self.window_size:
            # Crop from center
            start = (current_size - self.window_size) // 2
            end = start + self.window_size
            return matrix[start:end, start:end]
        
        # BUGFIX: pad asymmetrically so the result is exactly window_size.
        # The old symmetric pad of (diff // 2) per side produced a
        # (window_size - 1)-sized matrix whenever the size difference was
        # odd, and the follow-up crop could not grow it back.
        pad_total = self.window_size - current_size
        pad_before = pad_total // 2
        pad_after = pad_total - pad_before
        return np.pad(matrix, (pad_before, pad_after), mode='constant', constant_values=0)
        
    def _augment_matrix(self, matrix):
        """Randomly rotate and/or flip a window (no-op unless augmentation is enabled)."""
        if not self.augmentation:
            return matrix

        # Random rotation by 90/180/270 degrees
        if np.random.rand() < 0.5:
            matrix = np.rot90(matrix, np.random.randint(1, 4))

        # Random flip along both axes (a 180-degree point reflection)
        if np.random.rand() < 0.5:
            matrix = np.flip(np.flip(matrix, axis=0), axis=1)

        return matrix
    
    def _build_grid_index(self):
        """
        Build a flat index mapping each dataset index to (file_idx, start_h, start_w)
        for deterministic grid windows over each matrix.

        For each file, the matrix shape is determined (reading only the
        'shape' entry for scipy-sparse NPZ archives where possible), then
        top-left corners are generated on a stride grid, keeping only
        corners within `band_limit_bins` of the diagonal. Extra rows of
        corners are appended along the bottom/right borders when the grid
        does not land exactly on the matrix edge, so edge regions are
        still covered. Files that fail to load are silently skipped.
        """
        grid = []
        # Iterate files and compute grid for NPZ (chromosome-specific) or other formats
        for file_idx, file_info in enumerate(self.file_list):
            # Load shape without loading full data into memory where possible
            try:
                if self.file_format == 'npz':
                    data = np.load(file_info['path'])
                    keys = set(list(data.keys()))
                    # Handle scipy.sparse NPZ (data/indices/indptr/shape)
                    if {"data", "indices", "indptr", "shape"}.issubset(keys):
                        # The stored 'shape' array gives dimensions without densifying
                        shape_arr = data["shape"]
                        try:
                            H, W = int(shape_arr[0]), int(shape_arr[1])
                        except Exception:
                            # Fallback: load the sparse matrix just for its shape
                            from scipy import sparse as _sps
                            H, W = _sps.load_npz(file_info['path']).shape
                    else:
                        # Resolve chromosome: prefer from filename, fallback to dataset setting or first key.
                        # NOTE: key resolution mirrors load_hic_from_npz so grid
                        # shapes match what __getitem__ later loads.
                        chrom_use = file_info.get('chromosome', self.chromosome)
                        if chrom_use is not None:
                            chr_name = str(chrom_use).replace('chr', '')
                            possible_keys = [chrom_use, f'chr{chr_name}', chr_name, f'matrix_{chrom_use}', f'matrix_chr{chr_name}', f'matrix_{chr_name}']
                            for key in possible_keys:
                                if key in data:
                                    mat = data[key]
                                    break
                            else:
                                first_key = list(data.keys())[0]
                                mat = data[first_key]
                        else:
                            first_key = list(data.keys())[0]
                            mat = data[first_key]
                        H, W = mat.shape
                elif self.file_format == 'npy':
                    mat = np.load(file_info['path'])
                    H, W = mat.shape
                elif self.file_format == 'cool':
                    mat = load_hic_from_cool(file_info['path'], self.chromosome, self.resolution)
                    if mat is None:
                        continue
                    H, W = mat.shape
                elif self.file_format == 'txt':
                    mat = load_hic_from_txt(file_info['path'], size=None, format_type=self.txt_format, resolution=self.resolution, chromosomes=self.chromosomes)
                    if mat is None:
                        continue
                    H, W = mat.shape
                else:
                    # Unknown formats contribute no windows
                    continue
            except Exception:
                # Best-effort: unreadable files are skipped rather than failing the dataset
                continue
            
            # If smaller than window, treat as single window at (0,0)
            stride = self.window_stride
            ws = self.window_size
            if H <= ws or W <= ws:
                # Band check for (0,0) is always satisfied
                grid.append((file_idx, 0, 0))
                continue
            
            # Generate grid of top-left coordinates
            max_h = max(0, H - ws)
            max_w = max(0, W - ws)
            for sh in range(0, max_h + 1, stride):
                for sw in range(0, max_w + 1, stride):
                    if self.band_limit_bins is not None and abs(sh - sw) > self.band_limit_bins:
                        continue
                    grid.append((file_idx, sh, sw))
            
            # Border alignments (ensure coverage near edges): when the stride
            # grid does not land exactly on the last valid corner, add an
            # extra row/column/corner of windows flush with the border.
            if (max_h % stride) != 0:
                sh = max_h
                for sw in range(0, max_w + 1, stride):
                    if self.band_limit_bins is not None and abs(sh - sw) > self.band_limit_bins:
                        continue
                    grid.append((file_idx, sh, sw))
            if (max_w % stride) != 0:
                sw = max_w
                for sh in range(0, max_h + 1, stride):
                    if self.band_limit_bins is not None and abs(sh - sw) > self.band_limit_bins:
                        continue
                    grid.append((file_idx, sh, sw))
            if (max_h % stride) != 0 and (max_w % stride) != 0:
                sh = max_h
                sw = max_w
                if self.band_limit_bins is None or abs(sh - sw) <= self.band_limit_bins:
                    grid.append((file_idx, sh, sw))
        
        return grid
    
    def __len__(self):
        """Dataset size.

        Grid-window mode: one sample per precomputed (file, row, col) entry.
        Otherwise: one sample per file, plus — when interpolation is enabled —
        as many interpolated samples as early/late timepoint pairs exist.
        """
        if self.use_grid_windows:
            return 0 if self.grid_index is None else len(self.grid_index)
        n_files = len(self.file_list)
        if self.interpolation_prob <= 0:
            return n_files
        # Interpolated samples pair one file from the first timepoint with one
        # from the last; the smaller group bounds how many pairs exist.
        n_early = sum(1 for f in self.file_list if f['timepoint'] == self.timepoints[0])
        n_late = sum(1 for f in self.file_list if f['timepoint'] == self.timepoints[-1])
        return n_files + min(n_early, n_late)
    
    def __getitem__(self, idx):
        """Fetch one training sample.

        Returns a dict:
            'hic'       : float32 tensor of shape [1, window_size, window_size]
            'time'      : scalar float32 in [0, 1]
            'cell_id'   : string identifier (window coords appended in grid mode)
            'timepoint' : timepoint label (or 'interpolated_<alpha>')

        Indexing modes:
            * grid-window mode (self.use_grid_windows): idx selects a
              precomputed (file_idx, start_h, start_w) entry of self.grid_index;
            * regular mode: idx < len(self.file_list) loads that file via
              self._load_matrix;
            * interpolated mode: idx beyond the file list blends one early- and
              one late-timepoint matrix with a random alpha.
        """
        # Fix: with a single configured timepoint, len(self.timepoints) - 1 is
        # 0 and the original normalization divided by zero. Clamp the
        # denominator so the lone timepoint maps to time 0.0; behavior with
        # two or more timepoints is unchanged.
        time_denom = max(len(self.timepoints) - 1, 1)

        if self.use_grid_windows:
            # Grid indexing mode: each idx maps to (file_idx, start_h, start_w)
            file_idx, sh, sw = self.grid_index[idx]
            file_info = self.file_list[file_idx]
            # Deterministic extraction
            if self.file_format == 'npz':
                matrix = load_hic_from_npz(
                    file_info['path'],
                    chromosome=self.chromosome,
                    window_size=self.window_size,
                    random_sample=False,
                    start_h=sh,
                    start_w=sw,
                    band_limit_bins=self.band_limit_bins
                )
            else:
                # Fallback: load full matrix then crop deterministically
                full = self._load_full_matrix_for_grid(file_info)
                if full is None:
                    matrix = np.zeros((self.window_size, self.window_size), dtype=np.float32)
                else:
                    matrix = extract_window_at(full, sh, sw, self.window_size)

            # Normalize, then optionally rescale [0, 1] output to [-1, 1]
            if matrix is None:
                matrix = np.zeros((self.window_size, self.window_size), dtype=np.float32)
            matrix = normalize_hic_matrix(matrix, self.normalization, self.log_transform)
            if self.output_range.strip().replace(' ', '') == '-1,1':
                matrix = (matrix * 2.0) - 1.0
            # NOTE: augmentation intentionally disabled in grid mode.
            # matrix = self._augment_matrix(matrix)

            # Time label from file's timepoint, normalized to [0, 1]
            time_value = self.timepoints.index(file_info['timepoint']) / time_denom

            return {
                'hic': torch.tensor(matrix[None, :, :], dtype=torch.float32),
                'time': torch.tensor(time_value, dtype=torch.float32),
                'cell_id': f"{file_info['cell_id']}@{sh},{sw}",
                'timepoint': file_info['timepoint'],
            }

        # Random-window mode (original behavior)
        base_length = len(self.file_list)

        # Regular samples
        if idx < base_length:
            file_info = self.file_list[idx]
            matrix = self._load_matrix(file_info)

            if matrix is None:
                # Return zero matrix if loading fails
                matrix = np.zeros((self.window_size, self.window_size), dtype=np.float32)

            # NOTE: augmentation intentionally disabled here as well.
            # matrix = self._augment_matrix(matrix)

            # Convert timepoint to numerical value in [0, 1]
            time_value = self.timepoints.index(file_info['timepoint']) / time_denom

            return {
                'hic': torch.tensor(matrix[None, :, :], dtype=torch.float32),  # [1, H, W]
                'time': torch.tensor(time_value, dtype=torch.float32),
                'cell_id': file_info['cell_id'],
                'timepoint': file_info['timepoint']
            }

        # Interpolated samples: blend one early and one late matrix
        early_files = [f for f in self.file_list if f['timepoint'] == self.timepoints[0]]
        late_files = [f for f in self.file_list if f['timepoint'] == self.timepoints[-1]]

        interp_idx = idx - base_length
        early_idx = interp_idx % len(early_files)
        late_idx = interp_idx % len(late_files)

        matrix_early = self._load_matrix(early_files[early_idx])
        matrix_late = self._load_matrix(late_files[late_idx])

        if matrix_early is None or matrix_late is None:
            # Return zero matrix if loading fails
            matrix = np.zeros((self.window_size, self.window_size), dtype=np.float32)
            time_value = np.random.rand()
        else:
            # Random interpolation factor
            alpha = np.random.rand()
            matrix = interpolate_timepoints(matrix_early, matrix_late, alpha)
            matrix = self._augment_matrix(matrix)
            time_value = alpha

        return {
            'hic': torch.tensor(matrix[None, :, :], dtype=torch.float32),  # [1, H, W]
            'time': torch.tensor(time_value, dtype=torch.float32),
            'cell_id': f'interpolated_{interp_idx}',
            'timepoint': f'interpolated_{time_value:.3f}'
        }
    
    def _load_full_matrix_for_grid(self, file_info):
        """Load the full contact matrix for `file_info` (no window sampling).

        Used by the grid-window path in __getitem__, which crops the window
        deterministically afterwards.

        Args:
            file_info: dict with at least a 'path' key.

        Returns:
            2-D numpy array, or None when the format is unsupported or the
            npz archive cannot be read (callers substitute a zero window).
        """
        file_path = file_info['path']
        if self.file_format == 'cool':
            return load_hic_from_cool(file_path, self.chromosome, self.resolution)
        elif self.file_format == 'txt':
            return load_hic_from_txt(
                file_path,
                size=None,
                format_type=self.txt_format,
                resolution=self.resolution,
                chromosomes=self.chromosomes
            )
        elif self.file_format == 'npy':
            return np.load(file_path)
        elif self.file_format == 'npz':
            try:
                # Fix: np.load on an .npz returns an NpzFile that keeps the
                # archive open; the original never closed it, leaking a file
                # handle per grid sample. The context manager closes it after
                # the requested array has been extracted.
                with np.load(file_path) as data:
                    if self.chromosome is not None:
                        # Try the common key spellings for this chromosome.
                        chr_name = str(self.chromosome).replace('chr', '')
                        possible_keys = [self.chromosome, f'chr{chr_name}', chr_name, f'matrix_{self.chromosome}', f'matrix_chr{chr_name}', f'matrix_{chr_name}']
                        for key in possible_keys:
                            if key in data:
                                return data[key]
                    # Fall back to the first array stored in the archive.
                    return data[data.files[0]]
            except Exception:
                # Best-effort: the grid path substitutes zeros when None.
                return None
        else:
            return None

def create_hic_dataloader(data_dir, 
                         batch_size=8,
                         timepoints=['G1', 'late_S1'],
                         file_format='npz',
                         txt_format='auto',
                         chromosome='chr1',
                         chromosomes=None,
                         resolution=100000,
                         window_size=128,
                         matrix_size=128,  # deprecated, use window_size instead
                         normalization='none',
                         log_transform=False,
                         interpolation_prob=0.3,
                         augmentation=False,
                         random_window=True,
                         use_grid_windows=False,
                         window_stride=None,
                         band_limit_bp=None,
                         band_limit_bins=None,
                         output_range="0,1",
                         num_workers=4):
    """
    Create Hi-C dataloader with windowed sampling
    
    Args:
        data_dir: directory containing Hi-C files
        batch_size: batch size for training
        timepoints: list of timepoint names
        file_format: format of Hi-C files ('cool', 'txt', 'npy', 'npz')
        txt_format: format for txt files ('auto', 'pairs', 'matrix')
        chromosome: specific chromosome to load (e.g., 'chr1')
        chromosomes: list of chromosomes to include (for pairs format)
        resolution: Hi-C resolution in bp
        window_size: size of the square window to extract
        matrix_size: deprecated, kept for backwards compatibility
        normalization: normalization method
        log_transform: apply log transformation
        interpolation_prob: probability of generating interpolated samples
        augmentation: apply data augmentation
        random_window: whether to randomly sample window positions
        use_grid_windows: iterate over all windows deterministically
        window_stride: stride for grid windows (defaults to window_size)
        band_limit_bp: near-diagonal window limit in base pairs (converted to bins by resolution)
        band_limit_bins: near-diagonal window limit directly in bins (overrides band_limit_bp)
        output_range: "0,1" (default) or "-1,1"; when "-1,1" grid-mode windows
            are rescaled from [0, 1] to [-1, 1] after normalization
        num_workers: number of workers for data loading
    
    Returns:
        DataLoader for Hi-C dataset
    """
    dataset = HiCDataset(
        data_dir=data_dir,
        timepoints=timepoints,
        file_format=file_format,
        txt_format=txt_format,
        chromosome=chromosome,
        chromosomes=chromosomes,
        resolution=resolution,
        window_size=window_size,
        matrix_size=matrix_size,
        normalization=normalization,
        log_transform=log_transform,
        interpolation_prob=interpolation_prob,
        augmentation=augmentation,
        random_window=random_window,
        use_grid_windows=use_grid_windows,
        window_stride=window_stride,
        band_limit_bp=band_limit_bp,               # rely on bins derived from resolution at runtime
        band_limit_bins=band_limit_bins,            # allow override; pass through if provided
        output_range=output_range
    )

    # Log the dataset length (number of samples) for training-time visibility
    try:
        print(f"HiCDataset __len__ = {len(dataset)}")
    except Exception:
        pass
    
    # NOTE(review): shuffle=True also applies in deterministic grid-window
    # mode — confirm that is intended for evaluation-style iteration.
    dataloader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True,
        drop_last=False
    )
    
    return dataloader

# Utility functions for Hi-C analysis
def calculate_insulation_score(matrix, window_size=10):
    """Compute a per-bin insulation profile for TAD boundary detection.

    For each bin i (excluding a window_size margin at both ends), the mean
    contact frequency of the cross-diagonal block is divided by the average
    of the upstream and downstream within-block means. Margin bins and bins
    whose flanking blocks sum to zero keep a score of 0.

    Args:
        matrix: square Hi-C contact matrix (numpy array).
        window_size: half-width, in bins, of the blocks examined around i.

    Returns:
        1-D numpy array of insulation scores, same length as the matrix.
    """
    n = matrix.shape[0]
    w = window_size
    scores = np.zeros(n)

    for pos in range(w, n - w):
        up_block = matrix[pos - w:pos, pos - w:pos]
        down_block = matrix[pos:pos + w, pos:pos + w]
        cross_block = matrix[pos - w:pos, pos:pos + w]

        flank_sum = up_block.mean() + down_block.mean()
        if flank_sum > 0:
            scores[pos] = cross_block.mean() / (0.5 * flank_sum)

    return scores

def detect_compartments(matrix, method='pca'):
    """Detect A/B compartments from a Hi-C contact matrix.

    Args:
        matrix: square contact matrix (numpy array).
        method: only 'pca' is supported.

    Returns:
        First principal component of the bin-bin Pearson correlation matrix
        (eigenvector sign is arbitrary, as for any eigendecomposition).

    Raises:
        ValueError: if method is not 'pca'.
    """
    if method != 'pca':
        raise ValueError(f"Unknown method: {method}")

    # Pearson correlation matrix; constant rows produce NaNs, zeroed below.
    corr_matrix = np.corrcoef(matrix)
    # Fix: the original called np.nan_to_num(corr_matrix, 0), which binds 0
    # to the positional `copy` parameter rather than `nan`. NaNs still became
    # 0.0 only because that is nan_to_num's default — make the intent
    # explicit (output is unchanged).
    corr_matrix = np.nan_to_num(corr_matrix, nan=0.0)

    # eigh returns eigenvalues in ascending order, so the last column of the
    # eigenvector matrix corresponds to the largest eigenvalue (PC1).
    _, eigenvecs = np.linalg.eigh(corr_matrix)
    return eigenvecs[:, -1]
