"""
Hi-C specific evaluation metrics and analysis tools
"""
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import pearsonr, spearmanr
from scipy.ndimage import gaussian_filter
from sklearn.metrics import mean_squared_error, mean_absolute_error
import pandas as pd
import warnings
warnings.filterwarnings('ignore')

def calculate_contact_decay(matrix, max_distance=None):
    """
    Calculate contact probability decay with genomic distance.

    Args:
        matrix: Hi-C contact matrix [H, W]
        max_distance: maximum distance (exclusive) to consider; defaults to
            half the smaller matrix dimension

    Returns:
        distances: array of genomic distances considered (1 .. max_distance - 1)
        contact_probs: mean contact value along each off-diagonal
    """
    if max_distance is None:
        max_distance = min(matrix.shape) // 2

    matrix = np.asarray(matrix)
    distances = []
    contact_probs = []

    for d in range(1, max_distance):
        # Mean of the d-th off-diagonal, i.e. contacts between loci d bins
        # apart. np.diagonal replaces the original per-element Python loop
        # and also handles non-square matrices safely.
        diagonal = np.diagonal(matrix, offset=d)
        if diagonal.size > 0:
            distances.append(d)
            contact_probs.append(diagonal.mean())

    return np.array(distances), np.array(contact_probs)

def calculate_insulation_score(matrix, window_size=10):
    """
    Compute a per-bin insulation profile for TAD boundary detection.

    For every interior bin, the mean contact frequency crossing the bin is
    divided by the average contact frequency within the two flanking windows.
    Bins closer than ``window_size`` to either matrix edge keep a score of 0.

    Args:
        matrix: Hi-C contact matrix [H, W]
        window_size: half-width of the window around each candidate boundary

    Returns:
        insulation_scores: array of per-bin insulation scores
    """
    n_bins = matrix.shape[0]
    scores = np.zeros(n_bins)

    for pos in range(window_size, n_bins - window_size):
        lo = pos - window_size
        hi = pos + window_size

        # Within-domain contacts on each side of the candidate boundary,
        # plus the contacts that bridge across it.
        left_block = matrix[lo:pos, lo:pos]
        right_block = matrix[pos:hi, pos:hi]
        bridge = matrix[lo:pos, pos:hi]

        left_mean = left_block.mean() if left_block.size > 0 else 0
        right_mean = right_block.mean() if right_block.size > 0 else 0
        bridge_mean = bridge.mean() if bridge.size > 0 else 0

        side_sum = left_mean + right_mean
        # Ratio of cross-boundary to flanking contact strength; a bin with
        # no flanking signal defaults to a neutral score of 1.
        scores[pos] = bridge_mean / (0.5 * side_sum) if side_sum > 0 else 1.0

    return scores

def detect_tad_boundaries(insulation_scores, threshold_percentile=10):
    """
    Detect TAD boundaries from insulation scores.

    Boundaries are bins whose insulation score falls below the given
    percentile of the *positive* scores (edge bins with score 0 are excluded
    from the threshold estimate).

    Args:
        insulation_scores: array of insulation scores
        threshold_percentile: percentile threshold for boundary detection

    Returns:
        boundaries: indices of TAD boundaries; empty array when no bin has a
            positive insulation score
    """
    insulation_scores = np.asarray(insulation_scores)
    positive = insulation_scores[insulation_scores > 0]
    # Guard: np.percentile raises on an empty array (e.g. an all-zero
    # profile from a matrix smaller than the insulation window).
    if positive.size == 0:
        return np.array([], dtype=int)
    threshold = np.percentile(positive, threshold_percentile)
    return np.where(insulation_scores < threshold)[0]

def calculate_compartment_strength(matrix, method='pca'):
    """
    Calculate A/B compartment strength using PCA.

    The matrix is normalized to observed/expected, log-transformed, turned
    into a bin-by-bin correlation matrix, and eigendecomposed; the leading
    eigenvector is the compartment signal (its sign is arbitrary).

    Args:
        matrix: Hi-C contact matrix [H, W]
        method: method for compartment detection (only 'pca' is supported)

    Returns:
        pc1: first principal component (compartment signal)
        eigenvalue: eigenvalue of the first component

    Raises:
        ValueError: if ``method`` is not 'pca'
    """
    if method != 'pca':
        raise ValueError(f"Unknown method: {method}")

    # Observed / expected normalization; 1e-10 avoids division by zero.
    expected = np.outer(matrix.sum(axis=1), matrix.sum(axis=0)) / matrix.sum()
    obs_exp = matrix / (expected + 1e-10)
    # BUGFIX: the second positional argument of np.nan_to_num is `copy`, not
    # the fill value — the original `nan_to_num(x, 0)` passed copy=False and
    # only replaced NaN with 0 by default. Use the keyword explicitly.
    obs_exp = np.nan_to_num(obs_exp, nan=0.0)

    # Log transform, then correlate bins against each other.
    corr_matrix = np.corrcoef(np.log2(obs_exp + 1))
    corr_matrix = np.nan_to_num(corr_matrix, nan=0.0)

    # Eigendecomposition (eigh: the correlation matrix is symmetric),
    # sorted by descending eigenvalue.
    eigenvals, eigenvecs = np.linalg.eigh(corr_matrix)
    order = np.argsort(eigenvals)[::-1]
    eigenvals = eigenvals[order]
    eigenvecs = eigenvecs[:, order]

    return eigenvecs[:, 0], eigenvals[0]

def calculate_stripes_score(matrix):
    """
    Score stripe-like patterns in a contact matrix.

    The matrix is denoised with a Gaussian filter, and the variance of the
    resulting gradient magnitude serves as a proxy for stripe strength:
    sharp, elongated features produce high-variance gradients.

    Args:
        matrix: Hi-C contact matrix [H, W]

    Returns:
        stripe_score: variance of the gradient magnitude of the smoothed matrix
    """
    # Light smoothing suppresses pixel-level noise before differentiation.
    smoothed = gaussian_filter(matrix, sigma=1.0)

    dy, dx = np.gradient(smoothed)
    magnitude = np.hypot(dx, dy)

    return np.var(magnitude)

def _flatten_to_numpy(matrix):
    """Flatten a numpy array or torch tensor into a 1-D numpy array."""
    if isinstance(matrix, np.ndarray):
        return matrix.flatten()
    try:
        # Lazy import: torch is only needed when a tensor is passed, and the
        # function should still work in numpy-only environments.
        import torch
        if isinstance(matrix, torch.Tensor):
            return matrix.detach().cpu().numpy().flatten()
    except ImportError:
        pass
    return np.asarray(matrix).flatten()

def evaluate_hic_quality(pred_matrix, true_matrix, return_all_metrics=False):
    """
    Comprehensive Hi-C quality evaluation.

    Compares a predicted contact matrix against the ground truth with
    correlation and error metrics, optionally adding Hi-C-specific structural
    metrics (contact decay, insulation, compartments, stripes).

    Args:
        pred_matrix: predicted Hi-C matrix [H, W] (numpy array or torch tensor)
        true_matrix: ground truth Hi-C matrix [H, W] (numpy array or torch tensor)
        return_all_metrics: whether to also compute the structural metrics

    Returns:
        metrics: dictionary of evaluation metrics; a dict with only an
            'error' key when no entries survive the validity mask
    """
    pred_flat = _flatten_to_numpy(pred_matrix)
    true_flat = _flatten_to_numpy(true_matrix)

    # Basic correlation metrics.
    # Support both [0, 1] and [-1, 1] value ranges: a very permissive
    # threshold keeps negative values from the [-1, 1] range while dropping
    # fully-empty entries.
    threshold = -0.99
    mask = (true_flat > threshold) | (pred_flat > threshold)
    if mask.sum() == 0:
        return {'error': 'No valid entries for correlation'}

    pred_masked = pred_flat[mask]
    true_masked = true_flat[mask]

    try:
        pearson_corr, pearson_p = pearsonr(pred_masked, true_masked)
        spearman_corr, spearman_p = spearmanr(pred_masked, true_masked)
    except Exception:
        # Correlation is undefined for degenerate (e.g. constant) inputs.
        pearson_corr = pearson_p = spearman_corr = spearman_p = 0

    # Error metrics — numpy equivalents of sklearn's uniform-weight MSE/MAE.
    residual = true_masked - pred_masked
    mse = float(np.mean(residual ** 2))
    mae = float(np.mean(np.abs(residual)))

    metrics = {
        'pearson_correlation': pearson_corr,
        'spearman_correlation': spearman_corr,
        'mse': mse,
        'mae': mae,
        'valid_entries': int(mask.sum())
    }

    if return_all_metrics:
        # Contact decay comparison
        try:
            true_dist, true_decay = calculate_contact_decay(true_matrix)
            pred_dist, pred_decay = calculate_contact_decay(pred_matrix)

            if len(true_decay) > 0 and len(pred_decay) > 0:
                min_len = min(len(true_decay), len(pred_decay))
                decay_corr, _ = pearsonr(true_decay[:min_len], pred_decay[:min_len])
                metrics['decay_correlation'] = decay_corr
        except Exception:
            metrics['decay_correlation'] = 0

        # Insulation score comparison
        try:
            true_insulation = calculate_insulation_score(true_matrix)
            pred_insulation = calculate_insulation_score(pred_matrix)

            if len(true_insulation) > 0 and len(pred_insulation) > 0:
                insulation_corr, _ = pearsonr(true_insulation, pred_insulation)
                metrics['insulation_correlation'] = insulation_corr
        except Exception:
            metrics['insulation_correlation'] = 0

        # Compartment comparison
        try:
            true_pc1, true_eigenval = calculate_compartment_strength(true_matrix)
            pred_pc1, pred_eigenval = calculate_compartment_strength(pred_matrix)

            # PC1 sign is arbitrary, so score the better of the two signs.
            corr1 = pearsonr(true_pc1, pred_pc1)[0]
            corr2 = pearsonr(true_pc1, -pred_pc1)[0]
            compartment_corr = max(abs(corr1), abs(corr2))

            metrics['compartment_correlation'] = compartment_corr
            metrics['compartment_strength_true'] = true_eigenval
            metrics['compartment_strength_pred'] = pred_eigenval
        except Exception:
            metrics['compartment_correlation'] = 0
            metrics['compartment_strength_true'] = 0
            metrics['compartment_strength_pred'] = 0

        # Stripe pattern score
        try:
            true_stripes = calculate_stripes_score(true_matrix)
            pred_stripes = calculate_stripes_score(pred_matrix)
            metrics['stripe_score_true'] = true_stripes
            metrics['stripe_score_pred'] = pred_stripes
            metrics['stripe_score_ratio'] = pred_stripes / (true_stripes + 1e-10)
        except Exception:
            metrics['stripe_score_true'] = 0
            metrics['stripe_score_pred'] = 0
            metrics['stripe_score_ratio'] = 1

    return metrics

def visualize_hic_comparison(pred_matrix, true_matrix, save_path=None, vmax=None):
    """
    Visualize comparison between predicted and true Hi-C matrices.

    Produces a 2x3 panel: ground truth, prediction, and their signed
    difference on the top row; contact-decay, insulation-score, and
    compartment-signal comparisons on the bottom row.

    Args:
        pred_matrix: predicted Hi-C matrix [H, W]
        true_matrix: ground truth Hi-C matrix [H, W]
        save_path: path to save the plot (optional)
        vmax: maximum value for color scale; defaults to the larger of the
            two matrix maxima

    Returns:
        fig: matplotlib figure
    """
    fig, axes = plt.subplots(2, 3, figsize=(15, 10))

    if vmax is None:
        vmax = max(true_matrix.max(), pred_matrix.max())

    # True matrix
    im1 = axes[0, 0].imshow(true_matrix, cmap='Reds', vmin=0, vmax=vmax)
    axes[0, 0].set_title('Ground Truth')
    axes[0, 0].set_xlabel('Genomic Position')
    axes[0, 0].set_ylabel('Genomic Position')

    # Predicted matrix
    im2 = axes[0, 1].imshow(pred_matrix, cmap='Reds', vmin=0, vmax=vmax)
    axes[0, 1].set_title('Predicted')
    axes[0, 1].set_xlabel('Genomic Position')
    axes[0, 1].set_ylabel('Genomic Position')

    # Signed difference with a symmetric diverging color scale.
    diff = pred_matrix - true_matrix
    diff_max = max(abs(diff.min()), abs(diff.max()))
    im3 = axes[0, 2].imshow(diff, cmap='RdBu_r', vmin=-diff_max, vmax=diff_max)
    axes[0, 2].set_title('Difference (Pred - True)')
    axes[0, 2].set_xlabel('Genomic Position')
    axes[0, 2].set_ylabel('Genomic Position')

    # Contact decay curves. Plotting failures are non-fatal; the bare
    # `except:` clauses were narrowed to `except Exception:` so that
    # KeyboardInterrupt/SystemExit are not swallowed.
    try:
        true_dist, true_decay = calculate_contact_decay(true_matrix)
        pred_dist, pred_decay = calculate_contact_decay(pred_matrix)

        axes[1, 0].loglog(true_dist, true_decay, label='True', color='blue')
        axes[1, 0].loglog(pred_dist, pred_decay, label='Predicted', color='red')
        axes[1, 0].set_xlabel('Genomic Distance')
        axes[1, 0].set_ylabel('Contact Probability')
        axes[1, 0].set_title('Contact Decay')
        axes[1, 0].legend()
        axes[1, 0].grid(True, alpha=0.3)
    except Exception:
        axes[1, 0].text(0.5, 0.5, 'Contact decay\ncalculation failed', 
                       ha='center', va='center', transform=axes[1, 0].transAxes)

    # Insulation scores
    try:
        true_insulation = calculate_insulation_score(true_matrix)
        pred_insulation = calculate_insulation_score(pred_matrix)

        x_pos = np.arange(len(true_insulation))
        axes[1, 1].plot(x_pos, true_insulation, label='True', color='blue')
        axes[1, 1].plot(x_pos, pred_insulation, label='Predicted', color='red')
        axes[1, 1].set_xlabel('Genomic Position')
        axes[1, 1].set_ylabel('Insulation Score')
        axes[1, 1].set_title('Insulation Score')
        axes[1, 1].legend()
        axes[1, 1].grid(True, alpha=0.3)
    except Exception:
        axes[1, 1].text(0.5, 0.5, 'Insulation score\ncalculation failed', 
                       ha='center', va='center', transform=axes[1, 1].transAxes)

    # Compartment signals (PC1; sign is arbitrary, see calculate_compartment_strength)
    try:
        true_pc1, _ = calculate_compartment_strength(true_matrix)
        pred_pc1, _ = calculate_compartment_strength(pred_matrix)

        x_pos = np.arange(len(true_pc1))
        axes[1, 2].plot(x_pos, true_pc1, label='True', color='blue')
        axes[1, 2].plot(x_pos, pred_pc1, label='Predicted', color='red')
        axes[1, 2].axhline(y=0, color='black', linestyle='--', alpha=0.5)
        axes[1, 2].set_xlabel('Genomic Position')
        axes[1, 2].set_ylabel('PC1 (Compartment Signal)')
        axes[1, 2].set_title('A/B Compartments')
        axes[1, 2].legend()
        axes[1, 2].grid(True, alpha=0.3)
    except Exception:
        axes[1, 2].text(0.5, 0.5, 'Compartment\ncalculation failed', 
                       ha='center', va='center', transform=axes[1, 2].transAxes)

    # Add colorbars
    plt.colorbar(im1, ax=axes[0, 0], shrink=0.6)
    plt.colorbar(im2, ax=axes[0, 1], shrink=0.6)
    plt.colorbar(im3, ax=axes[0, 2], shrink=0.6)

    plt.tight_layout()

    if save_path:
        plt.savefig(save_path, dpi=150, bbox_inches='tight')

    return fig

def evaluate_temporal_interpolation(matrices_dict, timepoints, save_dir=None):
    """
    Evaluate temporal interpolation quality.

    Each interior timepoint is compared against the linear interpolation of
    the endpoint matrices, and consecutive timepoints are correlated to
    estimate overall temporal consistency.

    Args:
        matrices_dict: dict with timepoints as keys and matrices as values
        timepoints: list of timepoints
        save_dir: directory to save comparison plots (optional)

    Returns:
        results: per-timepoint metrics keyed 'timepoint_<t>' plus an overall
            'temporal_consistency' score; or a dict with an 'error' key
    """
    results = {}

    # Need both endpoints to define the interpolation baseline.
    if len(timepoints) < 2:
        return {'error': 'Need at least 2 timepoints'}

    early_time = min(timepoints)
    late_time = max(timepoints)

    if early_time not in matrices_dict or late_time not in matrices_dict:
        return {'error': 'Missing endpoint matrices'}

    early_matrix = matrices_dict[early_time]
    late_matrix = matrices_dict[late_time]

    # Sort once; the original re-sorted the list inside every loop iteration.
    ordered = sorted(timepoints)

    # Compare each interior timepoint to the linear blend of the endpoints.
    for t in ordered[1:-1]:
        if t not in matrices_dict:
            continue
        current_matrix = matrices_dict[t]

        # Expected matrix by linear interpolation between the endpoints.
        alpha = (t - early_time) / (late_time - early_time)
        expected_matrix = (1 - alpha) * early_matrix + alpha * late_matrix

        results[f'timepoint_{t}'] = evaluate_hic_quality(current_matrix, expected_matrix)

        # Visualize if a save directory was provided.
        if save_dir:
            import os
            os.makedirs(save_dir, exist_ok=True)
            fig = visualize_hic_comparison(
                current_matrix, expected_matrix,
                save_path=os.path.join(save_dir, f'interpolation_t{t:.3f}.png')
            )
            plt.close(fig)

    # Overall temporal consistency: correlation between consecutive matrices
    # over entries that are non-zero in at least one of the pair.
    temporal_corrs = []
    for t1, t2 in zip(ordered, ordered[1:]):
        if t1 in matrices_dict and t2 in matrices_dict:
            flat1 = matrices_dict[t1].flatten()
            flat2 = matrices_dict[t2].flatten()
            mask = (flat1 > 0) | (flat2 > 0)
            if mask.sum() > 0:
                corr, _ = pearsonr(flat1[mask], flat2[mask])
                temporal_corrs.append(corr)

    if temporal_corrs:
        results['temporal_consistency'] = np.mean(temporal_corrs)

    return results
