#!/usr/bin/env python
"""
Demo script for Hi-C temporal interpolation using semantic guidance (zsem)

This script demonstrates how to:
1. Load a trained model with semantic encoder
2. Compute zsem for early and late Hi-C matrices
3. Interpolate zsem for intermediate timepoints
4. Generate Hi-C matrices using zsem guidance
5. Evaluate consistency between generated and true samples

Usage:
    python demo_semantic_guidance.py \
        --checkpoint_path /path/to/model/checkpoint \
        --early_matrix /path/to/early_hic.npy \
        --late_matrix /path/to/late_hic.npy \
        --output_dir /path/to/output \
        --target_timepoints 0.25,0.5,0.75
"""

import os
import argparse
import numpy as np
import torch as th
import matplotlib.pyplot as plt
from datetime import datetime

# Import your modules
import sys
sys.path.append('.')

from Squidiff import dist_util, logger
from Squidiff.hic_script_util import get_all_defaults, add_dict_to_argparser
from Squidiff.hic_inference import (
    load_model_from_checkpoint,
    compute_semantic_embedding,
    interpolate_semantic_embeddings,
    spherical_interpolate_semantic_embeddings,
    sample_with_semantic_guidance,
    evaluate_zsem_consistency
)

def visualize_zsem_interpolation(zsems, timepoints, save_path):
    """Plot the zsem interpolation trajectory and save the figure to `save_path`.

    Produces a 2x2 panel: embedding-norm evolution, the leading embedding
    dimensions over time, cosine similarity to the early/late anchors, and a
    2-D PCA projection of the full early -> interpolated -> late trajectory.
    """
    fig, panels = plt.subplots(2, 2, figsize=(12, 10))
    interpolated = list(zsems['interpolated'].values())

    # Panel (0, 0): embedding norm across biological time, with the anchor
    # norms shown as dashed reference lines.
    ax = panels[0, 0]
    ax.plot(timepoints, [np.linalg.norm(z) for z in interpolated],
            'o-', linewidth=2, markersize=8)
    ax.axhline(y=np.linalg.norm(zsems['early']), color='blue', linestyle='--', alpha=0.7, label='Early')
    ax.axhline(y=np.linalg.norm(zsems['late']), color='red', linestyle='--', alpha=0.7, label='Late')
    ax.set_xlabel('Biological Time')
    ax.set_ylabel('||zsem||')
    ax.set_title('zsem Norm Evolution')
    ax.legend()
    ax.grid(True, alpha=0.3)

    # Panel (0, 1): trajectories of the first few embedding dimensions.
    ax = panels[0, 1]
    n_dims_plot = min(8, len(zsems['early']))
    for dim in range(n_dims_plot):
        ax.plot(timepoints, [z[dim] for z in interpolated],
                'o-', alpha=0.7, label=f'dim {dim}')
    ax.set_xlabel('Biological Time')
    ax.set_ylabel('zsem Value')
    ax.set_title(f'First {n_dims_plot} zsem Dimensions')
    ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
    ax.grid(True, alpha=0.3)

    # Panel (1, 0): cosine similarity of each interpolant to the anchors.
    def _cosine(a, b):
        return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

    sims_early = [_cosine(z, zsems['early']) for z in interpolated]
    sims_late = [_cosine(z, zsems['late']) for z in interpolated]
    ax = panels[1, 0]
    ax.plot(timepoints, sims_early, 'o-', color='blue', label='Similarity to Early')
    ax.plot(timepoints, sims_late, 'o-', color='red', label='Similarity to Late')
    ax.set_xlabel('Biological Time')
    ax.set_ylabel('Cosine Similarity')
    ax.set_title('zsem Similarity Evolution')
    ax.legend()
    ax.grid(True, alpha=0.3)

    # Panel (1, 1): PCA projection of the whole trajectory (skipped when the
    # embedding has fewer than 2 dimensions).
    from sklearn.decomposition import PCA
    trajectory = np.array([zsems['early']] + interpolated + [zsems['late']])

    if trajectory.shape[1] >= 2:
        pca = PCA(n_components=2)
        coords = pca.fit_transform(trajectory)

        ax = panels[1, 1]
        # Row 0 is the early anchor and row -1 the late anchor; the rows in
        # between are the interpolated embeddings.
        ax.plot(coords[1:-1, 0], coords[1:-1, 1], 'o-', alpha=0.7, label='Interpolated')
        ax.scatter(coords[0, 0], coords[0, 1], color='blue', s=100, label='Early', marker='s')
        ax.scatter(coords[-1, 0], coords[-1, 1], color='red', s=100, label='Late', marker='s')

        for idx, t in enumerate(timepoints):
            ax.annotate(f't={t:.2f}', (coords[idx + 1, 0], coords[idx + 1, 1]),
                        xytext=(5, 5), textcoords='offset points', fontsize=8)

        ax.set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.1%} var)')
        ax.set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.1%} var)')
        ax.set_title('zsem Trajectory (PCA)')
        ax.legend()
        ax.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig(save_path, dpi=150, bbox_inches='tight')
    plt.close()

def compare_interpolation_methods(zsem_early, zsem_late, timepoints):
    """Compare linear vs spherical (SLERP) interpolation of semantic embeddings.

    Args:
        zsem_early: 1-D semantic embedding of the early timepoint.
        zsem_late: 1-D semantic embedding of the late timepoint.
        timepoints: sequence of interpolation fractions.

    Returns:
        Tuple ``(zsems_linear, zsems_slerp, differences)`` where
        ``differences[i]`` is the L2 distance between the linear and SLERP
        interpolants at ``timepoints[i]``.
    """
    # Linear interpolation
    zsems_linear = interpolate_semantic_embeddings(zsem_early, zsem_late, timepoints)

    # Spherical interpolation
    zsems_slerp = spherical_interpolate_semantic_embeddings(zsem_early, zsem_late, timepoints)

    # Per-timepoint L2 distance between the two interpolation schemes.
    differences = [
        np.linalg.norm(zsems_linear[i] - zsems_slerp[i])
        for i in range(len(timepoints))
    ]

    logger.log("Interpolation method comparison:")
    logger.log(f"Mean difference: {np.mean(differences):.6f}")
    logger.log(f"Max difference: {np.max(differences):.6f}")

    return zsems_linear, zsems_slerp, differences

def _to_json_serializable(value):
    """Recursively convert numpy containers/scalars into JSON-safe builtins.

    ``json.dump`` rejects ``np.ndarray`` and numpy scalar types; this handles
    them at any nesting depth (the previous conversion only looked one dict
    level deep and left top-level arrays untouched).
    """
    if isinstance(value, dict):
        return {key: _to_json_serializable(val) for key, val in value.items()}
    if isinstance(value, (list, tuple)):
        return [_to_json_serializable(val) for val in value]
    if isinstance(value, np.ndarray):
        return value.tolist()
    if isinstance(value, np.generic):
        # numpy scalar (np.float32, np.int64, ...) -> native Python scalar
        return value.item()
    return value


def demo_semantic_guidance():
    """Run the end-to-end Hi-C semantic-guidance demo.

    Parses CLI arguments, loads the checkpointed model, computes zsem for the
    early/late matrices, interpolates intermediate embeddings, generates
    samples with DDIM + semantic guidance, saves arrays and plots, optionally
    evaluates consistency against true samples, and writes a text summary.
    """
    # --- CLI -------------------------------------------------------------
    defaults = get_all_defaults()
    parser = argparse.ArgumentParser(description='Demo: Hi-C Semantic Guidance')

    parser.add_argument('--checkpoint_path', type=str, required=True,
                       help='Path to model checkpoint')
    parser.add_argument('--early_matrix', type=str, required=True,
                       help='Path to early Hi-C matrix (.npy)')
    parser.add_argument('--late_matrix', type=str, required=True,
                       help='Path to late Hi-C matrix (.npy)')
    parser.add_argument('--output_dir', type=str, required=True,
                       help='Output directory')
    parser.add_argument('--target_timepoints', type=str, default='0.25,0.5,0.75',
                       help='Target timepoints for interpolation')
    parser.add_argument('--num_samples', type=int, default=3,
                       help='Number of samples per timepoint')
    parser.add_argument('--ddim_steps', type=int, default=50,
                       help='Number of DDIM steps')
    parser.add_argument('--use_slerp', action='store_true',
                       help='Use spherical interpolation')
    parser.add_argument('--true_samples_dir', type=str, default='',
                       help='Directory with true samples for evaluation')

    add_dict_to_argparser(parser, defaults)
    args = parser.parse_args()

    # --- Environment setup ----------------------------------------------
    dist_util.setup_dist()
    os.makedirs(args.output_dir, exist_ok=True)
    logger.configure(dir=args.output_dir)

    timepoints = [float(t) for t in args.target_timepoints.split(',')]

    logger.log("=== Hi-C Semantic Guidance Demo ===")
    logger.log(f"Checkpoint: {args.checkpoint_path}")
    logger.log(f"Early matrix: {args.early_matrix}")
    logger.log(f"Late matrix: {args.late_matrix}")
    logger.log(f"Target timepoints: {timepoints}")
    logger.log(f"Interpolation method: {'SLERP' if args.use_slerp else 'Linear'}")

    # --- Load model and input matrices -----------------------------------
    logger.log("Loading model...")
    model, diffusion, checkpoint_info = load_model_from_checkpoint(
        args.checkpoint_path, vars(args), use_ema=True
    )

    logger.log("Loading Hi-C matrices...")
    early_matrix = np.load(args.early_matrix)
    late_matrix = np.load(args.late_matrix)

    logger.log(f"Early matrix shape: {early_matrix.shape}")
    logger.log(f"Late matrix shape: {late_matrix.shape}")

    # --- Semantic embeddings ---------------------------------------------
    logger.log("Computing semantic embeddings...")
    zsem_early = compute_semantic_embedding(model, early_matrix)
    zsem_late = compute_semantic_embedding(model, late_matrix)

    logger.log(f"zsem dimension: {len(zsem_early)}")
    logger.log(f"Early zsem norm: {np.linalg.norm(zsem_early):.4f}")
    logger.log(f"Late zsem norm: {np.linalg.norm(zsem_late):.4f}")

    # Cosine similarity between the two anchor embeddings.
    cosine_sim = np.dot(zsem_early, zsem_late) / (
        np.linalg.norm(zsem_early) * np.linalg.norm(zsem_late)
    )
    logger.log(f"Early-Late cosine similarity: {cosine_sim:.4f}")

    # Compare linear vs SLERP interpolation (logged; differences reused in
    # the summary below).
    logger.log("Comparing interpolation methods...")
    zsems_linear, zsems_slerp, differences = compare_interpolation_methods(
        zsem_early, zsem_late, timepoints
    )

    # --- Generation -------------------------------------------------------
    logger.log("Generating samples with semantic guidance...")
    results, zsems = sample_with_semantic_guidance(
        model, diffusion, early_matrix, late_matrix, timepoints,
        matrix_size=args.window_size, num_samples=args.num_samples,
        ddim_steps=args.ddim_steps, use_slerp=args.use_slerp,
        clip_mode='nonneg', use_fp16=False
    )

    # --- Save results -----------------------------------------------------
    logger.log("Saving results...")

    np.save(os.path.join(args.output_dir, 'zsem_early.npy'), zsem_early)
    np.save(os.path.join(args.output_dir, 'zsem_late.npy'), zsem_late)

    for t in timepoints:
        # Per-timepoint samples, their mean, and the interpolated embedding.
        samples = results[t]  # [num_samples, H, W]
        np.save(os.path.join(args.output_dir, f'samples_t{t:.3f}.npy'), samples)

        avg_sample = samples.mean(axis=0)
        np.save(os.path.join(args.output_dir, f'average_t{t:.3f}.npy'), avg_sample)

        np.save(os.path.join(args.output_dir, f'zsem_t{t:.3f}.npy'), zsems['interpolated'][t])

        # Visualize the mean sample and (if available) per-pixel std dev.
        fig, axes = plt.subplots(1, 2, figsize=(12, 5))

        im1 = axes[0].imshow(avg_sample, cmap='Reds', origin='lower')
        axes[0].set_title(f'Generated Hi-C at t={t:.3f}')
        axes[0].set_xlabel('Genomic Position')
        axes[0].set_ylabel('Genomic Position')
        plt.colorbar(im1, ax=axes[0], shrink=0.8)

        if args.num_samples > 1:
            std_sample = samples.std(axis=0)
            im2 = axes[1].imshow(std_sample, cmap='Blues', origin='lower')
            axes[1].set_title(f'Std Dev at t={t:.3f}')
            axes[1].set_xlabel('Genomic Position')
            axes[1].set_ylabel('Genomic Position')
            plt.colorbar(im2, ax=axes[1], shrink=0.8)
        else:
            # No spread to show for a single sample.
            axes[1].text(0.5, 0.5, 'Single Sample\n(No Std Dev)',
                        ha='center', va='center', transform=axes[1].transAxes)

        plt.tight_layout()
        plt.savefig(os.path.join(args.output_dir, f'sample_t{t:.3f}.png'),
                   dpi=150, bbox_inches='tight')
        plt.close()

    # --- zsem trajectory visualization ------------------------------------
    logger.log("Creating zsem visualization...")
    visualize_zsem_interpolation(zsems, timepoints,
                                os.path.join(args.output_dir, 'zsem_interpolation.png'))

    # --- Optional evaluation against true samples --------------------------
    if args.true_samples_dir and os.path.exists(args.true_samples_dir):
        logger.log("Evaluating zsem consistency...")

        # Collect whichever true-sample files exist for the requested times.
        true_samples = {}
        for t in timepoints:
            true_file = os.path.join(args.true_samples_dir, f'true_t{t:.3f}.npy')
            if os.path.exists(true_file):
                true_samples[t] = np.load(true_file)

        if true_samples:
            generated_samples = {t: results[t] for t in timepoints}
            evaluation = evaluate_zsem_consistency(model, generated_samples, true_samples)

            # Persist the evaluation. Numpy values must be converted first
            # (json.dump rejects ndarrays and numpy scalars at any depth).
            import json
            eval_path = os.path.join(args.output_dir, 'zsem_evaluation.json')
            with open(eval_path, 'w') as f:
                json.dump(_to_json_serializable(evaluation), f, indent=2)

            if 'overall' in evaluation:
                logger.log(f"Overall zsem consistency: "
                          f"{evaluation['overall']['mean_cosine_similarity']:.4f}±"
                          f"{evaluation['overall']['std_cosine_similarity']:.4f}")

    # --- Summary -----------------------------------------------------------
    logger.log("Creating summary...")
    summary_path = os.path.join(args.output_dir, 'demo_summary.txt')
    with open(summary_path, 'w') as f:
        f.write("Hi-C Semantic Guidance Demo Summary\n")
        f.write(f"Generated at: {datetime.now()}\n")
        f.write(f"Checkpoint: {args.checkpoint_path}\n")
        f.write(f"Checkpoint info: {checkpoint_info}\n\n")

        f.write("Input data:\n")
        f.write(f"Early matrix: {args.early_matrix} (shape: {early_matrix.shape})\n")
        f.write(f"Late matrix: {args.late_matrix} (shape: {late_matrix.shape})\n\n")

        f.write("zsem properties:\n")
        f.write(f"Dimension: {len(zsem_early)}\n")
        f.write(f"Early norm: {np.linalg.norm(zsem_early):.4f}\n")
        f.write(f"Late norm: {np.linalg.norm(zsem_late):.4f}\n")
        f.write(f"Early-Late cosine similarity: {cosine_sim:.4f}\n\n")

        f.write("Generation settings:\n")
        f.write(f"Target timepoints: {timepoints}\n")
        f.write(f"Samples per timepoint: {args.num_samples}\n")
        f.write(f"DDIM steps: {args.ddim_steps}\n")
        f.write(f"Interpolation method: {'SLERP' if args.use_slerp else 'Linear'}\n")
        f.write(f"Matrix size: {args.window_size}x{args.window_size}\n\n")

        f.write("Interpolation method comparison:\n")
        f.write(f"Mean difference (Linear vs SLERP): {np.mean(differences):.6f}\n")
        f.write(f"Max difference (Linear vs SLERP): {np.max(differences):.6f}\n")

    logger.log(f"Demo completed! Results saved to: {args.output_dir}")
    logger.log("Key outputs:")
    logger.log("  - Generated samples: samples_t*.npy")
    logger.log("  - Semantic embeddings: zsem_*.npy")
    logger.log("  - Visualization: zsem_interpolation.png")
    logger.log("  - Summary: demo_summary.txt")

if __name__ == "__main__":
    # Script entry point: run the full semantic-guidance demo pipeline.
    demo_semantic_guidance() 