"""
Enhanced Hi-C Diffusion Model Training with VAE Integration
Supports both independent VAE training and joint diffusion+VAE training
"""
import argparse
import copy
import os
import glob
import torch
import numpy as np
from torch.utils.data import DataLoader
import json
from datetime import datetime
import functools
import matplotlib.pyplot as plt

from Squidiff import dist_util, logger
from Squidiff.hic_script_util import (
    get_all_defaults,
    create_model_and_diffusion,
    args_to_dict,
    add_dict_to_argparser,
)
from Squidiff.hic_datasets import create_hic_dataloader
from Squidiff.hic_train_util import HiCTrainLoop, log_loss_dict
from Squidiff.hic_evaluation import evaluate_hic_quality, visualize_hic_comparison
from Squidiff.hic_visualization import visualize_comparison, visualize_comparison_with_metrics
from Squidiff.vae_encoder import HiCVAE
from Squidiff.resample import LossAwareSampler


class EnhancedHiCTrainLoop(HiCTrainLoop):
    """Training loop extending ``HiCTrainLoop`` with validation and VAE support.

    Adds three behaviors on top of the base loop:
      * optional VAE ("semantic") regularization mixed into the diffusion loss,
      * periodic validation measuring one-step reconstruction quality
        (x_t -> predicted x_0) at several noise levels, and
      * "best only" checkpointing: regular checkpoint saves are skipped and
        only the checkpoint with the highest weighted Pearson score is kept.
    """

    def __init__(self, validation_loader=None, validation_interval=5000,
                 validation_samples=4, use_vae_regularization=False,
                 vae_reg_weight=1e-4, **kwargs):
        """
        Args:
            validation_loader: DataLoader yielding dicts with ``'hic'`` and
                ``'time'`` tensors, or None to disable validation entirely.
            validation_interval: run validation every this many steps.
            validation_samples: number of samples intended for visualization
                (currently the visualization helper caps at 4 internally).
            use_vae_regularization: if True and the model exposes
                ``get_semantic_loss``, add that loss (scaled) during training.
            vae_reg_weight: scale factor for the VAE regularization term.
            **kwargs: forwarded unchanged to ``HiCTrainLoop.__init__``.
        """
        super().__init__(**kwargs)

        self.validation_loader = validation_loader
        self.validation_interval = validation_interval
        self.validation_samples = validation_samples
        self.use_vae_regularization = use_vae_regularization
        self.vae_reg_weight = vae_reg_weight

        # Metrics tracking.  NOTE: best_val_loss stores the NEGATED best
        # weighted-Pearson score so the "lower is better" convention of the
        # attribute name is preserved (see validate()).
        self.validation_metrics = []
        self.best_val_loss = float('inf')
        self.best_checkpoint_files = []  # files belonging to the current best checkpoint

        logger.log("Enhanced training loop initialized:")
        logger.log(f"  Validation enabled: {validation_loader is not None}")
        logger.log(f"  VAE regularization: {use_vae_regularization}")
        logger.log(f"  Validation interval: {validation_interval}")
        logger.log("  Strategy: Only save best model")

    def run_step(self, batch):
        """One training step: forward/backward, optimizer step, EMA update,
        LR annealing, logging, and (periodically) validation."""
        self.forward_backward(batch)
        took_step = self.mp_trainer.optimize(self.opt)
        if took_step:
            self._update_ema()
        self._anneal_lr()
        self.log_step()

        # Periodic validation; step 0 is skipped so an untrained model is
        # never evaluated.
        if (self.validation_loader is not None
                and self.step > 0
                and self.step % self.validation_interval == 0):
            self.validate()

    def forward_backward(self, batch):
        """Forward/backward over microbatches with optional VAE regularization.

        Per-microbatch loss computation is delegated to the base class helper
        ``_train_microbatch``; an extra-loss callback adds
        ``model.get_semantic_loss(x) * vae_reg_weight`` when enabled.
        Only the last microbatch's tracked loss is appended to ``loss_list``.
        """
        self.mp_trainer.zero_grad()
        hic_data = batch['hic'].to(dist_util.dev())
        bio_time = batch['time'].to(dist_util.dev())
        last_tracked = None
        for i in range(0, hic_data.shape[0], self.microbatch):
            micro_hic = hic_data[i : i + self.microbatch]
            micro_bio_time = bio_time[i : i + self.microbatch]
            last_batch = (i + self.microbatch) >= hic_data.shape[0]

            def _vae_extra(x):
                # Returns an additional loss term, or None to disable it.
                if self.use_vae_regularization and hasattr(self.model, 'get_semantic_loss'):
                    return self.model.get_semantic_loss(x) * self.vae_reg_weight
                return None

            last_tracked = super()._train_microbatch(
                micro_hic, micro_bio_time, last_batch, extra_loss_fn=_vae_extra
            )
        if last_tracked is not None:
            self.loss_list.append(float(last_tracked.item()))

    def validate(self):
        """Measure one-step reconstruction quality at several noise levels.

        For each test timestep t, the validation batch is noised with
        ``q_sample`` and the model's predicted x_0 (via ``p_mean_variance``)
        is compared against the original using Pearson/Spearman correlation
        and MSE.  A noise-level-weighted Pearson score (low noise weighted
        highest) is the model-selection metric; a new best triggers
        checkpoint rotation.

        Returns:
            dict: step, weighted score, and per-timestep metrics.
        """
        # Hoisted out of the per-sample loop (previously re-imported on
        # every iteration).
        from scipy.stats import pearsonr, spearmanr

        logger.log(f"\n{'='*70}")
        logger.log(f"🔍 Testing reconstruction ability at step {self.step + self.resume_step}")
        logger.log(f"{'='*70}")

        self.model.eval()

        # Reconstruction is probed across the noise schedule.
        test_timesteps = [100, 300, 500, 700, 900]
        reconstruction_metrics = {}
        val_samples_for_vis = []

        with torch.no_grad():
            # A single validation batch is enough for a quick probe.
            val_batch = next(iter(self.validation_loader))
            hic_data = val_batch['hic'].to(dist_util.dev())
            bio_time = val_batch['time'].to(dist_util.dev())

            # Temporarily swap in EMA weights for better sample quality;
            # the training weights are restored at the end.
            backup_state = copy.deepcopy(self.model.state_dict())
            try:
                if hasattr(self, 'ema_params') and len(self.ema_params) > 0:
                    ema_state = self.mp_trainer.master_params_to_state_dict(self.ema_params[-1])
                    self.model.load_state_dict(ema_state, strict=False)
                    logger.log("✓ Using EMA weights for validation")
            except Exception as e:
                logger.log(f"⚠ Failed to load EMA weights: {e}")

            logger.log("\nOriginal data:")
            logger.log(f"  Shape: {hic_data.shape}")
            logger.log(f"  Range: [{hic_data.min():.4f}, {hic_data.max():.4f}]")
            logger.log(f"  Mean: {hic_data.mean():.4f}, Std: {hic_data.std():.4f}")
            logger.log("")

            for t_val in test_timesteps:
                logger.log(f"Testing t={t_val}...")

                t = torch.full((hic_data.shape[0],), t_val, device=dist_util.dev(), dtype=torch.long)

                # 1. Add noise to the original data.
                noise = torch.randn_like(hic_data)
                x_t = self.diffusion.q_sample(hic_data, t, noise)

                # 2. One-step prediction of x_0 from x_t.
                model_output = self.diffusion.p_mean_variance(
                    self.model,
                    x_t,
                    t,
                    clip_denoised=True,
                    model_kwargs={'bio_time': bio_time}
                )
                x_0_pred = model_output['pred_xstart']

                # 3. Per-sample reconstruction metrics.
                orig_flat = hic_data.view(hic_data.shape[0], -1).cpu().numpy()
                pred_flat = x_0_pred.view(x_0_pred.shape[0], -1).cpu().numpy()

                pearson_corrs = []
                spearman_corrs = []
                mse_values = []

                for i in range(orig_flat.shape[0]):
                    try:
                        p_corr, _ = pearsonr(orig_flat[i], pred_flat[i])
                        s_corr, _ = spearmanr(orig_flat[i], pred_flat[i])
                        mse = np.mean((orig_flat[i] - pred_flat[i]) ** 2)

                        if not np.isnan(p_corr):
                            pearson_corrs.append(p_corr)
                        if not np.isnan(s_corr):
                            spearman_corrs.append(s_corr)
                        mse_values.append(mse)
                    except Exception:
                        # Constant vectors make correlations undefined; skip
                        # that sample rather than aborting validation.
                        pass

                avg_pearson = np.mean(pearson_corrs) if pearson_corrs else 0.0
                avg_spearman = np.mean(spearman_corrs) if spearman_corrs else 0.0
                avg_mse = np.mean(mse_values) if mse_values else 0.0

                reconstruction_metrics[f'recon_pearson_t{t_val}'] = avg_pearson
                reconstruction_metrics[f'recon_spearman_t{t_val}'] = avg_spearman
                reconstruction_metrics[f'recon_mse_t{t_val}'] = avg_mse

                logger.log(f"  Pearson:  {avg_pearson:.4f}")
                logger.log(f"  Spearman: {avg_spearman:.4f}")
                logger.log(f"  MSE:      {avg_mse:.4f}")

                # Keep one mid-noise sample batch for visualization.
                if t_val == 500:
                    val_samples_for_vis.append({
                        'original': hic_data.cpu().numpy(),
                        'noisy': x_t.cpu().numpy(),
                        'reconstructed': x_0_pred.cpu().numpy(),
                        'bio_time': bio_time.cpu().numpy(),
                        't': t_val
                    })

            # Restore the training weights (best-effort).
            try:
                self.model.load_state_dict(backup_state, strict=False)
            except Exception:
                pass

        # Weighted average: lower noise levels count more toward the score.
        weights = {100: 1.0, 300: 0.8, 500: 0.6, 700: 0.4, 900: 0.2}
        weighted_pearson = sum(
            reconstruction_metrics[f'recon_pearson_t{t}'] * weights[t]
            for t in test_timesteps
        ) / sum(weights.values())

        reconstruction_metrics['recon_pearson_weighted'] = weighted_pearson

        logger.log(f"\n📊 Overall reconstruction score (weighted): {weighted_pearson:.4f}")
        logger.log(f"{'='*70}\n")

        validation_result = {
            'step': self.step + self.resume_step,
            'recon_score': weighted_pearson,
            **reconstruction_metrics
        }

        self.validation_metrics.append(validation_result)

        # best_val_loss holds the NEGATED best score ("lower is better"), so
        # the current score beats it exactly when score > -best_val_loss.
        is_best = weighted_pearson > -self.best_val_loss
        if is_best:
            old_best = -self.best_val_loss
            self.best_val_loss = -weighted_pearson
            current_step = self.step + self.resume_step

            logger.log(f"🎉 New best reconstruction score: {weighted_pearson:.4f} (previous: {old_best:.4f})")
            logger.log(f"   Saving best model at step {current_step}...")

            # Rotate: drop the previous best files, then write the new ones.
            self._remove_old_best_checkpoints()
            self._save_best_checkpoint(current_step)

        logger.log(f"Reconstruction metrics: {validation_result}")

        if val_samples_for_vis:
            self.save_reconstruction_samples(val_samples_for_vis[0], is_best)

        self.model.train()
        return validation_result

    def _save_best_checkpoint(self, step):
        """Write model / EMA / optimizer checkpoints for the new best step.

        Files go into ``self.resume_checkpoint`` (used here as the run's
        output directory — TODO confirm this is the intended convention) and
        are recorded in ``best_checkpoint_files`` so they can be removed
        when a better model appears.
        """
        checkpoint_dir = self.resume_checkpoint
        os.makedirs(checkpoint_dir, exist_ok=True)

        saved_files = []

        # Main model weights.
        state_dict = self.mp_trainer.master_params_to_state_dict(self.mp_trainer.master_params)
        model_path = os.path.join(checkpoint_dir, f"best_model{step:06d}.pt")
        with open(model_path, "wb") as f:
            torch.save(state_dict, f)
        saved_files.append(model_path)
        logger.log(f"   ✓ Saved: best_model{step:06d}.pt")

        # EMA weights, one file per EMA rate.
        for rate, params in zip(self.ema_rate, self.ema_params):
            ema_state = self.mp_trainer.master_params_to_state_dict(params)
            ema_path = os.path.join(checkpoint_dir, f"best_ema_{rate}_{step:06d}.pt")
            with open(ema_path, "wb") as f:
                torch.save(ema_state, f)
            saved_files.append(ema_path)
            logger.log(f"   ✓ Saved: best_ema_{rate}_{step:06d}.pt")

        # Optimizer state.
        opt_path = os.path.join(checkpoint_dir, f"best_opt{step:06d}.pt")
        with open(opt_path, "wb") as f:
            torch.save(self.opt.state_dict(), f)
        saved_files.append(opt_path)
        logger.log(f"   ✓ Saved: best_opt{step:06d}.pt")

        self.best_checkpoint_files = saved_files

    def _remove_old_best_checkpoints(self):
        """Delete the files of the previous best checkpoint (best-effort)."""
        if not self.best_checkpoint_files:
            return

        logger.log(f"   Removing old best checkpoints...")
        for filepath in self.best_checkpoint_files:
            try:
                if os.path.exists(filepath):
                    os.remove(filepath)
                    logger.log(f"   ✗ Removed: {os.path.basename(filepath)}")
            except Exception as e:
                logger.log(f"   ⚠ Failed to remove {os.path.basename(filepath)}: {e}")

        self.best_checkpoint_files = []

    def _plot_triptych(self, orig_img, noisy_img, recon_img, t_val,
                       suptitle, save_path, show_means, corr_fallback):
        """Render one original/noisy/reconstructed 3-panel comparison figure.

        Args:
            orig_img, noisy_img, recon_img: 2-D arrays for one sample.
            t_val: diffusion timestep used to create the noisy image.
            suptitle: figure-level title.
            save_path: output PNG path.
            show_means: include per-panel mean values in the panel titles.
            corr_fallback: title text used when Pearson computation fails.
        """
        from scipy.stats import pearsonr

        fig, axes = plt.subplots(1, 3, figsize=(18, 6))

        im0 = axes[0].imshow(orig_img, cmap='RdYlBu_r', aspect='auto')
        axes[0].set_title(f'Original\nmean={orig_img.mean():.3f}' if show_means else 'Original')
        axes[0].axis('off')
        plt.colorbar(im0, ax=axes[0], fraction=0.046)

        im1 = axes[1].imshow(noisy_img, cmap='RdYlBu_r', aspect='auto')
        noisy_title = f'Noisy (t={t_val})'
        if show_means:
            noisy_title += f'\nmean={noisy_img.mean():.3f}'
        axes[1].set_title(noisy_title)
        axes[1].axis('off')
        plt.colorbar(im1, ax=axes[1], fraction=0.046)

        im2 = axes[2].imshow(recon_img, cmap='RdYlBu_r', aspect='auto')
        try:
            corr, _ = pearsonr(orig_img.flatten(), recon_img.flatten())
            corr_str = f'Pearson: {corr:.4f}'
        except Exception:
            corr_str = corr_fallback
        if show_means:
            recon_title = f'Reconstructed\nmean={recon_img.mean():.3f}\n{corr_str}'
        else:
            recon_title = f'Reconstructed\n{corr_str}'
        axes[2].set_title(recon_title)
        axes[2].axis('off')
        plt.colorbar(im2, ax=axes[2], fraction=0.046)

        plt.suptitle(suptitle, fontsize=14, y=1.02)
        plt.tight_layout()
        plt.savefig(save_path, dpi=150, bbox_inches='tight')
        plt.close()

    def save_reconstruction_samples(self, sample_batch, is_best=False):
        """Save per-sample reconstruction visualizations for one batch.

        Writes up to four "original / noisy / reconstructed" triptych PNGs
        under ``<resume_checkpoint>/reconstruction_samples``.  When
        ``is_best`` is True, also refreshes the persistent
        ``best_reconstruction_sample_*.png`` files.
        """
        save_dir = os.path.join(self.resume_checkpoint, 'reconstruction_samples')
        os.makedirs(save_dir, exist_ok=True)

        orig = sample_batch['original']
        noisy = sample_batch['noisy']
        recon = sample_batch['reconstructed']
        t_val = sample_batch['t']
        step = self.step + self.resume_step

        for j in range(min(4, orig.shape[0])):
            # Per-step snapshot (with means in the panel titles).
            self._plot_triptych(
                orig[j, 0], noisy[j, 0], recon[j, 0], t_val,
                suptitle=f'Reconstruction Test - Step {step}',
                save_path=os.path.join(
                    save_dir, f'step_{step:06d}_t{t_val}_sample_{j}.png'
                ),
                show_means=True,
                corr_fallback='Pearson: N/A',
            )

            if is_best:
                # Persistent best-model snapshot (simpler titles).
                self._plot_triptych(
                    orig[j, 0], noisy[j, 0], recon[j, 0], t_val,
                    suptitle=f'Best Reconstruction - Step {step}',
                    save_path=os.path.join(save_dir, f'best_reconstruction_sample_{j}.png'),
                    show_means=False,
                    corr_fallback='N/A',
                )

    def save(self):
        """Skip regular checkpointing; persist validation metrics as JSON.

        Regular checkpoints are intentionally not written — only the best
        model is saved (see ``validate``).  Validation metrics are converted
        from NumPy scalars/arrays to native Python types before dumping.
        """
        logger.log(f"Skipping regular checkpoint save (only saving best model)")

        if self.validation_metrics:
            metrics_path = os.path.join(self.resume_checkpoint, 'validation_metrics.json')

            def _py(obj):
                # json cannot serialize NumPy scalars/arrays directly.
                if isinstance(obj, (np.floating, np.integer)):
                    return obj.item()
                if isinstance(obj, np.ndarray):
                    return obj.tolist()
                return obj

            serializable = [
                {k: _py(v) for k, v in rec.items()} for rec in self.validation_metrics
            ]

            with open(metrics_path, 'w') as f:
                json.dump(serializable, f, indent=2)


def create_enhanced_model_and_diffusion(args):
    """Build the diffusion model and process from a parsed argument dict.

    The model's spatial size is taken from ``window_size`` rather than a
    separate ``image_size`` argument.
    """
    model_keys = [
        'in_channels', 'model_channels', 'out_channels',
        'num_res_blocks', 'attention_resolutions', 'dropout', 'channel_mult',
        'conv_resample', 'dims', 'num_classes', 'use_checkpoint', 'use_fp16',
        'num_heads', 'num_head_channels', 'num_heads_upsample',
        'use_scale_shift_norm', 'resblock_updown', 'use_new_attention_order',
        'use_spatial_transformer', 'transformer_depth', 'context_dim',
        'time_cond_dim', 'use_transition_anchors', 'cond_drop_prob',
        'sem_cond_dim', 'use_semantic_encoder', 'sem_drop_prob',
        'use_vae_encoder', 'vae_encoder_path', 'freeze_vae_encoder',
    ]
    diffusion_keys = [
        'learn_sigma', 'diffusion_steps', 'noise_schedule',
        'timestep_respacing', 'use_kl', 'predict_xstart', 'rescale_timesteps',
        'rescale_learned_sigmas',
    ]

    model_kwargs = args_to_dict(args, model_keys)
    # Hi-C window size doubles as the model's image size.
    model_kwargs['image_size'] = args['window_size']
    diffusion_kwargs = args_to_dict(args, diffusion_keys)

    return create_model_and_diffusion(**model_kwargs, **diffusion_kwargs)


def parse_enhanced_args():
    """Parse command-line arguments for enhanced training.

    Starts from the project-wide defaults, layers the enhanced-training
    defaults on top, and returns the parsed CLI values as a plain dict.
    """
    defaults = get_all_defaults()

    # Enhanced-training defaults; validate frequently so the best model is
    # captured early.
    defaults.update({
        'validation_split': 0.1,
        'validation_interval': 1000,
        'validation_samples': 4,
        'use_vae_regularization': False,
        'vae_reg_weight': 1e-4,
        'save_validation_samples': True,
    })

    parser = argparse.ArgumentParser(
        description='Enhanced Hi-C Diffusion Training with VAE Support'
    )
    add_dict_to_argparser(parser, defaults)

    return vars(parser.parse_args())


def main():
    """Enhanced training entry point.

    Parses arguments, configures distributed training and logging, builds
    train/validation dataloaders (random split), constructs the model and
    diffusion process, runs the enhanced training loop, and performs a
    final validation pass when a validation set exists.
    """
    args = parse_enhanced_args()

    # Setup
    dist_util.setup_dist()
    log_dir = args['logger_path'] or './enhanced_logs'
    logger.configure(dir=log_dir)

    # Save training configuration.  Create the directory explicitly in case
    # logger.configure did not (NOTE(review): presumably it does — confirm).
    os.makedirs(log_dir, exist_ok=True)
    config_path = os.path.join(log_dir, 'training_config.json')
    with open(config_path, 'w') as f:
        json.dump(args, f, indent=2)

    logger.log("=== Enhanced Hi-C Diffusion Training ===")
    logger.log(f"Configuration saved to: {config_path}")
    logger.log(f"VAE encoder enabled: {args['use_vae_encoder']}")
    logger.log(f"VAE encoder path: {args['vae_encoder_path']}")
    logger.log(f"Freeze VAE encoder: {args['freeze_vae_encoder']}")
    logger.log(f"Validation split: {args['validation_split']}")
    logger.log(f"VAE regularization: {args['use_vae_regularization']}")

    # Dataloader over the full dataset; re-split below into train/validation.
    full_dataloader = create_hic_dataloader(
        data_dir=args['data_dir'],
        batch_size=args['batch_size'],
        timepoints=args['timepoints'].split(','),
        file_format=args['file_format'],
        chromosome=args['chromosome'],
        resolution=args['resolution'],
        window_size=args['window_size'],
        normalization=args['normalization'],
        log_transform=args['log_transform'],
        output_range=args.get('output_range', '0,1'),
        interpolation_prob=args['interpolation_prob'],
        augmentation=True,
        random_window=not args['use_grid_windows'],
        use_grid_windows=args['use_grid_windows'],
        window_stride=args['window_stride'],
        band_limit_bp=args['band_limit_bp'],
        band_limit_bins=args['band_limit_bins'],
        num_workers=4,
    )

    # Split into train/validation.
    dataset_size = len(full_dataloader.dataset)
    val_size = int(args['validation_split'] * dataset_size)
    train_size = dataset_size - val_size

    if val_size > 0:
        train_dataset, val_dataset = torch.utils.data.random_split(
            full_dataloader.dataset, [train_size, val_size]
        )

        train_loader = DataLoader(
            train_dataset,
            batch_size=args['batch_size'],
            shuffle=True,
            num_workers=4,
            pin_memory=True,
        )

        val_loader = DataLoader(
            val_dataset,
            batch_size=min(args['batch_size'], 8),  # smaller batch for validation
            shuffle=False,
            num_workers=2,
            pin_memory=True,
        )

        logger.log(f"Dataset split - Train: {train_size}, Validation: {val_size}")
    else:
        train_loader = full_dataloader
        val_loader = None
        logger.log(f"No validation split - Training on full dataset: {dataset_size}")

    # Create model and diffusion.
    model, diffusion = create_enhanced_model_and_diffusion(args)

    # Log model size.
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    logger.log(f"Model parameters - Total: {total_params:,}, Trainable: {trainable_params:,}")

    # Create the enhanced training loop.
    training_args = args_to_dict(args, [
        'microbatch', 'lr', 'ema_rate', 'log_interval', 'save_interval',
        'resume_checkpoint', 'use_fp16', 'fp16_scale_growth', 'weight_decay',
        'lr_anneal_steps', 'multi_t_training', 'num_t_per_batch', 't_multi_strategy', 't_stride'
    ])

    train_loop = EnhancedHiCTrainLoop(
        model=model,
        diffusion=diffusion,
        data=train_loader,
        batch_size=args['batch_size'],
        validation_loader=val_loader,
        validation_interval=args['validation_interval'],
        validation_samples=args['validation_samples'],
        use_vae_regularization=args['use_vae_regularization'],
        vae_reg_weight=args['vae_reg_weight'],
        **training_args
    )

    logger.log("Starting enhanced training...")
    train_loop.run_loop()

    # Final validation after training completes.
    if val_loader:
        logger.log("Running final validation...")
        final_metrics = train_loop.validate()
        logger.log(f"Final validation metrics: {final_metrics}")

    logger.log("Enhanced training completed!")


# Script entry point: run enhanced training when executed directly.
if __name__ == "__main__":
    main() 