"""
VAE Pre-training Script for Hi-C Semantic Encoding
Trains VAE encoder independently before diffusion model training
"""
import argparse
import os
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import json

from Squidiff import dist_util, logger
from Squidiff.vae_encoder import HiCVAE
from Squidiff.hic_datasets import create_hic_dataloader
from Squidiff.hic_script_util import hic_data_defaults, add_dict_to_argparser
from Squidiff.hic_evaluation import evaluate_hic_quality


class VAETrainer:
    """VAE training manager with validation and checkpointing.

    Runs a step-based training loop with KL-weight (beta) annealing,
    periodic validation, TensorBoard logging, and checkpointing of both
    the full model and an encoder-only export for diffusion-model
    integration.
    """

    def __init__(
        self,
        model,
        train_loader,
        val_loader,
        learning_rate=1e-4,
        weight_decay=1e-5,
        beta_schedule='constant',
        beta_start=1e-4,
        beta_end=1e-2,
        beta_warmup_steps=10000,
        save_dir='./vae_checkpoints',
        log_interval=100,
        save_interval=5000,
        # Default restored to 1000; a debug override of 10 would make the
        # full-validation pass dominate training time for default callers.
        validation_interval=1000,
        device='cuda',
    ):
        """
        Args:
            model: HiCVAE instance exposing ``compute_loss``, ``beta``,
                ``encoder``, and ``latent_dim``.
            train_loader: DataLoader yielding dicts with key ``'hic'``
                containing [B, 1, H, W] tensors.
            val_loader: Optional validation DataLoader; may be None.
            learning_rate: AdamW learning rate.
            weight_decay: AdamW weight decay.
            beta_schedule: One of ``'constant'``, ``'linear'``,
                ``'cyclical'`` — KL-annealing schedule for ``get_beta``.
            beta_start: Initial beta for annealing schedules.
            beta_end: Final (linear) / peak (cyclical) beta.
            beta_warmup_steps: Warmup length (linear) or cycle length
                (cyclical) in optimizer steps.
            save_dir: Directory for checkpoints, samples, and TensorBoard.
            log_interval: Steps between training-metric log lines.
            save_interval: Steps between checkpoints/sample images.
            validation_interval: Steps between validation passes.
            device: Torch device string for the model and batches.
        """
        self.model = model.to(device)
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.device = device
        self.save_dir = save_dir
        self.log_interval = log_interval
        self.save_interval = save_interval
        self.validation_interval = validation_interval

        # Optimizer
        self.optimizer = optim.AdamW(
            model.parameters(), lr=learning_rate, weight_decay=weight_decay
        )

        # Beta scheduling for KL annealing
        self.beta_schedule = beta_schedule
        self.beta_start = beta_start
        self.beta_end = beta_end
        self.beta_warmup_steps = beta_warmup_steps

        # Training state
        self.step = 0
        self.epoch = 0
        self.best_val_loss = float('inf')

        # Logging
        os.makedirs(save_dir, exist_ok=True)
        self.writer = SummaryWriter(os.path.join(save_dir, 'tensorboard'))

        # Metrics tracking (per-step train records, per-validation records)
        self.train_losses = []
        self.val_losses = []

        logger.log("VAE Trainer initialized:")
        logger.log(f"  Model parameters: {sum(p.numel() for p in model.parameters()):,}")
        logger.log(f"  Training samples: {len(train_loader.dataset)}")
        logger.log(f"  Validation samples: {len(val_loader.dataset) if val_loader else 0}")
        logger.log(f"  Device: {device}")

    def get_beta(self):
        """Return the KL weight (beta) for the current training step.

        - ``constant``: the model's fixed beta.
        - ``linear``: ramps beta_start -> beta_end over beta_warmup_steps,
          then stays at beta_end.
        - ``cyclical``: cosine cycle of length beta_warmup_steps; starts
          each cycle at beta_end and decays toward beta_start.
        Unknown schedule names fall back to the model's fixed beta.
        """
        if self.beta_schedule == 'constant':
            return self.model.beta
        elif self.beta_schedule == 'linear':
            if self.step < self.beta_warmup_steps:
                return self.beta_start + (self.beta_end - self.beta_start) * (self.step / self.beta_warmup_steps)
            else:
                return self.beta_end
        elif self.beta_schedule == 'cyclical':
            cycle_length = self.beta_warmup_steps
            cycle_position = (self.step % cycle_length) / cycle_length
            return self.beta_start + (self.beta_end - self.beta_start) * (0.5 * (1 + np.cos(np.pi * cycle_position)))
        else:
            return self.model.beta

    def train_step(self, batch):
        """Run a single optimization step on one batch.

        Args:
            batch: dict with key ``'hic'`` holding a [B, 1, H, W] tensor.

        Returns:
            Tuple of (loss_dict from ``model.compute_loss``,
            extra info dict with the beta used this step).
        """
        self.model.train()
        self.optimizer.zero_grad()

        # Get data
        hic_data = batch['hic'].to(self.device)  # [B, 1, H, W]

        # Forward pass
        recon_x, mu, logvar, z = self.model(hic_data)

        # Update beta on the model so compute_loss uses the annealed value
        current_beta = self.get_beta()
        self.model.beta = current_beta

        # Compute loss
        loss_dict = self.model.compute_loss(hic_data, recon_x, mu, logvar)
        loss = loss_dict['total_loss']

        # Backward pass
        loss.backward()

        # Gradient clipping to stabilize VAE training
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)

        self.optimizer.step()
        self.step += 1

        return loss_dict, {'beta': current_beta}

    def validate(self):
        """Run a full validation pass.

        Returns:
            Dict of averaged losses (keys prefixed ``val_``) plus averaged
            reconstruction-quality metrics from ``evaluate_hic_quality``
            computed on a small subset of samples. Empty dict when there
            is no validation loader or it yields no batches.
        """
        if not self.val_loader:
            return {}

        self.model.eval()
        val_losses = []
        val_metrics = []

        with torch.no_grad():
            for batch in self.val_loader:
                hic_data = batch['hic'].to(self.device)

                # Forward pass
                recon_x, mu, logvar, z = self.model(hic_data)

                # Compute loss
                loss_dict = self.model.compute_loss(hic_data, recon_x, mu, logvar)
                val_losses.append(loss_dict)

                # Compute reconstruction quality metrics
                if len(val_metrics) < 10:  # Only compute for first few batches
                    for i in range(min(4, hic_data.size(0))):
                        orig = hic_data[i, 0].cpu().numpy()
                        recon = recon_x[i, 0].cpu().numpy()
                        metrics = evaluate_hic_quality(recon, orig)
                        val_metrics.append(metrics)

        if not val_losses:
            # Loader produced no batches (e.g. empty validation split);
            # avoid IndexError on val_losses[0] below.
            return {}

        # Average losses (handle floats and tensors)
        avg_losses = {}
        for key in val_losses[0].keys():
            vals = [loss[key] for loss in val_losses]
            vals = [v.item() if torch.is_tensor(v) else float(v) for v in vals]
            avg_losses[f'val_{key}'] = float(np.mean(vals))

        # Average metrics, skipping NaNs and error entries
        if val_metrics:
            avg_metrics = {}
            for key in val_metrics[0].keys():
                if key != 'error':
                    values = [m[key] for m in val_metrics if key in m and not np.isnan(m[key])]
                    if values:
                        avg_metrics[f'val_{key}'] = float(np.mean(values))
            avg_losses.update(avg_metrics)

        return avg_losses

    def save_checkpoint(self, is_best=False):
        """Save training state to disk.

        Writes the latest and step-tagged full checkpoints, an encoder-only
        export (``vae_encoder.pt``) for diffusion-model integration, and —
        when ``is_best`` — best-model copies of both.
        """
        checkpoint = {
            'step': self.step,
            'epoch': self.epoch,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'best_val_loss': self.best_val_loss,
            'train_losses': self.train_losses,
            'val_losses': self.val_losses,
        }

        # Save latest checkpoint
        torch.save(checkpoint, os.path.join(self.save_dir, 'latest_checkpoint.pt'))

        # Save step-specific checkpoint
        torch.save(checkpoint, os.path.join(self.save_dir, f'checkpoint_step_{self.step:06d}.pt'))

        # Save best checkpoint
        if is_best:
            torch.save(checkpoint, os.path.join(self.save_dir, 'best_checkpoint.pt'))

        # Save encoder only for diffusion model integration
        encoder_state = {
            'encoder_state_dict': self.model.encoder.state_dict(),
            'latent_dim': self.model.latent_dim,
            'step': self.step,
        }
        torch.save(encoder_state, os.path.join(self.save_dir, 'vae_encoder.pt'))
        if is_best:
            torch.save(encoder_state, os.path.join(self.save_dir, 'vae_encoder_best.pt'))

    def load_checkpoint(self, checkpoint_path):
        """Restore model, optimizer, and training state from a checkpoint.

        NOTE(review): torch.load unpickles arbitrary objects — only load
        checkpoints from trusted sources.
        """
        checkpoint = torch.load(checkpoint_path, map_location=self.device)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.step = checkpoint['step']
        self.epoch = checkpoint['epoch']
        self.best_val_loss = checkpoint['best_val_loss']
        self.train_losses = checkpoint.get('train_losses', [])
        self.val_losses = checkpoint.get('val_losses', [])
        logger.log(f"Loaded checkpoint from step {self.step}")

    def generate_samples(self, num_samples=4, save_path=None):
        """Plot original vs. reconstructed Hi-C maps side by side.

        Args:
            num_samples: Number of samples to visualize (from the first
                batch of the validation loader, or the train loader when
                no validation loader exists).
            save_path: When given, the figure is saved there and closed;
                otherwise it is shown interactively.
        """
        self.model.eval()

        # Prefer validation data so samples are unseen during training
        if self.val_loader:
            data_loader = self.val_loader
        else:
            data_loader = self.train_loader

        with torch.no_grad():
            batch = next(iter(data_loader))
            hic_data = batch['hic'][:num_samples].to(self.device)

            # Reconstruct
            recon_x, mu, logvar, z = self.model(hic_data)

            # Move to CPU
            originals = hic_data.cpu().numpy()
            reconstructions = recon_x.cpu().numpy()

            # Plot: row 0 originals, row 1 reconstructions
            fig, axes = plt.subplots(2, num_samples, figsize=(4 * num_samples, 8))
            if num_samples == 1:
                # subplots returns a 1-D array for a single column; force 2x1
                axes = axes.reshape(2, 1)

            for i in range(num_samples):
                # Original
                im1 = axes[0, i].imshow(originals[i, 0], cmap='Reds', origin='lower')
                axes[0, i].set_title(f'Original {i+1}')
                axes[0, i].axis('off')

                # Reconstruction
                im2 = axes[1, i].imshow(reconstructions[i, 0], cmap='Reds', origin='lower')
                axes[1, i].set_title(f'Reconstructed {i+1}')
                axes[1, i].axis('off')

                # Add colorbar
                plt.colorbar(im1, ax=axes[0, i], fraction=0.046, pad=0.04)
                plt.colorbar(im2, ax=axes[1, i], fraction=0.046, pad=0.04)

            plt.tight_layout()

            if save_path:
                plt.savefig(save_path, dpi=150, bbox_inches='tight')
                plt.close()
            else:
                plt.show()

    def train(self, num_steps):
        """Main training loop.

        Iterates the train loader indefinitely (re-creating the iterator
        at epoch boundaries) until ``num_steps`` optimizer steps have run,
        interleaving logging, validation, and checkpointing at their
        configured intervals. Saves a final checkpoint on completion.
        """
        logger.log(f"Starting VAE training for {num_steps} steps")

        train_iter = iter(self.train_loader)

        while self.step < num_steps:
            # Get next batch; restart the iterator at epoch end
            try:
                batch = next(train_iter)
            except StopIteration:
                train_iter = iter(self.train_loader)
                batch = next(train_iter)
                self.epoch += 1

            # Training step (increments self.step)
            loss_dict, extra_info = self.train_step(batch)

            # Log training metrics
            if self.step % self.log_interval == 0:
                if self.writer:
                    for key, value in loss_dict.items():
                        scalar = value.item() if torch.is_tensor(value) else float(value)
                        self.writer.add_scalar(f'train/{key}', scalar, self.step)
                    for key, value in extra_info.items():
                        self.writer.add_scalar(f'train/{key}', float(value), self.step)

                logger.log(
                    f"Step {self.step:6d}: "
                    f"loss={loss_dict['total_loss']:.4f}, "
                    f"recon={loss_dict['reconstruction_loss']:.4f}, "
                    f"kl={loss_dict['kl_loss']:.4f}, "
                    f"beta={extra_info['beta']:.6f}"
                )

            # Validation
            if self.step % self.validation_interval == 0:
                val_metrics = self.validate()
                if val_metrics:
                    for key, value in val_metrics.items():
                        self.writer.add_scalar(key, value, self.step)

                    val_loss = val_metrics.get('val_total_loss', float('inf'))
                    is_best = val_loss < self.best_val_loss
                    if is_best:
                        self.best_val_loss = val_loss

                    logger.log(f"Validation - Step {self.step}: {val_metrics}")

                    # Save validation metrics
                    self.val_losses.append({'step': self.step, **val_metrics})

            # Save checkpoint; "best" when the latest validation matched
            # the best loss seen so far
            if self.step % self.save_interval == 0:
                is_best = len(self.val_losses) > 0 and self.val_losses[-1].get('val_total_loss', float('inf')) == self.best_val_loss
                self.save_checkpoint(is_best=is_best)

                # Generate samples
                sample_path = os.path.join(self.save_dir, f'samples_step_{self.step:06d}.png')
                self.generate_samples(save_path=sample_path)

            # Track training loss
            self.train_losses.append({
                'step': self.step,
                'loss': loss_dict['total_loss'].item(),
                'recon_loss': loss_dict['reconstruction_loss'].item(),
                'kl_loss': loss_dict['kl_loss'].item(),
            })

        # Final checkpoint
        self.save_checkpoint()
        logger.log("VAE training completed!")

def create_vae_model(args):
    """Build a HiCVAE from the parsed argument dictionary.

    Args:
        args: dict of parsed CLI arguments containing the ``vae_*`` keys
            and ``window_size``.

    Returns:
        A configured (untrained) HiCVAE instance.
    """
    vae_config = {
        'in_channels': 1,
        'latent_dim': args['vae_latent_dim'],
        'base_channels': args['vae_base_channels'],
        'channel_multipliers': args['vae_channel_multipliers'],
        'num_res_blocks': args['vae_num_res_blocks'],
        'dropout': args['vae_dropout'],
        'use_attention': args['vae_use_attention'],
        'beta': args['vae_beta'],
        'output_size': args['window_size'],
        'reconstruction_loss': args['vae_recon_loss'],
    }
    return HiCVAE(**vae_config)


def parse_vae_args():
    """Parse command-line arguments for VAE training.

    Merges the shared Hi-C data defaults with VAE-specific defaults
    (VAE-specific values win on key collisions) and exposes them all as
    CLI flags.

    Returns:
        dict mapping argument names to parsed values.
    """
    # VAE-specific defaults layered on top of the shared data defaults
    vae_defaults = {
        'vae_latent_dim': 256,
        'vae_base_channels': 64,
        'vae_channel_multipliers': [1, 2, 4, 8],
        'vae_num_res_blocks': 2,
        'vae_dropout': 0.1,
        'vae_use_attention': True,
        'vae_beta': 1e-3,
        'vae_recon_loss': 'mse',
        'vae_learning_rate': 1e-4,
        'vae_weight_decay': 1e-5,
        'vae_beta_schedule': 'linear',
        'vae_beta_start': 1e-4,
        'vae_beta_end': 1e-2,
        'vae_beta_warmup_steps': 10000,
        'vae_num_steps': 200000,
        'vae_batch_size': 16,
        'vae_val_split': 0.1,
        'vae_save_dir': './vae_checkpoints',
        'vae_log_interval': 100,
        'vae_save_interval': 5000,
        'vae_validation_interval': 1000,
        'vae_resume_checkpoint': '',
    }
    defaults = {**hic_data_defaults(), **vae_defaults}

    parser = argparse.ArgumentParser(description='VAE Pre-training for Hi-C Semantic Encoding')
    add_dict_to_argparser(parser, defaults)

    return vars(parser.parse_args())


def main():
    """Main VAE training function.

    End-to-end driver: parses args, configures distributed device and
    logging, builds the Hi-C dataset and train/val split, constructs the
    VAE and trainer, optionally resumes from a checkpoint, trains, then
    runs a final validation pass and saves sample reconstructions.
    """
    args = parse_vae_args()
    
    # Setup distributed training (device selection comes from dist_util)
    dist_util.setup_dist()
    device = dist_util.dev()
    
    # Setup logging
    os.makedirs(args['vae_save_dir'], exist_ok=True)
    logger.configure(dir=args['vae_save_dir'])
    
    # Save arguments for reproducibility alongside the checkpoints
    with open(os.path.join(args['vae_save_dir'], 'vae_training_args.json'), 'w') as f:
        json.dump(args, f, indent=2)
    
    logger.log("=== VAE Pre-training Configuration ===")
    logger.log(f"Latent dimension: {args['vae_latent_dim']}")
    logger.log(f"Base channels: {args['vae_base_channels']}")
    logger.log(f"Channel multipliers: {args['vae_channel_multipliers']}")
    logger.log(f"Batch size: {args['vae_batch_size']}")
    logger.log(f"Learning rate: {args['vae_learning_rate']}")
    logger.log(f"Beta schedule: {args['vae_beta_schedule']}")
    logger.log(f"Training steps: {args['vae_num_steps']}")
    logger.log(f"Data directory: {args['data_dir']}")
    logger.log(f"Window size: {args['window_size']}")
    
    # Create full dataset; only the underlying dataset of this loader is
    # used below (it is re-wrapped after the train/val split)
    full_dataloader = create_hic_dataloader(
        data_dir=args['data_dir'],
        batch_size=args['vae_batch_size'],
        timepoints=args['timepoints'].split(','),  # CLI passes comma-separated list
        file_format=args['file_format'],
        chromosome=args['chromosome'],
        resolution=args['resolution'],
        window_size=args['window_size'],
        normalization=args['normalization'],
        log_transform=args['log_transform'],
        interpolation_prob=0.0,  # No interpolation for VAE training
        random_window=True,
        # NOTE(review): grid windows enabled while random_window is also
        # True — confirm which sampling mode create_hic_dataloader applies
        # when both flags are set.
        use_grid_windows=True,
        num_workers=4,
    )
    
    # Split into train/validation
    dataset_size = len(full_dataloader.dataset)
    logger.log(f"Dataset size: {dataset_size}")
    val_size = int(args['vae_val_split'] * dataset_size)
    train_size = dataset_size - val_size
    logger.log(f"Train size: {train_size}, Val size: {val_size}")
    
    # NOTE(review): random_split is unseeded, so the split differs between
    # runs (including resumed ones) — confirm this is acceptable.
    train_dataset, val_dataset = torch.utils.data.random_split(
        full_dataloader.dataset, [train_size, val_size]
    )
    
    train_loader = DataLoader(
        train_dataset,
        batch_size=args['vae_batch_size'],
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )
    
    # No validation loader when the split leaves zero validation samples
    val_loader = DataLoader(
        val_dataset,
        batch_size=args['vae_batch_size'],
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    ) if val_size > 0 else None
    
    # Create model
    model = create_vae_model(args)
    
    # Create trainer
    trainer = VAETrainer(
        model=model,
        train_loader=train_loader,
        val_loader=val_loader,
        learning_rate=args['vae_learning_rate'],
        weight_decay=args['vae_weight_decay'],
        beta_schedule=args['vae_beta_schedule'],
        beta_start=args['vae_beta_start'],
        beta_end=args['vae_beta_end'],
        beta_warmup_steps=args['vae_beta_warmup_steps'],
        save_dir=args['vae_save_dir'],
        log_interval=args['vae_log_interval'],
        save_interval=args['vae_save_interval'],
        validation_interval=args['vae_validation_interval'],
        device=device,
    )
    
    # Resume from checkpoint if specified (empty string means fresh start)
    if args['vae_resume_checkpoint']:
        trainer.load_checkpoint(args['vae_resume_checkpoint'])
    
    # Train VAE
    trainer.train(args['vae_num_steps'])
    
    # Final evaluation
    logger.log("Performing final evaluation...")
    final_metrics = trainer.validate()
    if final_metrics:
        logger.log(f"Final validation metrics: {final_metrics}")
    
    # Generate final samples
    trainer.generate_samples(
        num_samples=8,
        save_path=os.path.join(args['vae_save_dir'], 'final_samples.png')
    )
    
    logger.log(f"VAE training completed. Models saved to {args['vae_save_dir']}")
    logger.log("Use 'vae_encoder.pt' for diffusion model integration")

# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()