"""
AE-PINN Solver with training loop and optimization strategies.

This module implements the main training algorithm for Attention-Enhanced Physics-Informed 
Neural Networks, including data sampling, optimization, and performance monitoring.
"""

import torch
import torch.optim as optim
import numpy as np
import time
import os
from typing import Dict, List, Tuple, Optional, Callable
import matplotlib.pyplot as plt

from .loss import AEPINNLoss, AdaptiveLossWeights


class CollocationPointSampler:
    """
    Generates collocation points for different regions (interior, boundary, interface).

    Implements various sampling strategies to ensure adequate coverage of the domain
    and critical regions like interfaces. All public sampling methods return a
    tensor of exactly the requested number of points, shape (n_points, 2).
    """

    def __init__(self, domain_bounds, level_set_func, interface_bandwidth=0.1):
        """
        Initialize point sampler.

        Args:
            domain_bounds (list): [[x_min, x_max], [y_min, y_max]]
            level_set_func (callable): Level-set function phi(x, y) defining the
                interface as its zero set; must accept/return torch tensors
            interface_bandwidth (float): Half-width of the |phi| band around the
                interface used for enhanced (or excluded) sampling
        """
        self.domain_bounds = domain_bounds
        self.level_set_func = level_set_func
        self.interface_bandwidth = interface_bandwidth

    def _uniform_in_domain(self, n):
        """Draw n uniform random (x, y) coordinate tensors inside the rectangle."""
        x_min, x_max = self.domain_bounds[0]
        y_min, y_max = self.domain_bounds[1]
        x = torch.rand(n) * (x_max - x_min) + x_min
        y = torch.rand(n) * (y_max - y_min) + y_min
        return x, y

    def sample_interior_points(self, n_points, exclude_interface=True):
        """
        Sample interior collocation points.

        Oversamples by a factor of 2, optionally rejects candidates whose
        level-set value lies within interface_bandwidth of zero, and pads any
        shortfall with unfiltered uniform points so the output size is exact.

        Args:
            n_points (int): Number of points to sample
            exclude_interface (bool): Whether to exclude points near interface

        Returns:
            torch.Tensor: Interior points, shape (n_points, 2)
        """
        x, y = self._uniform_in_domain(n_points * 2)

        if exclude_interface:
            # Keep only candidates strictly outside the interface band.
            phi_vals = self.level_set_func(x, y)
            keep = torch.abs(phi_vals) > self.interface_bandwidth
            x, y = x[keep][:n_points], y[keep][:n_points]

            shortfall = n_points - len(x)
            if shortfall > 0:
                # NOTE: padding points are NOT re-filtered, so when the band
                # covers a large fraction of the domain a few pads may fall
                # inside it — matches the original best-effort behavior.
                x_pad, y_pad = self._uniform_in_domain(shortfall)
                x = torch.cat([x, x_pad])
                y = torch.cat([y, y_pad])
        else:
            x, y = x[:n_points], y[:n_points]

        return torch.stack([x, y], dim=1)

    def sample_boundary_points(self, n_points):
        """
        Sample points on the four sides of the rectangular domain boundary.

        Points are split as evenly as possible across the sides; the remainder
        (n_points % 4) is distributed one extra point per side, so exactly
        n_points rows are always returned (a plain // 4 split would silently
        drop up to three points).

        Args:
            n_points (int): Number of boundary points

        Returns:
            torch.Tensor: Boundary points, shape (n_points, 2)
        """
        x_min, x_max = self.domain_bounds[0]
        y_min, y_max = self.domain_bounds[1]

        base, remainder = divmod(n_points, 4)
        counts = [base + (1 if side < remainder else 0) for side in range(4)]

        segments = []
        # Bottom (y = y_min) and top (y = y_max): random x, fixed y.
        for n_side, y_fixed in ((counts[0], y_min), (counts[1], y_max)):
            x_side = torch.rand(n_side) * (x_max - x_min) + x_min
            segments.append(torch.stack([x_side, torch.full_like(x_side, y_fixed)], dim=1))
        # Left (x = x_min) and right (x = x_max): fixed x, random y.
        for n_side, x_fixed in ((counts[2], x_min), (counts[3], x_max)):
            y_side = torch.rand(n_side) * (y_max - y_min) + y_min
            segments.append(torch.stack([torch.full_like(y_side, x_fixed), y_side], dim=1))

        return torch.cat(segments, dim=0)

    def sample_interface_points(self, n_points, method='rejection'):
        """
        Sample points on or near the interface.

        Args:
            n_points (int): Number of interface points
            method (str): Sampling method ('rejection', 'parametric')

        Returns:
            torch.Tensor: Interface points, shape (n_points, 2)

        Raises:
            ValueError: If method is not a recognized sampling strategy.
        """
        if method == 'rejection':
            return self._sample_interface_rejection(n_points)
        elif method == 'parametric':
            return self._sample_interface_parametric(n_points)
        else:
            raise ValueError(f"Unknown sampling method: {method}")

    def _sample_interface_rejection(self, n_points):
        """
        Sample interface points using rejection sampling.

        Repeatedly draws batches of uniform candidates and keeps those with
        |phi| < interface_bandwidth. If the attempt budget is exhausted before
        n_points are accepted (e.g. a degenerate level set), the shortfall is
        padded with uniform random points so the returned shape is always
        (n_points, 2) — the original only fell back when NO point was found.
        """
        accepted = []
        n_accepted = 0
        max_batches = 100  # bounds total work; each batch draws 10x candidates

        for _ in range(max_batches):
            if n_accepted >= n_points:
                break
            x, y = self._uniform_in_domain(n_points * 10)
            phi_vals = self.level_set_func(x, y)
            near = torch.abs(phi_vals) < self.interface_bandwidth
            x_near, y_near = x[near], y[near]
            if len(x_near) > 0:
                take = min(len(x_near), n_points - n_accepted)
                accepted.append(torch.stack([x_near[:take], y_near[:take]], dim=1))
                n_accepted += take

        if n_accepted < n_points:
            # Fallback: pad with uniform points to honor the size contract.
            x, y = self._uniform_in_domain(n_points - n_accepted)
            accepted.append(torch.stack([x, y], dim=1))

        return torch.cat(accepted, dim=0)

    def _sample_interface_parametric(self, n_points):
        """Sample interface points using parametric representation (for simple shapes)."""
        # Placeholder: for complex interfaces, rejection sampling is more general.
        return self._sample_interface_rejection(n_points)


class AEPINNSolver:
    """
    Main solver for Attention-Enhanced Physics-Informed Neural Networks.

    Handles the complete training process including data generation,
    loss computation, optimization, and convergence monitoring.

    Typical usage: construct, call ``setup_optimizer(...)``, then ``train(...)``.
    ``setup_optimizer`` MUST be called before ``train``/``train_step`` — they
    read ``self.optimizer`` and ``self.scheduler``, which are created there.
    """
    
    def __init__(self, model, loss_function, domain_bounds, level_set_func, device=None):
        """
        Initialize AE-PINN solver.
        
        Args:
            model: Composite AE-PINN model (μ_nn + u_IA)
            loss_function: AEPINNLoss instance
            domain_bounds: Domain boundaries [[x_min, x_max], [y_min, y_max]]
            level_set_func: Level-set function defining interface
            device: PyTorch device (CPU/GPU)
        """
        self.model = model
        self.loss_function = loss_function
        self.domain_bounds = domain_bounds
        self.level_set_func = level_set_func
        
        # Set device: explicit argument wins, otherwise prefer CUDA when present.
        if device is None:
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        else:
            self.device = device
        
        self.model.to(self.device)
        
        # Initialize CUDA context if using GPU to avoid cuBLAS context issues
        if self.device.type == 'cuda':
            torch.cuda.empty_cache()
            # Perform a simple operation to initialize CUDA context
            dummy_tensor = torch.zeros(1, device=self.device)
            _ = dummy_tensor + 1
            torch.cuda.synchronize()
        
        # Point sampler (uses the sampler's default interface bandwidth).
        self.sampler = CollocationPointSampler(domain_bounds, level_set_func)
        
        # Training history: parallel lists appended to once per epoch.
        # The loss_* component lists are only appended when the loss function
        # exposes `loss_components`, so they may be shorter than `epochs`.
        self.training_history = {
            'loss_total': [],
            'loss_pde': [],
            'loss_boundary': [],
            'loss_interface': [],
            'loss_flux': [],
            'learning_rates': [],
            'epochs': []
        }
        
        # Adaptive loss weights (lazily created in train() when enabled).
        self.adaptive_weights = None
    
    def setup_optimizer(self, optimizer_type='adam', learning_rate=1e-3, 
                       scheduler_type=None, **optimizer_kwargs):
        """
        Setup optimizer and learning rate scheduler.

        Must be called before train(); creates self.optimizer and
        self.scheduler (the latter is None when no scheduler is requested
        or scheduler_type is unrecognized).
        
        Args:
            optimizer_type (str): Type of optimizer ('adam', 'lbfgs', 'sgd')
            learning_rate (float): Initial learning rate
            scheduler_type (str): Type of scheduler ('step', 'cosine', 'plateau')
            **optimizer_kwargs: Additional optimizer arguments

        Raises:
            ValueError: If optimizer_type is not recognized.
        """
        if optimizer_type.lower() == 'adam':
            self.optimizer = optim.Adam(self.model.parameters(), lr=learning_rate, 
                                      **optimizer_kwargs)
        elif optimizer_type.lower() == 'lbfgs':
            # LBFGS performs up to max_iter inner iterations per .step() call.
            self.optimizer = optim.LBFGS(self.model.parameters(), lr=learning_rate,
                                       max_iter=20, **optimizer_kwargs)
        elif optimizer_type.lower() == 'sgd':
            self.optimizer = optim.SGD(self.model.parameters(), lr=learning_rate,
                                     **optimizer_kwargs)
        else:
            raise ValueError(f"Unknown optimizer type: {optimizer_type}")
        
        # Setup scheduler. Hyperparameters (step_size, gamma, T_max, patience,
        # factor) are fixed here rather than exposed as arguments.
        if scheduler_type == 'step':
            self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, 
                                                     step_size=1000, gamma=0.9)
        elif scheduler_type == 'cosine':
            self.scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, 
                                                                T_max=5000)
        elif scheduler_type == 'plateau':
            self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, 
                                                                patience=500, factor=0.8)
        else:
            self.scheduler = None
    
    def generate_training_data(self, n_interior=1000, n_boundary=200, n_interface=200,
                             boundary_func=None, jump_func=None, flux_jump_func=None):
        """
        Generate training data (collocation points and boundary conditions).

        The condition functions are called with 1-D coordinate tensors
        (x, y) and their results are unsqueezed to column vectors, so each
        is expected to return a 1-D tensor of per-point values — TODO confirm
        against the actual callers.
        
        Args:
            n_interior (int): Number of interior collocation points
            n_boundary (int): Number of boundary points
            n_interface (int): Number of interface points
            boundary_func (callable): Function defining boundary conditions
            jump_func (callable): Function defining interface jump conditions
            flux_jump_func (callable): Function defining flux jump conditions
            
        Returns:
            dict: Training data dictionary with 'x_interior', 'x_boundary',
                'x_interface' always present; 'u_boundary', 'jump_values',
                'flux_jump_values' only when the corresponding function is given.
        """
        # Sample collocation points
        x_interior = self.sampler.sample_interior_points(n_interior)
        x_boundary = self.sampler.sample_boundary_points(n_boundary)
        x_interface = self.sampler.sample_interface_points(n_interface)
        
        # Move to device
        x_interior = x_interior.to(self.device)
        x_boundary = x_boundary.to(self.device)
        x_interface = x_interface.to(self.device)
        
        training_data = {
            'x_interior': x_interior,
            'x_boundary': x_boundary,
            'x_interface': x_interface
        }
        
        # Generate boundary conditions
        if boundary_func is not None:
            u_boundary = boundary_func(x_boundary[:, 0], x_boundary[:, 1]).unsqueeze(1)
            training_data['u_boundary'] = u_boundary.to(self.device)
        
        # Generate interface jump conditions
        if jump_func is not None:
            jump_values = jump_func(x_interface[:, 0], x_interface[:, 1]).unsqueeze(1)
            training_data['jump_values'] = jump_values.to(self.device)
        
        # Generate flux jump conditions
        if flux_jump_func is not None:
            flux_jump_values = flux_jump_func(x_interface[:, 0], x_interface[:, 1]).unsqueeze(1)
            training_data['flux_jump_values'] = flux_jump_values.to(self.device)
        
        return training_data
    
    def train_step(self, training_data):
        """
        Perform one training step (one optimizer update).
        
        Args:
            training_data (dict): Training data dictionary
            
        Returns:
            float: Total loss value
        """
        self.model.train()
        
        if isinstance(self.optimizer, optim.LBFGS):
            # LBFGS requires closure function
            def closure():
                self.optimizer.zero_grad()
                loss = self.loss_function(self.model, training_data)
                loss.backward()
                return loss
            
            self.optimizer.step(closure)
            # NOTE(review): this extra closure() call re-runs a full
            # forward/backward pass just to read the post-step loss;
            # LBFGS.step(closure) already returns the initial loss — consider
            # using that return value if the extra evaluation is too costly.
            loss = closure()
        else:
            # Standard optimizers
            self.optimizer.zero_grad()
            loss = self.loss_function(self.model, training_data)
            loss.backward()
            self.optimizer.step()
        
        return loss.item()
    
    def train(self, epochs=10000, n_interior=1000, n_boundary=200, n_interface=200,
              boundary_func=None, jump_func=None, flux_jump_func=None,
              print_every=1000, save_every=5000, save_path=None,
              use_adaptive_weights=True, resample_every=1000):
        """
        Main training loop.

        Requires setup_optimizer() to have been called first — self.optimizer
        and self.scheduler are read here and raise AttributeError otherwise.

        Adaptive weighting assumes the loss function exposes a
        ``loss_components`` dict updated on every forward pass (see
        AEPINNLoss) and the tau_* weight attributes mutated below — confirm
        against loss.py if either contract changes.
        
        Args:
            epochs (int): Number of training epochs
            n_interior (int): Number of interior points per epoch
            n_boundary (int): Number of boundary points per epoch
            n_interface (int): Number of interface points per epoch
            boundary_func (callable): Boundary condition function
            jump_func (callable): Interface jump condition function
            flux_jump_func (callable): Flux jump condition function
            print_every (int): Print frequency
            save_every (int): Model save frequency
            save_path (str): Path to save model checkpoints
            use_adaptive_weights (bool): Whether to use adaptive loss weights
            resample_every (int): Frequency of resampling collocation points
        """
        print(f"Starting AE-PINN training on {self.device}")
        print(f"Model parameters: {sum(p.numel() for p in self.model.parameters())}")
        
        # Setup adaptive weights, seeded from the loss function's current taus.
        if use_adaptive_weights and self.adaptive_weights is None:
            initial_weights = {
                'pde': self.loss_function.tau_pde,
                'boundary': self.loss_function.tau_bc,
                'interface_jump': self.loss_function.tau_interface_u,
                'flux_jump': self.loss_function.tau_interface_flux
            }
            self.adaptive_weights = AdaptiveLossWeights(initial_weights)
        
        start_time = time.time()
        
        # Generate initial training data
        training_data = self.generate_training_data(
            n_interior, n_boundary, n_interface,
            boundary_func, jump_func, flux_jump_func
        )
        
        for epoch in range(epochs):
            # Resample collocation points periodically (epoch 0 uses the
            # initial set generated above).
            if epoch % resample_every == 0 and epoch > 0:
                training_data = self.generate_training_data(
                    n_interior, n_boundary, n_interface,
                    boundary_func, jump_func, flux_jump_func
                )
            
            # Update adaptive weights after a 100-epoch warm-up, then push the
            # new weights back into the loss function's tau_* attributes.
            if use_adaptive_weights and epoch > 100:
                self.adaptive_weights.update_weights(self.loss_function.loss_components)
                self.loss_function.tau_pde = self.adaptive_weights.weights['pde']
                self.loss_function.tau_bc = self.adaptive_weights.weights['boundary']
                self.loss_function.tau_interface_u = self.adaptive_weights.weights['interface_jump']
                self.loss_function.tau_interface_flux = self.adaptive_weights.weights['flux_jump']
            
            # Training step
            loss_total = self.train_step(training_data)
            
            # Update learning rate scheduler (ReduceLROnPlateau needs the
            # metric; the other schedulers step unconditionally).
            if self.scheduler is not None:
                if isinstance(self.scheduler, optim.lr_scheduler.ReduceLROnPlateau):
                    self.scheduler.step(loss_total)
                else:
                    self.scheduler.step()
            
            # Record training history
            self.training_history['loss_total'].append(loss_total)
            self.training_history['epochs'].append(epoch)
            
            # Record component losses (skipped when the loss function does not
            # expose loss_components, leaving these lists shorter than epochs).
            if hasattr(self.loss_function, 'loss_components'):
                components = self.loss_function.loss_components
                self.training_history['loss_pde'].append(components.get('pde', 0))
                self.training_history['loss_boundary'].append(components.get('boundary', 0))
                self.training_history['loss_interface'].append(components.get('interface_jump', 0))
                self.training_history['loss_flux'].append(components.get('flux_jump', 0))
            
            # Record learning rate (first param group is assumed representative).
            current_lr = self.optimizer.param_groups[0]['lr']
            self.training_history['learning_rates'].append(current_lr)
            
            # Print progress
            if epoch % print_every == 0:
                elapsed_time = time.time() - start_time
                print(f"Epoch {epoch:6d}, Loss: {loss_total:.2e}, "
                      f"LR: {current_lr:.2e}, Time: {elapsed_time:.1f}s")
                
                if hasattr(self.loss_function, 'loss_components'):
                    components = self.loss_function.loss_components
                    print(f"  Components - PDE: {components.get('pde', 0):.2e}, "
                          f"BC: {components.get('boundary', 0):.2e}, "
                          f"Interface: {components.get('interface_jump', 0):.2e}, "
                          f"Flux: {components.get('flux_jump', 0):.2e}")
            
            # Save model checkpoint (epoch 0 is intentionally skipped)
            if save_path is not None and epoch % save_every == 0 and epoch > 0:
                self.save_checkpoint(save_path, epoch)
        
        total_time = time.time() - start_time
        print(f"Training completed in {total_time:.1f} seconds")
    
    def save_checkpoint(self, save_path, epoch):
        """Save model checkpoint to save_path/checkpoint_epoch_{epoch}.pt.

        Persists model/optimizer state, training history, the current loss
        weights, and scheduler state when a scheduler is configured.
        """
        os.makedirs(save_path, exist_ok=True)
        checkpoint = {
            'epoch': epoch,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'training_history': self.training_history,
            'loss_weights': {
                'tau_pde': self.loss_function.tau_pde,
                'tau_bc': self.loss_function.tau_bc,
                'tau_interface_u': self.loss_function.tau_interface_u,
                'tau_interface_flux': self.loss_function.tau_interface_flux
            }
        }
        
        if self.scheduler is not None:
            checkpoint['scheduler_state_dict'] = self.scheduler.state_dict()
        
        torch.save(checkpoint, os.path.join(save_path, f'checkpoint_epoch_{epoch}.pt'))
    
    def load_checkpoint(self, checkpoint_path):
        """Load model checkpoint saved by save_checkpoint.

        Restores model, optimizer, history, loss weights, and (when present)
        scheduler state. Requires setup_optimizer() to have been called so
        self.optimizer/self.scheduler exist.

        SECURITY NOTE: torch.load unpickles arbitrary objects — only load
        checkpoints from trusted sources.

        Returns:
            int: The epoch number stored in the checkpoint.
        """
        checkpoint = torch.load(checkpoint_path, map_location=self.device)
        
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.training_history = checkpoint['training_history']
        
        # Restore loss weights
        weights = checkpoint['loss_weights']
        self.loss_function.tau_pde = weights['tau_pde']
        self.loss_function.tau_bc = weights['tau_bc']
        self.loss_function.tau_interface_u = weights['tau_interface_u']
        self.loss_function.tau_interface_flux = weights['tau_interface_flux']
        
        if self.scheduler is not None and 'scheduler_state_dict' in checkpoint:
            self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        
        return checkpoint['epoch']
    
    def plot_training_history(self, save_path=None):
        """Plot training loss history as a 2x2 figure and save it to disk.

        Panels: total loss, per-component losses, learning rate, and the
        BC/PDE loss ratio (a rough balance indicator). Saved to save_path,
        or 'training_history.png' when none is given; the figure is closed
        afterwards, nothing is shown interactively.

        NOTE(review): the component panels index the loss_* lists by epoch
        position; if component losses were ever skipped during training these
        lists can be shorter than `epochs` and the ratio loop would raise
        IndexError — verify history consistency before plotting.
        """
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(12, 8))
        
        epochs = self.training_history['epochs']
        
        # Total loss
        ax1.semilogy(epochs, self.training_history['loss_total'])
        ax1.set_title('Total Loss')
        ax1.set_xlabel('Epoch')
        ax1.set_ylabel('Loss')
        ax1.grid(True)
        
        # Component losses
        ax2.semilogy(epochs, self.training_history['loss_pde'], label='PDE')
        ax2.semilogy(epochs, self.training_history['loss_boundary'], label='Boundary')
        ax2.semilogy(epochs, self.training_history['loss_interface'], label='Interface')
        ax2.semilogy(epochs, self.training_history['loss_flux'], label='Flux')
        ax2.set_title('Loss Components')
        ax2.set_xlabel('Epoch')
        ax2.set_ylabel('Loss')
        ax2.legend()
        ax2.grid(True)
        
        # Learning rate
        ax3.semilogy(epochs, self.training_history['learning_rates'])
        ax3.set_title('Learning Rate')
        ax3.set_xlabel('Epoch')
        ax3.set_ylabel('Learning Rate')
        ax3.grid(True)
        
        # Loss ratio (for balance monitoring); falls back to 1.0 whenever
        # either component is non-positive to keep the plot well-defined.
        if len(self.training_history['loss_pde']) > 0:
            loss_ratios = []
            for i in range(len(epochs)):
                pde = self.training_history['loss_pde'][i]
                bc = self.training_history['loss_boundary'][i]
                if pde > 0 and bc > 0:
                    loss_ratios.append(bc / pde)
                else:
                    loss_ratios.append(1.0)
            
            ax4.plot(epochs, loss_ratios)
            ax4.set_title('BC/PDE Loss Ratio')
            ax4.set_xlabel('Epoch')
            ax4.set_ylabel('Ratio')
            ax4.grid(True)
        
        plt.tight_layout()
        
        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
            print(f"Training history saved to {save_path}")
        else:
            plt.savefig('training_history.png', dpi=300, bbox_inches='tight')
            print("Training history saved to training_history.png")
        
        plt.close()

if __name__ == "__main__":
    # Smoke-check entry point only: a real experiment wires up the problem
    # setup and model configuration in a dedicated driver script.
    print("Testing AE-PINN solver...")
    print("Solver implementation completed successfully!")