"""
Common utility functions for RINN implementation.
"""

import torch
import numpy as np
import random


def set_random_seed(seed=42):
    """Seed every RNG used by the project for reproducible runs.

    Covers torch (CPU and all CUDA devices), numpy, and the stdlib
    ``random`` module, then pins cuDNN to deterministic kernels.

    Args:
        seed: Integer seed applied to all libraries (default 42).
    """
    seeders = (
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,
        np.random.seed,
        random.seed,
    )
    for seeder in seeders:
        seeder(seed)
    # Trade cuDNN autotuning speed for bitwise-reproducible kernels.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def generate_collocation_points(domain, n_points, method='uniform', boundary_ratio=0.2):
    """
    Generate collocation points for PDE training.

    Args:
        domain: Dictionary with 'x_min', 'x_max', 'y_min', 'y_max'
        n_points: Total number of points to generate
        method: 'uniform', 'random', or 'latin_hypercube'
        boundary_ratio: Ratio of points to place on boundaries.
            NOTE(review): currently unused by every branch below — kept
            only for interface compatibility; confirm against callers.

    Returns:
        torch.Tensor: Points of shape (m, 2). m == n_points for 'random'
        and 'latin_hypercube'; for 'uniform' the points form an
        n_x * n_y grid with n_x * n_y <= n_points (exact only when
        n_points factors evenly).

    Raises:
        ValueError: If `method` is not a supported sampling strategy.
    """
    x_min, x_max = domain['x_min'], domain['x_max']
    y_min, y_max = domain['y_min'], domain['y_max']

    if method == 'uniform':
        # Regular tensor-product grid; may yield slightly fewer than
        # n_points when n_points is not a product of two near-equal ints.
        n_x = int(np.sqrt(n_points))
        n_y = n_points // n_x
        x = torch.linspace(x_min, x_max, n_x)
        y = torch.linspace(y_min, y_max, n_y)
        xx, yy = torch.meshgrid(x, y, indexing='ij')
        points = torch.stack([xx.flatten(), yy.flatten()], dim=1)

    elif method == 'random':
        # Independent uniform samples inside the rectangle.
        x = torch.rand(n_points) * (x_max - x_min) + x_min
        y = torch.rand(n_points) * (y_max - y_min) + y_min
        points = torch.stack([x, y], dim=1)

    elif method == 'latin_hypercube':
        # Stratified space-filling samples via scipy's LHS sampler.
        from scipy.stats import qmc
        sampler = qmc.LatinHypercube(d=2)
        sample = sampler.random(n=n_points)
        x = sample[:, 0] * (x_max - x_min) + x_min
        y = sample[:, 1] * (y_max - y_min) + y_min
        points = torch.tensor(np.column_stack([x, y]), dtype=torch.float32)

    else:
        # Previously an unknown method fell through to `return points`
        # and raised a confusing UnboundLocalError; fail fast instead.
        raise ValueError(f"Unknown sampling method: {method!r}")

    return points


def generate_boundary_points(domain, n_boundary, include_corners=True):
    """
    Generate points on domain boundaries, with no point emitted twice.

    Corner points (when requested) belong to the bottom/top edges; the
    left/right edges always drop their endpoints.  The original code
    duplicated all four corners when include_corners=False because the
    left/right slices kept their endpoints alongside the full
    bottom/top edges.

    Args:
        domain: Dictionary with 'x_min', 'x_max', 'y_min', 'y_max'
        n_boundary: Number of points per edge (before corner removal)
        include_corners: Whether corner points appear in the output

    Returns:
        torch.Tensor: Boundary points of shape (m, 2); empty (0, 2)
        tensor when every edge degenerates (e.g. n_boundary <= 2 with
        include_corners=False).
    """
    x_min, x_max = domain['x_min'], domain['x_max']
    y_min, y_max = domain['y_min'], domain['y_max']

    pieces = []
    xs = torch.linspace(x_min, x_max, n_boundary)
    ys = torch.linspace(y_min, y_max, n_boundary)

    # Bottom / top edges carry the corners (if any).
    x_edge = xs if include_corners else xs[1:-1]
    if len(x_edge) > 0:
        pieces.append(torch.stack([x_edge, torch.full_like(x_edge, y_min)], dim=1))
        pieces.append(torch.stack([x_edge, torch.full_like(x_edge, y_max)], dim=1))

    # Left / right edges never repeat the corner points.
    y_edge = ys[1:-1]
    if len(y_edge) > 0:
        pieces.append(torch.stack([torch.full_like(y_edge, x_min), y_edge], dim=1))
        pieces.append(torch.stack([torch.full_like(y_edge, x_max), y_edge], dim=1))

    if not pieces:
        return torch.empty((0, 2))
    return torch.cat(pieces, dim=0)


def compute_derivatives(network, points, order=1):
    """
    Compute derivatives of network output with respect to input coordinates.

    Args:
        network: Callable mapping a (batch_size, 2) tensor to outputs.
        points: Input points (batch_size, 2). Modified in place:
            requires_grad is enabled so autograd can differentiate.
        order: Derivative order (0, 1 or 2).

    Returns:
        Dictionary with derivative tensors: always 'u'; adds
        'u_x', 'u_y' when order >= 1; adds 'u_xx', 'u_yy', 'u_xy'
        when order >= 2.
    """
    points.requires_grad_(True)
    u = network(points)

    # Previously an order < 1 call crashed with UnboundLocalError;
    # now the output dict always exists and carries at least 'u'.
    derivatives = {'u': u}

    if order >= 1:
        # First derivatives: one grad call gives both columns.
        grad = torch.autograd.grad(
            outputs=u, inputs=points,
            grad_outputs=torch.ones_like(u),
            create_graph=True, retain_graph=True
        )[0]
        u_x = grad[:, 0:1]
        u_y = grad[:, 1:2]
        derivatives.update({'u_x': u_x, 'u_y': u_y})

        if order >= 2:
            # Gradient of u_x yields [u_xx, u_xy] in its two columns —
            # one call instead of the two the original code made.
            grad_x = torch.autograd.grad(
                outputs=u_x, inputs=points,
                grad_outputs=torch.ones_like(u_x),
                create_graph=True, retain_graph=True
            )[0]
            # Gradient of u_y: only its y-column (u_yy) is needed.
            grad_y = torch.autograd.grad(
                outputs=u_y, inputs=points,
                grad_outputs=torch.ones_like(u_y),
                create_graph=True, retain_graph=True
            )[0]
            derivatives.update({
                'u_xx': grad_x[:, 0:1],
                'u_yy': grad_y[:, 1:2],
                'u_xy': grad_x[:, 1:2],
            })

    return derivatives


def laplacian(network, points):
    """Return the Laplacian (u_xx + u_yy) of the network output.

    Args:
        network: PyTorch neural network (any callable on (batch, 2)).
        points: Input points (batch_size, 2).

    Returns:
        torch.Tensor: Laplacian values at each point.
    """
    second = compute_derivatives(network, points, order=2)
    return second['u_xx'] + second['u_yy']


def print_training_info(epoch, loss_dict, print_every=100):
    """Print a one-line loss summary every `print_every` epochs.

    Args:
        epoch: Current epoch number.
        loss_dict: Mapping of loss name -> value to display.
        print_every: Only epochs divisible by this are printed.
    """
    if epoch % print_every != 0:
        return
    formatted = (f'{name}: {value:.6e}' for name, value in loss_dict.items())
    print(f"Epoch {epoch:6d}: {', '.join(formatted)}")


def save_model_state(model, optimizer, epoch, loss, filepath):
    """Persist a full training checkpoint to disk.

    Args:
        model: Module whose state_dict is checkpointed.
        optimizer: Optimizer whose state_dict is checkpointed.
        epoch: Epoch number recorded with the checkpoint.
        loss: Loss value recorded with the checkpoint.
        filepath: Destination path for the checkpoint file.
    """
    checkpoint = {
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'loss': loss,
    }
    torch.save(checkpoint, filepath)


def load_model_state(model, optimizer, filepath, map_location=None):
    """Load model state from a checkpoint written by `save_model_state`.

    Args:
        model: Module to restore in place.
        optimizer: Optimizer to restore, or None to skip optimizer state.
        filepath: Path of the checkpoint file.
        map_location: Optional device remapping forwarded to torch.load
            (e.g. 'cpu' to restore a GPU-saved checkpoint on a CPU-only
            host). Default None preserves the original behavior.

    Returns:
        tuple: (epoch, loss) as stored in the checkpoint.
    """
    checkpoint = torch.load(filepath, map_location=map_location)
    model.load_state_dict(checkpoint['model_state_dict'])
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    return checkpoint['epoch'], checkpoint['loss']