"""
Extreme Learning Machine (ELM) implementation for SOC estimation
"""

import numpy as np
from sklearn.preprocessing import StandardScaler
from typing import Tuple, Optional


class ExtremeLearningMachine:
    """
    Single-hidden-layer Extreme Learning Machine (ELM) for regression
    (SOC estimation).

    The input weights and hidden biases are drawn randomly and are the
    parameters an external optimizer (e.g. IDBO) tunes through
    ``get_params``/``set_params``.  The output weights are solved
    analytically with the Moore-Penrose pseudoinverse, which is what makes
    ELM training a single least-squares step rather than iterative descent.
    """

    def __init__(
        self,
        input_size: int,
        hidden_size: int,
        output_size: int,
        activation: str = 'sigmoid',
        random_state: Optional[int] = None
    ):
        """
        Initialize Extreme Learning Machine

        Args:
            input_size: Number of input features
            hidden_size: Number of hidden neurons
            output_size: Number of output neurons
            activation: Activation function ('sigmoid', 'tanh', 'relu')
            random_state: Random seed for reproducibility
        """
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.activation = activation

        # Use a dedicated RandomState instead of np.random.seed(): seeding
        # the global RNG mutates shared state and silently affects every
        # other consumer of np.random in the process.  For a given seed this
        # generator draws exactly the same values as the old global-seed code.
        rng = np.random.RandomState(random_state)

        # Hidden-layer parameters: these are what IDBO (or any external
        # optimizer) replaces via set_params().
        self.input_weights = rng.uniform(-1, 1, (self.input_size, self.hidden_size))
        self.biases = rng.uniform(-1, 1, self.hidden_size)

        # Solved analytically in fit(); None marks the model as unfitted.
        self.output_weights = None

        # Standardizes inputs; statistics are fitted on the training data.
        self.scaler = StandardScaler()

    def _activate(self, x: np.ndarray) -> np.ndarray:
        """
        Apply the configured activation function element-wise.

        Args:
            x: Input array

        Returns:
            Activated output (same shape as ``x``)

        Raises:
            ValueError: If ``self.activation`` is not one of
                'sigmoid', 'tanh', 'relu'.
        """
        if self.activation == 'sigmoid':
            return 1.0 / (1.0 + np.exp(-x))
        elif self.activation == 'tanh':
            return np.tanh(x)
        elif self.activation == 'relu':
            return np.maximum(0, x)
        else:
            raise ValueError(f"Unsupported activation function: {self.activation}")

    def _calculate_hidden_layer_output(self, X: np.ndarray) -> np.ndarray:
        """
        Calculate the output of the hidden layer.

        Args:
            X: Input data of shape (n_samples, input_size)

        Returns:
            Hidden layer output H of shape (n_samples, hidden_size)
        """
        # Affine projection through the (randomly assigned) hidden layer,
        # then the element-wise nonlinearity.
        weighted_sum = np.dot(X, self.input_weights) + self.biases
        return self._activate(weighted_sum)

    def fit(self, X: np.ndarray, y: np.ndarray) -> None:
        """
        Train the ELM model.

        Args:
            X: Training data of shape (n_samples, input_size)
            y: Target values of shape (n_samples, output_size)
        """
        # Fit the scaler on the training distribution and transform.
        X_scaled = self.scaler.fit_transform(X)

        # Hidden-layer representation of the training set.
        H = self._calculate_hidden_layer_output(X_scaled)

        # Analytical least-squares solution via the Moore-Penrose
        # pseudoinverse — the step that makes ELM training fast.
        H_pinv = np.linalg.pinv(H)
        self.output_weights = np.dot(H_pinv, y)

    def predict(self, X: np.ndarray) -> np.ndarray:
        """
        Make predictions using the trained ELM model.

        Args:
            X: Input data of shape (n_samples, input_size)

        Returns:
            Predictions of shape (n_samples, output_size)

        Raises:
            RuntimeError: If called before fit().
        """
        # Fail fast with a clear message instead of the opaque error
        # np.dot(H, None) would produce on an unfitted model.
        if self.output_weights is None:
            raise RuntimeError("Model has not been fitted; call fit() first.")

        # Apply the scaling learned during fit() — never re-fit here.
        X_scaled = self.scaler.transform(X)
        H = self._calculate_hidden_layer_output(X_scaled)
        return np.dot(H, self.output_weights)

    def evaluate(self, X: np.ndarray, y: np.ndarray) -> Tuple[float, float, float]:
        """
        Evaluate the model performance.

        Args:
            X: Test data
            y: True values

        Returns:
            Tuple of (RMSE, MAE, R²) as plain Python floats.  R² is NaN
            when ``y`` is constant (zero total variance).
        """
        y_pred = self.predict(X)

        # Root Mean Squared Error (RMSE)
        rmse = float(np.sqrt(np.mean((y_pred - y) ** 2)))

        # Mean Absolute Error (MAE)
        mae = float(np.mean(np.abs(y_pred - y)))

        # R² (coefficient of determination); guard against division by
        # zero when the targets have no variance.
        ss_total = float(np.sum((y - np.mean(y)) ** 2))
        ss_residual = float(np.sum((y - y_pred) ** 2))
        r2 = 1.0 - ss_residual / ss_total if ss_total > 0.0 else float('nan')

        return rmse, mae, r2

    def get_params(self) -> np.ndarray:
        """
        Get the model parameters (input weights and biases) as a flat array.

        Returns:
            Flattened array of length input_size*hidden_size + hidden_size.
        """
        # ravel() avoids an extra copy; np.concatenate copies anyway.
        return np.concatenate([self.input_weights.ravel(), self.biases])

    def set_params(self, params: np.ndarray) -> None:
        """
        Set the model parameters from a flat array.

        Args:
            params: Flattened array of length
                input_size*hidden_size + hidden_size
                (layout matching get_params()).

        Raises:
            ValueError: If ``params`` has the wrong number of elements.
        """
        params = np.asarray(params, dtype=float).ravel()

        # Validate up front: the old code silently accepted any length and
        # an oversized vector corrupted `biases`, only failing much later.
        weights_size = self.input_size * self.hidden_size
        expected = weights_size + self.hidden_size
        if params.size != expected:
            raise ValueError(
                f"Expected {expected} parameters "
                f"({weights_size} weights + {self.hidden_size} biases), "
                f"got {params.size}"
            )

        self.input_weights = params[:weights_size].reshape(self.input_size, self.hidden_size)
        # Copy so later mutation of the caller's array cannot alias biases.
        self.biases = params[weights_size:].copy()
