import numpy as np
from scipy.optimize import minimize
from typing import Dict, List, Optional, Tuple
import matplotlib.pyplot as plt
import random
import time
class NumericalModel:
    """A simple linear model implementation for demonstration"""

    def __init__(self, state_size: int, time_step: float = 0.1):
        self.state_size = state_size
        self.time_step = time_step
        # Build the constant propagator once; the model is linear, so the
        # same matrix serves as dynamics, tangent linear and (transposed) adjoint.
        self.dynamics_matrix = self._create_dynamics_matrix()

    def _create_dynamics_matrix(self) -> np.ndarray:
        """Create a simple tridiagonal dynamics matrix."""
        n, dt = self.state_size, self.time_step
        main_band = np.ones(n) * (1 - 2 * dt)
        side_band = np.ones(n - 1) * dt
        # Main diagonal plus symmetric first off-diagonals (advection terms)
        return np.diag(main_band) + np.diag(side_band, 1) + np.diag(side_band, -1)

    def integrate(self, x0: np.ndarray, t_steps: int) -> np.ndarray:
        """Run model forward in time; returns array of shape (t_steps+1, state_size)."""
        states = [np.asarray(x0)]
        for _ in range(t_steps):
            states.append(self.step(states[-1]))
        return np.array(states)

    def step(self, x: np.ndarray) -> np.ndarray:
        """Single time step: apply the linear propagator."""
        return self.dynamics_matrix @ x

    def tangent_linear(self, x_ref: np.ndarray) -> np.ndarray:
        """Tangent linear model (identical to the propagator for a linear model)."""
        return self.dynamics_matrix

    def adjoint(self, x_ref: np.ndarray) -> np.ndarray:
        """Adjoint of the tangent linear model (its transpose)."""
        return self.dynamics_matrix.T

class ObservationOperator:
    """Observation operator that maps from model space to observation space"""

    def __init__(self, state_size: int, obs_size: int, obs_indices: Optional[List[int]] = None, seed: Optional[int] = None):
        self.state_size = state_size
        self.obs_size = obs_size

        # Seeded generator gives reproducible observation placement
        rng = np.random.default_rng(seed)

        # Use caller-provided locations, otherwise pick obs_size distinct
        # grid points at random (kept sorted for readability)
        if obs_indices is not None:
            self.obs_indices = obs_indices
        else:
            self.obs_indices = sorted(rng.choice(state_size, obs_size, replace=False))

        # H is a 0/1 selection matrix: row i picks state component obs_indices[i]
        H = np.zeros((obs_size, state_size))
        H[np.arange(obs_size), np.asarray(self.obs_indices)] = 1.0
        self.H_matrix = H

    def h(self, x: np.ndarray) -> np.ndarray:
        """Apply observation operator to state vector."""
        return self.H_matrix @ x

    def h_linearized(self, x_ref: np.ndarray) -> np.ndarray:
        """Linearized observation operator (H itself, since h is linear)."""
        return self.H_matrix

    def h_adjoint(self, x_ref: np.ndarray) -> np.ndarray:
        """Adjoint of the linearized observation operator (H transposed)."""
        return self.H_matrix.T

class AssimilationConfig:
    """Configuration for data assimilation.

    Holds the method name, problem sizes, loop limits and the two error
    covariance matrices: B (background errors, spatially correlated) and
    R (observation errors, diagonal).
    """

    def __init__(self,
                 method: str = '3DVar',
                 state_size: int = 40,
                 obs_size: int = 20,
                 time_window: int = 1,
                 max_outer_loops: int = 3,
                 max_inner_loops: int = 100,
                 bg_error_variance: float = 1.0,
                 obs_error_variance: float = 0.5):

        self.method = method
        self.state_size = state_size
        self.obs_size = obs_size
        self.time_window = time_window
        self.max_outer_loops = max_outer_loops
        self.max_inner_loops = max_inner_loops

        # Create background error covariance (B) with spatial correlation
        self.b_matrix = self._create_b_matrix(bg_error_variance)

        # Create observation error covariance (R) - typically diagonal
        self.r_matrix = self._create_r_matrix(obs_error_variance)

    def _create_b_matrix(self, variance: float, corr_length: float = 5.0) -> np.ndarray:
        """Create B matrix with Gaussian spatial correlations.

        B[i, j] = variance * exp(-|i - j|^2 / (2 * corr_length^2)),
        computed with a single broadcasted expression instead of an
        O(n^2) Python double loop.
        """
        idx = np.arange(self.state_size)
        dist = np.abs(idx[:, None] - idx[None, :])
        return variance * np.exp(-dist ** 2 / (2 * corr_length ** 2))

    def _create_r_matrix(self, variance: float) -> np.ndarray:
        """Create diagonal R matrix with uniform observation error variance."""
        return np.eye(self.obs_size) * variance

class AssimilationSystem:
    """Core variational data assimilation system.

    Implements three variational analysis methods:

      * ``3DVar``    -- single-time analysis using observations at t=0.
      * ``4DVar``    -- incremental 4D-Var over a time window, gradient
                        computed by one backward adjoint sweep per evaluation.
      * ``DRP4DVar`` -- dimension-reduced projection 4D-Var: the increment is
                        confined to the span of k ensemble perturbations, so
                        minimization runs over a k-dimensional control vector.
    """

    def __init__(self, config: "AssimilationConfig", model: "NumericalModel"):
        self.config = config
        self.model = model
        self.x_b = None    # Background state, stored when assimilate() runs
        self.obs_ops = []  # One observation operator per time in the window

    def set_observation_operators(self, obs_ops: List["ObservationOperator"]):
        """Set observation operators for each time step."""
        self.obs_ops = obs_ops

    def assimilate(self, x_b: np.ndarray, observations: Dict[int, np.ndarray]) -> np.ndarray:
        """Run the configured assimilation method and return the analysis.

        Args:
            x_b: Background (first-guess) state vector.
            observations: Mapping from time index to observation vector.

        Raises:
            ValueError: If ``config.method`` is not a recognized method name.
        """
        self.x_b = x_b

        if self.config.method == '3DVar':
            return self._3dvar(x_b, observations)
        elif self.config.method == '4DVar':
            return self._4dvar(x_b, observations)
        elif self.config.method == 'DRP4DVar':
            return self._drp_4dvar(x_b, observations)
        else:
            raise ValueError(f"Unknown assimilation method: {self.config.method}")

    def _3dvar(self, x_b: np.ndarray, obs: Dict[int, np.ndarray]) -> np.ndarray:
        """3D-Var: minimize J(x) = Jb(x) + Jo(x) using only t=0 observations."""
        if 0 not in obs:
            raise ValueError("Observations at time 0 are required for 3D-Var")

        y_obs = obs[0]  # Single-time observation vector, constant over the minimization

        def cost_function(x):
            # Background term: 0.5 (x - x_b)^T B^-1 (x - x_b)
            dx = x - x_b
            jb = 0.5 * np.dot(dx.T, np.linalg.solve(self.config.b_matrix, dx))

            # Observation term: 0.5 (y - H(x))^T R^-1 (y - H(x))
            diff = y_obs - self.obs_ops[0].h(x)
            jo = 0.5 * np.dot(diff.T, np.linalg.solve(self.config.r_matrix, diff))

            return jb + jo

        def grad_function(x):
            # grad Jb = B^-1 (x - x_b)
            grad_jb = np.linalg.solve(self.config.b_matrix, x - x_b)

            # grad Jo = -H^T R^-1 (y - H(x))
            hx = self.obs_ops[0].h(x)
            H = self.obs_ops[0].h_linearized(x)
            R_inv_diff = np.linalg.solve(self.config.r_matrix, y_obs - hx)

            return grad_jb - H.T @ R_inv_diff

        result = minimize(cost_function, x_b, method='L-BFGS-B', jac=grad_function,
                          options={'maxiter': self.config.max_inner_loops})

        return result.x

    def _4dvar(self, x_b: np.ndarray, obs: Dict[int, np.ndarray]) -> np.ndarray:
        """Incremental 4D-Var.

        Each outer loop linearizes the model about the current trajectory and
        solves a quadratic subproblem for the increment dx0 at the window
        start; the gradient comes from one forward (tangent linear) sweep plus
        one backward (adjoint) sweep.
        """
        x_a = x_b.copy()  # Current analysis, refined once per outer loop

        for outer in range(self.config.max_outer_loops):
            print(f"Outer loop {outer+1}/{self.config.max_outer_loops}")

            # Reference trajectory about which the model is linearized
            trajectory = self.model.integrate(x_a, self.config.time_window)

            def incremental_cost(dx0):
                # Background term: 0.5 dx0^T B^-1 dx0
                jb = 0.5 * np.dot(dx0.T, np.linalg.solve(self.config.b_matrix, dx0))

                # Observation term accumulated across the time window
                jo = 0.0
                dx_t = dx0.copy()

                for t in range(self.config.time_window + 1):
                    if t in obs:
                        H = self.obs_ops[t].h_linearized(trajectory[t])
                        # Innovation d_t = y_t - H(x_t) about the reference trajectory
                        innovation = obs[t] - self.obs_ops[t].h(trajectory[t])
                        diff = innovation - H @ dx_t
                        jo += 0.5 * np.dot(diff.T, np.linalg.solve(self.config.r_matrix, diff))

                    # BUGFIX: the increment must be propagated at EVERY step of
                    # the window, not only at observed times (the original
                    # `continue` skipped this, desynchronizing cost and gradient
                    # whenever an intermediate time had no observations).
                    if t < self.config.time_window:
                        dx_t = self.model.tangent_linear(trajectory[t]) @ dx_t

                return jb + jo

            def incremental_grad(dx0):
                # Gradient of the background term: B^-1 dx0
                grad = np.linalg.solve(self.config.b_matrix, dx0)

                # Forward sweep: increment at every time in the window
                dx_trajectory = [dx0]
                for t in range(self.config.time_window):
                    dx_trajectory.append(self.model.tangent_linear(trajectory[t]) @ dx_trajectory[-1])

                # Backward (adjoint) sweep accumulating observation forcing:
                # lambda ends up as sum_t M_0^T..M_{t-1}^T H_t^T R^-1 (d_t - H_t dx_t)
                lambda_t = np.zeros_like(dx0)
                for t in range(self.config.time_window, -1, -1):
                    if t in obs:
                        H = self.obs_ops[t].h_linearized(trajectory[t])
                        innovation = obs[t] - self.obs_ops[t].h(trajectory[t])
                        R_inv_diff = np.linalg.solve(self.config.r_matrix,
                                                     innovation - H @ dx_trajectory[t])
                        lambda_t += H.T @ R_inv_diff

                    # Propagate adjoint variable backwards (skip at the first time)
                    if t > 0:
                        lambda_t = self.model.adjoint(trajectory[t - 1]) @ lambda_t

                # Minus sign: grad Jo = -lambda for a minimization problem
                return grad - lambda_t

            # Inner loop: minimize the quadratic cost for the increment
            result = minimize(incremental_cost, np.zeros_like(x_b), method='L-BFGS-B',
                              jac=incremental_grad,
                              options={'maxiter': self.config.max_inner_loops})

            # Update the analysis with the optimal increment
            x_a += result.x

            # Print diagnostics
            print(f"  Final cost: {incremental_cost(result.x):.6f}")
            print(f"  Increment norm: {np.linalg.norm(result.x):.6f}")

        return x_a

    def _drp_4dvar(self, x_b: np.ndarray, obs: Dict[int, np.ndarray]) -> np.ndarray:
        """DRP-4DVar: dimension-reduced projection 4D-Var.

        The analysis increment is restricted to the span of k ensemble
        perturbations (columns of P_x), so the cost function is minimized over
        a k-dimensional control vector alpha and the model-space increment is
        recovered as P_x @ alpha.
        """
        x_a = x_b.copy()  # Current analysis, refined once per outer loop
        n = len(x_b)      # State vector dimension

        # Reduced dimension k << n; honor config.ensemble_size when present.
        k = min(self.config.ensemble_size, n // 5) if hasattr(self.config, 'ensemble_size') else n // 5
        # Guard against degenerate ensembles: k - 1 appears as a divisor below.
        k = max(k, 2)

        # R^-1 is reused in every cost/gradient evaluation: invert it once,
        # and keep it local instead of mutating the shared config object.
        r_inv = np.linalg.inv(self.config.r_matrix)

        print(f"Running DRP-4DVar with {k} ensemble members")

        for outer in range(self.config.max_outer_loops):
            print(f"Outer loop {outer+1}/{self.config.max_outer_loops}")

            # Reference trajectory for this outer loop
            trajectory = self.model.integrate(x_a, self.config.time_window)

            # Draw k perturbations from N(0, B); in a real implementation these
            # would come from an ensemble forecast. Rows are members (k x n).
            samples = np.random.multivariate_normal(np.zeros(n), self.config.b_matrix, size=k)
            # Center the members and store them as columns (n x k)
            centered = (samples - samples.mean(axis=0)).T

            # Model-space projection matrix P_x (n x k)
            P_x = centered / np.sqrt(k - 1)

            start_time = time.time()
            # Observation-space projections P_y[t]: propagate the whole n x k
            # perturbation matrix step by step — O(T) matrix products overall,
            # instead of rebuilding the full transition chain for every
            # observation time (O(T^2) in the original).
            P_y = {}
            dx_matrix = centered
            for t in range(self.config.time_window + 1):
                if t > 0:
                    dx_matrix = self.model.tangent_linear(trajectory[t - 1]) @ dx_matrix
                if t not in obs:
                    continue

                H = self.obs_ops[t].h_linearized(trajectory[t])  # m x n
                dy = H @ dx_matrix                               # m x k
                # Re-center in observation space (columns are already ~zero-mean;
                # kept for numerical symmetry with the model-space centering)
                dy = dy - dy.mean(axis=1, keepdims=True)
                P_y[t] = dy / np.sqrt(k - 1)
            end_time = time.time()
            print(f"Observation space perturbations Time: {end_time - start_time:.3f} seconds")

            # Innovations d_t = y_t - H(x_t) are constant per outer loop:
            # compute them once instead of inside every cost/gradient call.
            innovations = {t: obs[t] - self.obs_ops[t].h(trajectory[t]) for t in P_y}

            def drp_cost(alpha):
                """Reduced-space cost: 0.5 |alpha|^2 + observation misfit."""
                # Background term is the identity in the reduced space
                jb = 0.5 * np.dot(alpha, alpha)

                jo = 0.0
                for t, proj in P_y.items():
                    diff = innovations[t] - proj @ alpha
                    jo += 0.5 * np.dot(diff, r_inv @ diff)

                return jb + jo

            def drp_grad(alpha):
                """Gradient: alpha - sum_t P_y[t]^T R^-1 (d_t - P_y[t] alpha)."""
                grad = alpha.copy()
                for t, proj in P_y.items():
                    diff = innovations[t] - proj @ alpha
                    grad -= proj.T @ (r_inv @ diff)
                return grad

            # Inner loop: minimize over the reduced-dimension control variable
            result = minimize(drp_cost, np.zeros(k), method='L-BFGS-B',
                              jac=drp_grad,
                              options={'maxiter': self.config.max_inner_loops})

            # Map the optimal reduced control variable back to model space
            delta_x = P_x @ result.x
            x_a += delta_x

            # Print diagnostics
            print(f"  Final cost: {drp_cost(result.x):.6f}")
            print(f"  Increment norm: {np.linalg.norm(delta_x):.6f}")

        return x_a

def generate_truth_and_observations(
    config: "AssimilationConfig",
    model: "NumericalModel",
    obs_ops: List["ObservationOperator"],
    rng: Optional[np.random.Generator] = None
) -> Tuple[np.ndarray, np.ndarray, Dict[int, np.ndarray]]:
    """Generate synthetic truth and observations for testing.

    Args:
        config: Provides sizes, the time window and the B/R covariances used
            to perturb the background and the observations.
        model: Forward model used to integrate the truth over the window.
        obs_ops: One observation operator per time step (indices 0..time_window).
        rng: Optional numpy Generator for reproducible noise. When omitted,
            the legacy global ``np.random`` functions are used (original
            behavior), so existing callers are unaffected.

    Returns:
        Tuple of (true state, perturbed background state, observations dict
        keyed by time index).
    """
    # Allow reproducible experiments without changing the default behavior
    draw = rng.multivariate_normal if rng is not None else np.random.multivariate_normal

    # Truth: superposition of two sine waves on the unit interval
    grid = np.linspace(0, 1, config.state_size)
    x_truth = np.sin(2 * np.pi * grid) + 0.5 * np.sin(6 * np.pi * grid)

    # Background = truth + one draw from the background error covariance B
    x_b = x_truth + draw(np.zeros(config.state_size), config.b_matrix)

    # Observations: observe the true trajectory, then add R-distributed noise
    truth_traj = model.integrate(x_truth, config.time_window)
    observations = {
        t: obs_ops[t].h(truth_traj[t]) + draw(np.zeros(config.obs_size), config.r_matrix)
        for t in range(config.time_window + 1)
    }

    return x_truth, x_b, observations

def visualize_results(
    x_truth: np.ndarray,
    x_b: np.ndarray, 
    x_a: np.ndarray,
    observations: Dict[int, np.ndarray],
    obs_ops: List[ObservationOperator],
    title: str
):
    """Visualize the assimilation results"""
    plt.figure(figsize=(10, 6))

    # Truth, background and analysis as lines over the grid
    for series, fmt, lbl in ((x_truth, 'k-', 'Truth'),
                             (x_b, 'b--', 'Background'),
                             (x_a, 'r-', 'Analysis')):
        plt.plot(series, fmt, label=lbl)

    # Observed values at their grid locations (time 0 only)
    if 0 in observations:
        plt.scatter(obs_ops[0].obs_indices, observations[0],
                    c='g', marker='o', s=30, label='Observations')

    plt.legend()
    plt.title(title)
    plt.xlabel('Grid Point')
    plt.ylabel('Value')
    plt.grid(True)
    plt.tight_layout()
    plt.show()

def run_assimilation_example(config: AssimilationConfig):
    """Run a complete assimilation example"""

    model = NumericalModel(config.state_size)

    # One observation operator with randomly placed observations per window time
    obs_ops = [ObservationOperator(config.state_size, config.obs_size)
               for _ in range(config.time_window + 1)]

    # Synthetic truth, perturbed background and noisy observations
    x_truth, x_b, observations = generate_truth_and_observations(config, model, obs_ops)

    # Build the system and produce the analysis
    da_system = AssimilationSystem(config, model)
    da_system.set_observation_operators(obs_ops)
    x_a = da_system.assimilate(x_b, observations)

    # Root-mean-square errors against the known truth
    def _rmse(state):
        return np.sqrt(np.mean((state - x_truth) ** 2))

    rmse_b = _rmse(x_b)
    rmse_a = _rmse(x_a)

    print(f"\nResults for {config.method}:")
    print(f"Background RMSE: {rmse_b:.4f}")
    print(f"Analysis RMSE: {rmse_a:.4f}")
    print(f"Improvement: {(1 - rmse_a/rmse_b)*100:.1f}%")

    visualize_results(x_truth, x_b, x_a, observations, obs_ops, 
                     f"{config.method} Assimilation Results")

    return x_truth, x_b, x_a, observations
