"""
    GridOperations

A comprehensive Julia module implementing GSI grid operations and domain decomposition,
ported from the GSI Fortran gridmod.F90 and related grid management files.

This module provides:
- Grid initialization and configuration for global and regional domains
- MPI-aware domain decomposition and subdomain management
- Coordinate transformations (geographical, spectral, vertical)
- Grid-to-subdomain and subdomain-to-grid operations
- Parallel grid distribution and communication patterns

# Mathematical Framework

## Grid Coordinates
Global grid uses spherical coordinates (λ, φ, σ):
- λ: longitude (0° to 360°)
- φ: latitude (-90° to 90°) 
- σ: terrain-following vertical coordinate (0 to 1)

Regional grids use projected coordinates (x, y, σ):
- x, y: horizontal coordinates in projection space
- σ: same vertical coordinate as global

## Domain Decomposition
For parallel processing, the domain is decomposed into subdomains:
- Each MPI task handles one subdomain
- Overlap regions for finite difference stencils
- Efficient gather/scatter operations for global operations

# Usage

```julia
using GSICoreAnalysis.GridOperations

# Initialize global grid
config = GlobalGridConfig(
    360, 180, 64;   # nlon, nlat, nsig are positional arguments
    proj = "latlon"
)
grid = initialize_grid(config)

# Initialize regional grid  
config = RegionalGridConfig(
    200, 150, 64;   # nx, ny, nsig are positional arguments
    proj = "lambert_conformal",
    center_lon = -95.0, center_lat = 40.0
)
grid = initialize_grid(config)

# Domain decomposition
decomp = create_decomposition(grid, nproc=16)

# Grid transformations
lon_2d, lat_2d, x_2d, y_2d, sigma = get_coordinates(grid)   # regional grid returns 5 arrays
x_proj, y_proj = geographic_to_projection(grid.proj, lon_2d, lat_2d)
```
"""
module GridOperations

using LinearAlgebra
using SparseArrays
using StaticArrays
using Distributed
using FFTW
using Printf

# Define local abstract types to avoid circular dependencies
# (a package-level AbstractAnalysisConfig presumably exists elsewhere; this
#  local stand-in keeps GridOperations loadable on its own — TODO confirm the
#  two hierarchies stay compatible)
abstract type AbstractAnalysisConfig end

# Export main types
export AbstractGridConfig, GlobalGridConfig, RegionalGridConfig
export AbstractGrid, GlobalGrid, RegionalGrid
export GridConfiguration  # Alias for compatibility
export AbstractDecomposition, DomainDecomposition
export AbstractProjection, LatLonProjection, LambertConformalProjection, PolarStereographicProjection

# Export main functions
export initialize_grid, create_decomposition
export get_coordinates, geographic_to_projection, projection_to_geographic
export subdomain_to_grid, grid_to_subdomain
export compute_map_factors, compute_rotation_angles

# Grid configuration types
"""
    AbstractGridConfig

Abstract base type for grid configuration parameters.

Concrete subtypes (`GlobalGridConfig`, `RegionalGridConfig`) carry the
dimensions and projection parameters consumed by `initialize_grid`.
"""
abstract type AbstractGridConfig <: AbstractAnalysisConfig end

# Type aliases for compatibility
# `GridConfiguration` is kept (and exported) for callers that predate the
# Abstract*-style naming.
const GridConfiguration = AbstractGridConfig

"""
    GlobalGridConfig

Configuration for global atmospheric grids using spherical coordinates.

# Fields
- `nlon::Int`: Number of longitude points
- `nlat::Int`: Number of latitude points  
- `nsig::Int`: Number of vertical sigma levels
- `proj::String`: Projection type ("latlon", "gaussian")
- `lon_start::Float64`: Starting longitude (degrees)
- `lat_start::Float64`: Starting latitude (degrees)
- `dlon::Float64`: Longitude spacing (degrees)
- `dlat::Float64`: Latitude spacing (degrees)
"""
struct GlobalGridConfig <: AbstractGridConfig
    nlon::Int
    nlat::Int
    nsig::Int
    proj::String
    lon_start::Float64
    lat_start::Float64
    dlon::Float64
    dlat::Float64
    
    function GlobalGridConfig(nlon::Int, nlat::Int, nsig::Int; 
                             proj::String="latlon",
                             lon_start::Float64=0.0,
                             lat_start::Float64=-90.0,
                             dlon::Float64=360.0/nlon,
                             dlat::Float64=180.0/(nlat-1))
        new(nlon, nlat, nsig, proj, lon_start, lat_start, dlon, dlat)
    end
end

"""
    RegionalGridConfig

Configuration for regional atmospheric grids using map projections.

# Fields
- `nx::Int`: Number of x-direction points
- `ny::Int`: Number of y-direction points
- `nsig::Int`: Number of vertical sigma levels
- `proj::String`: Projection type ("lambert_conformal", "polar_stereographic", "mercator")
- `center_lon::Float64`: Central longitude (degrees)
- `center_lat::Float64`: Central latitude (degrees) 
- `dx::Float64`: Grid spacing in x-direction (m)
- `dy::Float64`: Grid spacing in y-direction (m)
- `truelat1::Float64`: First true latitude for conic projections (degrees)
- `truelat2::Float64`: Second true latitude for conic projections (degrees)
"""
struct RegionalGridConfig <: AbstractGridConfig
    nx::Int
    ny::Int
    nsig::Int
    proj::String
    center_lon::Float64
    center_lat::Float64
    dx::Float64
    dy::Float64
    truelat1::Float64
    truelat2::Float64
    
    function RegionalGridConfig(nx::Int, ny::Int, nsig::Int;
                               proj::String="lambert_conformal",
                               center_lon::Float64=-95.0,
                               center_lat::Float64=40.0,
                               dx::Float64=12000.0,
                               dy::Float64=12000.0,
                               truelat1::Float64=33.0,
                               truelat2::Float64=45.0)
        new(nx, ny, nsig, proj, center_lon, center_lat, dx, dy, truelat1, truelat2)
    end
end

# Projection types
"""
    AbstractProjection

Abstract base type for map projections.

Subtypes implement `geographic_to_projection`, `projection_to_geographic`,
and `compute_map_factors`.
"""
abstract type AbstractProjection end

"""
    LatLonProjection

Latitude-longitude (equirectangular) projection.

# Fields
- `lon_start`, `lat_start`: Geographic coordinates of the grid origin (degrees)
- `dlon`, `dlat`: Grid spacing in longitude/latitude (degrees)
"""
struct LatLonProjection <: AbstractProjection
    lon_start::Float64   # origin longitude (degrees)
    lat_start::Float64   # origin latitude (degrees)
    dlon::Float64        # longitude spacing (degrees)
    dlat::Float64        # latitude spacing (degrees)
end

"""
    LambertConformalProjection  

Lambert conformal conic projection with standard parallels.
"""
struct LambertConformalProjection <: AbstractProjection
    center_lon::Float64
    center_lat::Float64
    truelat1::Float64
    truelat2::Float64
    dx::Float64
    dy::Float64
    
    # Computed projection parameters
    n::Float64           # Cone constant
    f::Float64           # Flattening factor
    rho0::Float64        # Reference radius of curvature
    
    function LambertConformalProjection(center_lon, center_lat, truelat1, truelat2, dx, dy)
        # Earth radius (m)
        earth_radius = 6371200.0
        
        # Convert degrees to radians
        phi0 = deg2rad(center_lat)
        phi1 = deg2rad(truelat1)
        phi2 = deg2rad(truelat2)
        
        # Compute projection constants
        if abs(phi1 - phi2) < 1e-10
            n = sin(phi1)
        else
            n = log(cos(phi1)/cos(phi2)) / log(tan(π/4 + phi2/2)/tan(π/4 + phi1/2))
        end
        
        f = (cos(phi1) * tan(π/4 + phi1/2)^n) / n
        rho0 = earth_radius * f / tan(π/4 + phi0/2)^n
        
        new(center_lon, center_lat, truelat1, truelat2, dx, dy, n, f, rho0)
    end
end

"""
    PolarStereographicProjection

Polar stereographic projection for high-latitude regions.
"""
struct PolarStereographicProjection <: AbstractProjection
    center_lon::Float64
    center_lat::Float64
    truelat::Float64
    dx::Float64
    dy::Float64
end

# Grid types
"""
    AbstractGrid

Abstract base type for atmospheric grids.
"""
abstract type AbstractGrid end

"""
    GlobalGrid

Global atmospheric grid with spherical coordinates and spectral capabilities.

# Fields
- `config::GlobalGridConfig`: Grid configuration
- `proj::AbstractProjection`: Map projection (always a `LatLonProjection` here)
- `lon::Matrix{Float64}`: Longitude coordinates, stored (nlat × nlon)
- `lat::Matrix{Float64}`: Latitude coordinates, stored (nlat × nlon)
- `sigma::Vector{Float64}`: Vertical sigma coordinates (nsig), equally spaced 0 → 1
- `map_factors::NamedTuple`: Map scale factors `(m1, m2)`, stored as unit
  matrices. NOTE(review): `compute_map_factors(::LatLonProjection, …)` returns
  `m2 = cos(lat)` instead — confirm which convention downstream code expects.
"""
struct GlobalGrid <: AbstractGrid
    config::GlobalGridConfig
    proj::AbstractProjection
    lon::Matrix{Float64}
    lat::Matrix{Float64}
    sigma::Vector{Float64}
    map_factors::NamedTuple{(:m1, :m2), Tuple{Matrix{Float64}, Matrix{Float64}}}
    
    function GlobalGrid(config::GlobalGridConfig)
        # 1-D coordinate axes spanning the configured domain
        lon_1d = config.lon_start .+ (0:config.nlon-1) .* config.dlon
        lat_1d = config.lat_start .+ (0:config.nlat-1) .* config.dlat
        
        # 2-D coordinate grids, laid out (nlat × nlon): rows vary with
        # latitude, columns with longitude
        lon = repeat(lon_1d', config.nlat, 1)
        lat = repeat(lat_1d, 1, config.nlon)
        
        # Standard sigma coordinates (equally spaced from 0 to 1)
        sigma = collect(range(0.0, 1.0, length=config.nsig))
        
        # Create projection
        proj = LatLonProjection(config.lon_start, config.lat_start, config.dlon, config.dlat)
        
        # Placeholder unit map factors, shaped (nlat × nlon)
        m1 = ones(Float64, config.nlat, config.nlon)
        m2 = ones(Float64, config.nlat, config.nlon)
        map_factors = (m1=m1, m2=m2)
        
        new(config, proj, lon, lat, sigma, map_factors)
    end
end

"""
    RegionalGrid

Regional atmospheric grid with map projection and limited area domain.

# Fields
- `config::RegionalGridConfig`: Grid configuration
- `proj::AbstractProjection`: Map projection
- `lon::Matrix{Float64}`: Longitude coordinates (nx × ny)
- `lat::Matrix{Float64}`: Latitude coordinates (nx × ny)
- `x::Matrix{Float64}`: Projected x coordinates (nx × ny)
- `y::Matrix{Float64}`: Projected y coordinates (nx × ny)  
- `sigma::Vector{Float64}`: Vertical sigma coordinates (nsig)
- `map_factors::NamedTuple`: Map scale factors and rotation angles
"""
struct RegionalGrid <: AbstractGrid
    config::RegionalGridConfig
    proj::AbstractProjection
    lon::Matrix{Float64}
    lat::Matrix{Float64}
    x::Matrix{Float64}
    y::Matrix{Float64}
    sigma::Vector{Float64}
    map_factors::NamedTuple{(:m1, :m2, :cos_alpha, :sin_alpha), 
                           Tuple{Matrix{Float64}, Matrix{Float64}, Matrix{Float64}, Matrix{Float64}}}
    
    function RegionalGrid(config::RegionalGridConfig)
        # Create projected coordinate arrays
        x_1d = ((1:config.nx) .- (config.nx + 1)/2) .* config.dx
        y_1d = ((1:config.ny) .- (config.ny + 1)/2) .* config.dy
        
        # Create 2D projected coordinate grids
        x = repeat(x_1d', config.ny, 1)
        y = repeat(y_1d, 1, config.nx)
        
        # Create projection based on type
        if config.proj == "lambert_conformal"
            proj = LambertConformalProjection(config.center_lon, config.center_lat,
                                            config.truelat1, config.truelat2,
                                            config.dx, config.dy)
        elseif config.proj == "polar_stereographic"
            proj = PolarStereographicProjection(config.center_lon, config.center_lat,
                                              config.truelat1, config.dx, config.dy)
        else
            error("Unsupported projection: $(config.proj)")
        end
        
        # Convert projected coordinates to geographic
        lon, lat = projection_to_geographic(proj, x, y)
        
        # Standard sigma coordinates
        sigma = collect(range(0.0, 1.0, length=config.nsig))
        
        # Compute map factors and rotation angles
        map_factors = compute_map_factors(proj, x, y, lon, lat)
        
        new(config, proj, lon, lat, x, y, sigma, map_factors)
    end
end

# Domain decomposition types
"""
    AbstractDecomposition

Abstract base type for domain decomposition strategies.
"""
abstract type AbstractDecomposition end

"""
    DomainDecomposition

MPI-aware domain decomposition for parallel grid operations.

Processor ranks are 0-based and row-major over the process grid:
`rank = (j - 1) * nprocx + (i - 1)` for processor coordinates `(i, j)`.

# Fields
- `grid::AbstractGrid`: Associated grid
- `nproc::Int`: Total number of MPI processes
- `nprocx::Int`: Number of processes in x-direction
- `nprocy::Int`: Number of processes in y-direction
- `subdomain_sizes::Matrix{Tuple{Int,Int}}`: Local (nx, ny) sizes, indexed (i, j)
- `subdomain_starts::Matrix{Tuple{Int,Int}}`: 1-based global start indices, indexed (i, j)
- `halo_width::Int`: Halo width for inter-process communication
- `global_to_local::Function`: Maps (i_global, j_global) → (i_local, j_local, rank)
- `local_to_global::Function`: Maps (i_local, j_local, rank) → (i_global, j_global)
"""
struct DomainDecomposition <: AbstractDecomposition
    grid::AbstractGrid
    nproc::Int
    nprocx::Int
    nprocy::Int
    subdomain_sizes::Matrix{Tuple{Int,Int}}
    subdomain_starts::Matrix{Tuple{Int,Int}}
    halo_width::Int
    global_to_local::Function
    local_to_global::Function
    
    function DomainDecomposition(grid::AbstractGrid, nproc::Int; halo_width::Int=3)
        # Determine a processor layout with near-square subdomains
        nprocx, nprocy = optimize_processor_layout(grid, nproc)
        
        # Global horizontal dimensions depend on the grid flavor
        if isa(grid, GlobalGrid)
            nx, ny = grid.config.nlon, grid.config.nlat
        else  # RegionalGrid
            nx, ny = grid.config.nx, grid.config.ny
        end
        
        subdomain_sizes = compute_subdomain_sizes(nx, ny, nprocx, nprocy)
        subdomain_starts = compute_subdomain_starts(subdomain_sizes)
        
        # Closures capturing the layout for index translation
        global_to_local = create_global_to_local_mapping(subdomain_sizes, subdomain_starts, nprocx, nprocy)
        local_to_global = create_local_to_global_mapping(subdomain_sizes, subdomain_starts, nprocx, nprocy)
        
        new(grid, nproc, nprocx, nprocy, subdomain_sizes, subdomain_starts, 
            halo_width, global_to_local, local_to_global)
    end
end

# Main interface functions

"""
    initialize_grid(config::AbstractGridConfig) -> AbstractGrid

Construct the grid described by `config`: a `GlobalGrid` for a
`GlobalGridConfig`, a `RegionalGrid` for a `RegionalGridConfig`.

# Example
```julia
config = GlobalGridConfig(360, 180, 64)
grid = initialize_grid(config)
```
"""
initialize_grid(config::GlobalGridConfig) = GlobalGrid(config)
initialize_grid(config::RegionalGridConfig) = RegionalGrid(config)

"""
    create_decomposition(grid::AbstractGrid, nproc::Int; halo_width::Int=3) -> DomainDecomposition

Create domain decomposition for parallel processing.

# Arguments
- `grid`: Grid to decompose
- `nproc`: Number of MPI processes
- `halo_width`: Width of halo region for inter-process communication

# Returns
- Domain decomposition structure

# Example
```julia
grid = initialize_grid(config)
decomp = create_decomposition(grid, 16)
```
"""
function create_decomposition(grid::AbstractGrid, nproc::Int; halo_width::Int=3)
    return DomainDecomposition(grid, nproc; halo_width=halo_width)
end

"""
    get_coordinates(grid::AbstractGrid) -> Tuple

Get coordinate arrays from grid.

# Arguments
- `grid`: Grid object

# Returns
- For GlobalGrid: (lon, lat, sigma)
- For RegionalGrid: (lon, lat, x, y, sigma)

# Example
```julia
lon, lat, sigma = get_coordinates(global_grid)
lon, lat, x, y, sigma = get_coordinates(regional_grid)
```
"""
function get_coordinates(grid::GlobalGrid)
    return (grid.lon, grid.lat, grid.sigma)
end

function get_coordinates(grid::RegionalGrid)
    return (grid.lon, grid.lat, grid.x, grid.y, grid.sigma)
end

# Coordinate transformation functions

"""
    geographic_to_projection(proj::AbstractProjection, lon, lat) -> (x, y)

Convert geographic coordinates (longitude, latitude, in degrees) to projected
coordinates `(x, y)`.

# Arguments
- `proj`: Map projection
- `lon`, `lat`: Geographic coordinate arrays (degrees)

# Mathematical Details
For the Lambert conformal projection:
```
n = ln(cos(φ₁)/cos(φ₂)) / ln(tan(π/4 + φ₂/2)/tan(π/4 + φ₁/2))
F = cos(φ₁) * tan(π/4 + φ₁/2)ⁿ / n
ρ = a * F / tan(π/4 + φ/2)ⁿ
θ = n * (λ - λ₀)
x = ρ * sin(θ)
y = ρ₀ - ρ * cos(θ)
```
"""
function geographic_to_projection(proj::LatLonProjection, lon, lat)
    # Equirectangular case: x/y are fractional grid offsets from the origin.
    x_offset = (lon .- proj.lon_start) ./ proj.dlon
    y_offset = (lat .- proj.lat_start) ./ proj.dlat
    return (x_offset, y_offset)
end

function geographic_to_projection(proj::LambertConformalProjection, lon, lat)
    # Spherical Earth radius (m)
    R = 6371200.0
    
    # Work in radians, with longitude relative to the projection center
    lat_rad = deg2rad.(lat)
    dlon_rad = deg2rad.(lon .- proj.center_lon)
    
    # Conic radius of curvature: ρ = R F / tanⁿ(π/4 + φ/2)
    r = R .* proj.f ./ tan.(π/4 .+ lat_rad ./ 2).^proj.n
    
    # Polar angle in the cone plane: θ = n (λ - λ₀)
    ang = proj.n .* dlon_rad
    
    # Cartesian projected coordinates
    return (r .* sin.(ang), proj.rho0 .- r .* cos.(ang))
end

"""
    geographic_to_projection(proj::PolarStereographicProjection, lon, lat) -> (x, y)

Polar stereographic forward transform; the pole is chosen by the sign of
`proj.center_lat` (positive → north, otherwise south).
"""
function geographic_to_projection(proj::PolarStereographicProjection, lon, lat)
    # Spherical Earth radius (m)
    earth_radius = 6371200.0
    
    # Convert to radians
    phi = deg2rad.(lat)
    lambda = deg2rad.(lon .- proj.center_lon)
    
    # Stereographic scale factor k = 2R / (1 ± sin φ).
    # BUG FIX: the original used scalar `+` and `/` against arrays
    # (`2 * R / (1 + sin.(phi))`), which throws a MethodError for matrix
    # input; every operation must be broadcast.
    if proj.center_lat > 0  # North pole
        k = 2 .* earth_radius ./ (1 .+ sin.(phi))
    else  # South pole
        k = 2 .* earth_radius ./ (1 .- sin.(phi))
    end
    
    # Projected coordinates
    x = k .* cos.(phi) .* sin.(lambda)
    y = k .* cos.(phi) .* cos.(lambda)
    
    return (x, y)
end

"""
    projection_to_geographic(proj::AbstractProjection, x, y) -> Tuple{Matrix{Float64}, Matrix{Float64}}

Convert projected coordinates (x, y) to geographic coordinates (longitude, latitude).

# Arguments
- `proj`: Map projection
- `x`: x coordinate array
- `y`: y coordinate array

# Returns
- Tuple of (lon, lat) geographic coordinates (degrees)
"""
function projection_to_geographic(proj::LatLonProjection, x, y)
    lon = proj.lon_start .+ x .* proj.dlon
    lat = proj.lat_start .+ y .* proj.dlat
    return (lon, lat)
end

function projection_to_geographic(proj::LambertConformalProjection, x, y)
    # Spherical Earth radius (m)
    R = 6371200.0
    
    # Signed conic radius (sign follows the cone constant so the
    # southern-hemisphere case is handled) and polar angle.
    dy = proj.rho0 .- y
    r = sign.(proj.n) .* sqrt.(x.^2 .+ dy.^2)
    ang = atan.(x, dy)
    
    # Invert θ = n(λ - λ₀) and ρ = R F / tanⁿ(π/4 + φ/2)
    lon = rad2deg.(ang ./ proj.n) .+ proj.center_lon
    lat = rad2deg.(2 .* atan.((R * proj.f ./ r).^(1/proj.n)) .- π/2)
    
    return (lon, lat)
end

function projection_to_geographic(proj::PolarStereographicProjection, x, y)
    # Spherical Earth radius (m)
    R = 6371200.0
    
    # Distance from the pole in projection space
    rho = sqrt.(x.^2 .+ y.^2)
    
    # sin(lat) recovered from the stereographic radius; sign flips for the
    # south-pole variant.
    sin_term = asin.((4 * R^2 .- rho.^2) ./ (4 * R^2 .+ rho.^2))
    lat = proj.center_lat > 0 ? rad2deg.(sin_term) : -rad2deg.(sin_term)
    
    lon = proj.center_lon .+ rad2deg.(atan.(x, y))
    
    return (lon, lat)
end

# Map factors and rotation angles

"""
    compute_map_factors(proj::AbstractProjection, x, y, lon, lat) -> NamedTuple

Compute map scale factors and grid-rotation angles for `proj`.

# Arguments
- `proj`: Map projection
- `x`, `y`: Projected coordinates
- `lon`, `lat`: Geographic coordinates (degrees)

# Returns
- NamedTuple `(m1, m2, cos_alpha, sin_alpha)`: scale factors in the two
  horizontal directions plus the cosine/sine of the rotation angle between
  grid and geographic coordinates.
"""
function compute_map_factors(proj::LatLonProjection, x, y, lon, lat)
    nrows, ncols = size(lon)
    # Lat-lon grid: unit factor in one direction, cos(latitude) in the
    # other; no rotation between grid and geographic axes.
    cos_lat = cos.(deg2rad.(lat))
    return (m1 = ones(Float64, nrows, ncols),
            m2 = cos_lat,
            cos_alpha = ones(Float64, nrows, ncols),
            sin_alpha = zeros(Float64, nrows, ncols))
end

function compute_map_factors(proj::LambertConformalProjection, x, y, lon, lat)
    ny, nx = size(lon)
    
    # Unit scale factors are kept here to preserve existing behavior.
    # NOTE(review): the true Lambert scale factor varies with latitude
    # (m = cos φ₁ tanⁿ(π/4 + φ₁/2) / (cos φ tanⁿ(π/4 + φ/2))) — confirm
    # against the original Fortran gridmod source whether unit factors are
    # intended. (The unused `phi = deg2rad.(lat)` local was removed.)
    m1 = ones(Float64, ny, nx)
    m2 = ones(Float64, ny, nx)
    
    # Rotation angle between grid and geographic north.
    # For Lambert Conformal: α = n * (λ - λ₀)
    lambda = deg2rad.(lon .- proj.center_lon)
    alpha = proj.n .* lambda
    
    cos_alpha = cos.(alpha)
    sin_alpha = sin.(alpha)
    
    return (m1=m1, m2=m2, cos_alpha=cos_alpha, sin_alpha=sin_alpha)
end

function compute_map_factors(proj::PolarStereographicProjection, x, y, lon, lat)
    # Scale factor relative to the latitude of true scale:
    # m = (1 ± sin φ_true) / (1 ± sin φ), sign chosen per pole.
    phi = deg2rad.(lat)
    sin_true = sin(deg2rad(proj.truelat))
    # BUG FIX: the original mixed scalar `+` with arrays (`1 + sin.(phi)`),
    # which throws a MethodError for matrix input; broadcast the sums.
    if proj.center_lat > 0  # North pole
        scale_factor = (1 + sin_true) ./ (1 .+ sin.(phi))
    else  # South pole
        scale_factor = (1 - sin_true) ./ (1 .- sin.(phi))
    end
    
    # Isotropic scaling for a stereographic projection
    m1 = scale_factor
    m2 = scale_factor
    
    # Rotation angle follows the longitude offset; sign flips for the
    # southern-hemisphere variant.
    lambda = deg2rad.(lon .- proj.center_lon)
    if proj.center_lat > 0
        cos_alpha = cos.(lambda)
        sin_alpha = sin.(lambda)
    else
        cos_alpha = cos.(-lambda)
        sin_alpha = sin.(-lambda)
    end
    
    return (m1=m1, m2=m2, cos_alpha=cos_alpha, sin_alpha=sin_alpha)
end

"""
    compute_rotation_angles(grid::AbstractGrid) -> Tuple{Matrix{Float64}, Matrix{Float64}}

Compute rotation angles for converting between grid-relative and earth-relative winds.

# Arguments
- `grid`: Grid object

# Returns
- Tuple of (cos_alpha, sin_alpha) rotation angle components

# Usage
```julia
cos_alpha, sin_alpha = compute_rotation_angles(grid)
u_earth = u_grid * cos_alpha - v_grid * sin_alpha
v_earth = u_grid * sin_alpha + v_grid * cos_alpha
```
"""
function compute_rotation_angles(grid::GlobalGrid)
    return (grid.map_factors.cos_alpha, grid.map_factors.sin_alpha)
end

function compute_rotation_angles(grid::RegionalGrid)
    return (grid.map_factors.cos_alpha, grid.map_factors.sin_alpha)
end

# Grid-subdomain operations

"""
    subdomain_to_grid(decomp::DomainDecomposition, local_data::AbstractArray, rank::Int) -> AbstractArray

Place subdomain data from MPI process `rank` into a zero-initialized global
grid array (serial stand-in for an MPI gather).

# Arguments
- `decomp`: Domain decomposition
- `local_data`: Local subdomain data (2-D, or 3-D with vertical levels)
- `rank`: MPI rank (0-based, row-major: rank = (j-1)*nprocx + (i-1))

# Returns
- Global array of size (ny, nx, nz); for 2-D input nz == 1. Points outside
  the rank's subdomain remain zero.

# Throws
- An error if `rank` is outside `0:decomp.nproc-1`.
"""
function subdomain_to_grid(decomp::DomainDecomposition, local_data::AbstractArray, rank::Int=0)
    # This would typically use MPI_Gather in a real implementation;
    # this serial version exists for testing.
    
    if isa(decomp.grid, GlobalGrid)
        nx, ny = decomp.grid.config.nlon, decomp.grid.config.nlat
    else
        nx, ny = decomp.grid.config.nx, decomp.grid.config.ny
    end
    
    0 <= rank < decomp.nproc || error("Invalid rank: $rank")
    
    nz = ndims(local_data) > 2 ? size(local_data, 3) : 1
    global_data = zeros(eltype(local_data), ny, nx, nz)
    
    # BUG FIX: the original nested search only `break`ed the inner loop
    # without incrementing proc_rank, so after the first match the condition
    # re-fired at i == 1 of every later processor row, overwriting wrong
    # subdomains. Derive the processor coordinates from the rank directly.
    i = rank % decomp.nprocx + 1
    j = rank ÷ decomp.nprocx + 1
    
    istart, jstart = decomp.subdomain_starts[i, j]
    isize, jsize = decomp.subdomain_sizes[i, j]
    
    if ndims(local_data) == 2
        # nz == 1 here, so 2-index assignment into the 3-D array is valid
        # (trailing singleton dimension).
        global_data[jstart:jstart+jsize-1, istart:istart+isize-1] = local_data
    else
        global_data[jstart:jstart+jsize-1, istart:istart+isize-1, :] = local_data
    end
    
    return global_data
end

"""
    grid_to_subdomain(decomp::DomainDecomposition, global_data::AbstractArray, rank::Int) -> AbstractArray

Scatter global grid data to subdomains for MPI process with given rank.

# Arguments
- `decomp`: Domain decomposition
- `global_data`: Global grid data
- `rank`: MPI rank (0-based)

# Returns
- Local subdomain data for the specified rank
"""
function grid_to_subdomain(decomp::DomainDecomposition, global_data::AbstractArray, rank::Int)
    # Find the processor layout for this rank
    proc_rank = 0
    for j in 1:decomp.nprocy
        for i in 1:decomp.nprocx
            if proc_rank == rank
                istart, jstart = decomp.subdomain_starts[i, j]
                isize, jsize = decomp.subdomain_sizes[i, j]
                
                if ndims(global_data) == 2
                    return global_data[jstart:jstart+jsize-1, istart:istart+isize-1]
                else
                    return global_data[jstart:jstart+jsize-1, istart:istart+isize-1, :]
                end
            end
            proc_rank += 1
        end
    end
    
    error("Invalid rank: $rank")
end

# Utility functions for domain decomposition

"""
    optimize_processor_layout(grid::AbstractGrid, nproc::Int) -> Tuple{Int, Int}

Pick the factorization `nprocx * nprocy == nproc` whose subdomains are
closest to square for the given grid dimensions.

# Arguments
- `grid`: Grid object
- `nproc`: Total number of MPI processes

# Returns
- `(nprocx, nprocy)` with `nprocx * nprocy == nproc`
"""
function optimize_processor_layout(grid::AbstractGrid, nproc::Int)
    if isa(grid, GlobalGrid)
        nx, ny = grid.config.nlon, grid.config.nlat
    else
        nx, ny = grid.config.nx, grid.config.ny
    end
    
    # Scan every factor pair; keep the first strictly-best squareness score.
    best_layout = (1, nproc)
    best_score = Inf
    for px in 1:nproc
        nproc % px == 0 || continue
        py = nproc ÷ px
        # Deviation of the subdomain aspect ratio from 1 (square)
        score = abs((nx ÷ px) / (ny ÷ py) - 1.0)
        if score < best_score
            best_score = score
            best_layout = (px, py)
        end
    end
    
    return best_layout
end

"""
    compute_subdomain_sizes(nx::Int, ny::Int, nprocx::Int, nprocy::Int) -> Matrix{Tuple{Int,Int}}

Compute subdomain sizes for each processor in the decomposition.
"""
function compute_subdomain_sizes(nx::Int, ny::Int, nprocx::Int, nprocy::Int)
    sizes = Matrix{Tuple{Int,Int}}(undef, nprocx, nprocy)
    
    # Base sizes
    base_nx = nx ÷ nprocx
    base_ny = ny ÷ nprocy
    
    # Extra points for load balancing
    extra_nx = nx % nprocx
    extra_ny = ny % nprocy
    
    for j in 1:nprocy
        for i in 1:nprocx
            # Distribute extra points to first few processors
            local_nx = base_nx + (i <= extra_nx ? 1 : 0)
            local_ny = base_ny + (j <= extra_ny ? 1 : 0)
            sizes[i, j] = (local_nx, local_ny)
        end
    end
    
    return sizes
end

"""
    compute_subdomain_starts(sizes::Matrix{Tuple{Int,Int}}) -> Matrix{Tuple{Int,Int}}

Compute starting indices for each subdomain.
"""
function compute_subdomain_starts(sizes::Matrix{Tuple{Int,Int}})
    nprocx, nprocy = size(sizes)
    starts = Matrix{Tuple{Int,Int}}(undef, nprocx, nprocy)
    
    for j in 1:nprocy
        for i in 1:nprocx
            # Compute starting indices
            istart = 1 + sum(sizes[ii, j][1] for ii in 1:i-1)
            jstart = 1 + sum(sizes[i, jj][2] for jj in 1:j-1)
            starts[i, j] = (istart, jstart)
        end
    end
    
    return starts
end

"""
    create_global_to_local_mapping(sizes, starts, nprocx, nprocy) -> Function

Create function to map global indices to local subdomain indices.
"""
function create_global_to_local_mapping(sizes, starts, nprocx, nprocy)
    function global_to_local(i_global::Int, j_global::Int)
        # Find which subdomain contains this global point
        for j in 1:nprocy
            for i in 1:nprocx
                istart, jstart = starts[i, j]
                isize, jsize = sizes[i, j]
                
                if istart <= i_global < istart + isize && 
                   jstart <= j_global < jstart + jsize
                    # Return local indices and processor rank
                    i_local = i_global - istart + 1
                    j_local = j_global - jstart + 1
                    rank = (j - 1) * nprocx + (i - 1)
                    return (i_local, j_local, rank)
                end
            end
        end
        error("Global indices ($i_global, $j_global) not found in any subdomain")
    end
    
    return global_to_local
end

"""
    create_local_to_global_mapping(sizes, starts, nprocx, nprocy) -> Function

Create function to map local subdomain indices to global indices.
"""
function create_local_to_global_mapping(sizes, starts, nprocx, nprocy)
    function local_to_global(i_local::Int, j_local::Int, rank::Int)
        # Convert rank to processor coordinates
        j_proc = rank ÷ nprocx + 1
        i_proc = rank % nprocx + 1
        
        # Get starting indices for this processor
        istart, jstart = starts[i_proc, j_proc]
        
        # Compute global indices
        i_global = istart + i_local - 1
        j_global = jstart + j_local - 1
        
        return (i_global, j_global)
    end
    
    return local_to_global
end

end # module GridOperations