# Parallel I/O and Visualization for NSEMSolver.jl
# Scalable file I/O, checkpointing, and visualization output

using Dates
using Printf

"""
    ParallelIOConfig

Configuration for parallel I/O operations.
"""
struct ParallelIOConfig
    output_directory::String
    file_prefix::String
    use_mpi_io::Bool                    # Use MPI-I/O for better performance
    collective_io::Bool                 # Use collective I/O operations
    stripe_size::Int                    # Parallel file system stripe size
    num_aggregators::Int                # Number of I/O aggregator processes
    compression_level::Int              # Compression level (0-9, 0=none)
    checkpoint_frequency::Int           # Checkpoint every N steps
    visualization_frequency::Int        # Output visualization every N steps
end

"""
    create_parallel_io_config(output_dir::String="output"; kwargs...) -> ParallelIOConfig

Create parallel I/O configuration with sensible defaults.
"""
function create_parallel_io_config(output_dir::String="output";
                                  file_prefix::String="nsem",
                                  use_mpi_io::Bool=true,
                                  collective_io::Bool=true,
                                  stripe_size::Int=1024*1024,  # 1MB
                                  num_aggregators::Int=0,      # Auto-detect
                                  compression_level::Int=1,
                                  checkpoint_frequency::Int=100,
                                  visualization_frequency::Int=10)
    
    return ParallelIOConfig(
        output_dir, file_prefix, use_mpi_io, collective_io,
        stripe_size, num_aggregators, compression_level,
        checkpoint_frequency, visualization_frequency
    )
end

"""
    ParallelVTKWriter

Writer for parallel VTK output with domain decomposition support.
"""
struct ParallelVTKWriter
    config::ParallelIOConfig
    pdomain::ParallelDomain
    timer::ParallelTimer
    
    # VTK-specific settings
    file_format::Symbol                 # :ascii, :binary, :xml
    precision::Symbol                   # :single, :double
    
    # File management
    current_file_index::Base.RefValue{Int}
    
    function ParallelVTKWriter(config::ParallelIOConfig, pdomain::ParallelDomain;
                              file_format::Symbol=:xml, precision::Symbol=:double)
        timer = create_parallel_timer(pdomain.mpi_ctx)
        new(config, pdomain, timer, file_format, precision, Ref(0))
    end
end

"""
    export_parallel_vtk(result::NSResult, pdomain::ParallelDomain, filename::String;
                       include_ghost::Bool=false)

Export parallel VTK files with domain decomposition information.
"""
function export_parallel_vtk(result::NSResult, pdomain::ParallelDomain, filename::String;
                            include_ghost::Bool=false)
    
    start_time = wtime()
    
    # Create output directory if it doesn't exist
    if is_root(pdomain.mpi_ctx)
        mkpath(dirname(filename))
    end
    barrier(pdomain.mpi_ctx)
    
    # Generate process-specific filename
    base_name = splitext(basename(filename))[1]
    dir_name = dirname(filename)
    proc_filename = joinpath(dir_name, "$(base_name)_p$(pdomain.rank).vtu")
    
    # Write local VTK file for this process
    write_local_vtk_file(proc_filename, result, pdomain, include_ghost)
    
    # Root process creates master PVTU file
    if is_root(pdomain.mpi_ctx)
        master_filename = joinpath(dir_name, "$(base_name).pvtu")
        write_parallel_vtk_master_file(master_filename, base_name, pdomain)
    end
    
    barrier(pdomain.mpi_ctx)
    
    elapsed_time = wtime() - start_time
    if is_root(pdomain.mpi_ctx)
        @info "Parallel VTK export completed in $(elapsed_time:.3f) seconds"
    end
end

"""
    write_local_vtk_file(filename::String, result::NSResult, pdomain::ParallelDomain,
                        include_ghost::Bool)

Write VTK file for local domain portion.
"""
function write_local_vtk_file(filename::String, result::NSResult, pdomain::ParallelDomain,
                             include_ghost::Bool)
    
    # Create basic VTK XML structure
    open(filename, "w") do io
        println(io, """<?xml version="1.0"?>""")
        println(io, """<VTKFile type="UnstructuredGrid" version="1.0" byte_order="LittleEndian">""")
        println(io, """  <UnstructuredGrid>""")
        
        # Compute grid dimensions for local domain
        n_local = pdomain.local_domain.n_block
        n_points_1d = pdomain.local_domain.n + 1
        
        if pdomain.local_domain.dim == 2
            n_points = n_points_1d^2 * n_local^2
            n_cells = pdomain.local_domain.n^2 * n_local^2
        else
            n_points = n_points_1d^3 * n_local^3
            n_cells = pdomain.local_domain.n^3 * n_local^3
        end
        
        println(io, """    <Piece NumberOfPoints="$n_points" NumberOfCells="$n_cells">""")
        
        # Write points (coordinates)
        println(io, """      <Points>""")
        println(io, """        <DataArray type="Float64" NumberOfComponents="3" format="ascii">""")
        
        # Generate local coordinates (simplified)
        for k in 0:(pdomain.local_domain.dim == 3 ? n_points_1d-1 : 0)
            for j in 0:n_points_1d-1
                for i in 0:n_points_1d-1
                    x = Float64(i) / n_points_1d
                    y = Float64(j) / n_points_1d  
                    z = pdomain.local_domain.dim == 3 ? Float64(k) / n_points_1d : 0.0
                    println(io, "          $x $y $z")
                end
            end
        end
        
        println(io, """        </DataArray>""")
        println(io, """      </Points>""")
        
        # Write cells (connectivity)
        println(io, """      <Cells>""")
        println(io, """        <DataArray type="Int64" Name="connectivity" format="ascii">""")
        
        # Generate cell connectivity (simplified for quad/hex elements)
        cell_id = 0
        if pdomain.local_domain.dim == 2
            for j in 0:pdomain.local_domain.n*n_local-1
                for i in 0:pdomain.local_domain.n*n_local-1
                    # Quad connectivity
                    p1 = j * n_points_1d + i
                    p2 = j * n_points_1d + (i + 1)
                    p3 = (j + 1) * n_points_1d + (i + 1)
                    p4 = (j + 1) * n_points_1d + i
                    println(io, "          $p1 $p2 $p3 $p4")
                end
            end
        else
            # 3D hex connectivity (simplified)
            for k in 0:pdomain.local_domain.n*n_local-1
                for j in 0:pdomain.local_domain.n*n_local-1
                    for i in 0:pdomain.local_domain.n*n_local-1
                        # Hex connectivity (8 vertices)
                        base = k * n_points_1d^2 + j * n_points_1d + i
                        vertices = [
                            base, base + 1, base + n_points_1d + 1, base + n_points_1d,
                            base + n_points_1d^2, base + n_points_1d^2 + 1,
                            base + n_points_1d^2 + n_points_1d + 1, base + n_points_1d^2 + n_points_1d
                        ]
                        println(io, "          " * join(vertices, " "))
                    end
                end
            end
        end
        
        println(io, """        </DataArray>""")
        
        # Write cell offsets
        println(io, """        <DataArray type="Int64" Name="offsets" format="ascii">""")
        vertices_per_cell = pdomain.local_domain.dim == 2 ? 4 : 8
        for i in 1:n_cells
            println(io, "          $(i * vertices_per_cell)")
        end
        println(io, """        </DataArray>""")
        
        # Write cell types
        println(io, """        <DataArray type="UInt8" Name="types" format="ascii">""")
        cell_type = pdomain.local_domain.dim == 2 ? 9 : 12  # VTK_QUAD : VTK_HEXAHEDRON
        for i in 1:n_cells
            println(io, "          $cell_type")
        end
        println(io, """        </DataArray>""")
        println(io, """      </Cells>""")
        
        # Write point data (solution fields)
        println(io, """      <PointData Scalars="pressure" Vectors="velocity">""")
        
        # Velocity field
        println(io, """        <DataArray type="Float64" Name="velocity" NumberOfComponents="3" format="ascii">""")
        for k in 1:(pdomain.local_domain.dim == 3 ? size(result.u, 3) : 1)
            for j in 1:size(result.u, 2)
                for i in 1:size(result.u, 1)
                    u_val = result.u[i, j, k]
                    v_val = result.v[i, j, k]
                    w_val = result.w !== nothing ? result.w[i, j, k] : 0.0
                    println(io, "          $u_val $v_val $w_val")
                end
            end
        end
        println(io, """        </DataArray>""")
        
        # Pressure field  
        println(io, """        <DataArray type="Float64" Name="pressure" format="ascii">""")
        for k in 1:(pdomain.local_domain.dim == 3 ? size(result.p, 3) : 1)
            for j in 1:size(result.p, 2)
                for i in 1:size(result.p, 1)
                    p_val = result.p[i, j, k]
                    println(io, "          $p_val")
                end
            end
        end
        println(io, """        </DataArray>""")
        
        # Add domain decomposition information
        println(io, """        <DataArray type="Int32" Name="ProcessRank" format="ascii">""")
        for i in 1:n_points
            println(io, "          $(pdomain.rank)")
        end
        println(io, """        </DataArray>""")
        
        println(io, """      </PointData>""")
        
        println(io, """    </Piece>""")
        println(io, """  </UnstructuredGrid>""")
        println(io, """</VTKFile>""")
    end
end

"""
    write_parallel_vtk_master_file(filename::String, base_name::String, pdomain::ParallelDomain)

Write PVTU master file that references all process-local VTU files.
"""
function write_parallel_vtk_master_file(filename::String, base_name::String, pdomain::ParallelDomain)
    open(filename, "w") do io
        println(io, """<?xml version="1.0"?>""")
        println(io, """<VTKFile type="PUnstructuredGrid" version="1.0" byte_order="LittleEndian">""")
        println(io, """  <PUnstructuredGrid GhostLevel="1">""")
        
        # Write point data structure
        println(io, """    <PPointData Scalars="pressure" Vectors="velocity">""")
        println(io, """      <PDataArray type="Float64" Name="velocity" NumberOfComponents="3"/>""")
        println(io, """      <PDataArray type="Float64" Name="pressure"/>""")
        println(io, """      <PDataArray type="Int32" Name="ProcessRank"/>""")
        println(io, """    </PPointData>""")
        
        # Write points structure
        println(io, """    <PPoints>""")
        println(io, """      <PDataArray type="Float64" NumberOfComponents="3"/>""")
        println(io, """    </PPoints>""")
        
        # Reference all process files
        for rank in 0:pdomain.num_procs-1
            proc_filename = "$(base_name)_p$(rank).vtu"
            println(io, """    <Piece Source="$proc_filename"/>""")
        end
        
        println(io, """  </PUnstructuredGrid>""")
        println(io, """</VTKFile>""")
    end
end

"""
    ParallelCheckpointer

Handles parallel checkpointing for restart capabilities.
"""
struct ParallelCheckpointer
    config::ParallelIOConfig
    pdomain::ParallelDomain
    timer::ParallelTimer
    
    # Checkpoint management
    checkpoint_counter::Base.RefValue{Int}
    max_checkpoints_to_keep::Int
    
    function ParallelCheckpointer(config::ParallelIOConfig, pdomain::ParallelDomain;
                                max_checkpoints::Int=5)
        timer = create_parallel_timer(pdomain.mpi_ctx)
        new(config, pdomain, timer, Ref(0), max_checkpoints)
    end
end

"""
    write_checkpoint(checkpointer::ParallelCheckpointer, result::NSResult, 
                    time::Float64, timestep::Int)

Write parallel checkpoint for restart capability.
"""
function write_checkpoint(checkpointer::ParallelCheckpointer, result::NSResult,
                         time::Float64, timestep::Int)
    
    start_timer!(checkpointer.timer, "checkpoint_write")
    
    # Create checkpoint directory
    checkpoint_dir = joinpath(checkpointer.config.output_directory, "checkpoints")
    if is_root(checkpointer.pdomain.mpi_ctx)
        mkpath(checkpoint_dir)
    end
    barrier(checkpointer.pdomain.mpi_ctx)
    
    # Generate checkpoint filename
    checkpoint_counter = checkpointer.checkpoint_counter[]
    checkpoint_name = @sprintf("%s_checkpoint_%06d", checkpointer.config.file_prefix, checkpoint_counter)
    
    # Write process-local checkpoint data
    proc_checkpoint_file = joinpath(checkpoint_dir, "$(checkpoint_name)_p$(checkpointer.pdomain.rank).jld2")
    
    # Prepare checkpoint data
    checkpoint_data = Dict(
        "u" => result.u,
        "v" => result.v,
        "w" => result.w,
        "p" => result.p,
        "time" => time,
        "timestep" => timestep,
        "rank" => checkpointer.pdomain.rank,
        "num_procs" => checkpointer.pdomain.num_procs,
        "convergence_history" => result.convergence_history,
        "options" => result.options
    )
    
    # Write checkpoint file (would use JLD2.jl in practice)
    write_checkpoint_data(proc_checkpoint_file, checkpoint_data)
    
    # Root process writes metadata file
    if is_root(checkpointer.pdomain.mpi_ctx)
        metadata_file = joinpath(checkpoint_dir, "$(checkpoint_name)_metadata.json")
        write_checkpoint_metadata(metadata_file, checkpoint_name, checkpointer.pdomain, time, timestep)
        
        # Clean up old checkpoints
        cleanup_old_checkpoints(checkpointer, checkpoint_dir)
    end
    
    checkpointer.checkpoint_counter[] = checkpoint_counter + 1
    
    stop_timer!(checkpointer.timer, "checkpoint_write")
    
    if is_root(checkpointer.pdomain.mpi_ctx)
        @info "Checkpoint $checkpoint_name written at time $time"
    end
end

"""
    write_checkpoint_data(filename::String, data::Dict)

Write checkpoint data to file (simplified - would use JLD2.jl).
"""
function write_checkpoint_data(filename::String, data::Dict)
    # Simplified text-based checkpoint (in practice, would use binary format)
    open(filename, "w") do io
        for (key, value) in data
            if value isa AbstractArray
                println(io, "[$key]")
                println(io, "dims = $(size(value))")
                println(io, "data = $(vec(value))")
            else
                println(io, "$key = $value")
            end
            println(io)
        end
    end
end

"""
    write_checkpoint_metadata(filename::String, checkpoint_name::String, 
                             pdomain::ParallelDomain, time::Float64, timestep::Int)

Write checkpoint metadata file.
"""
function write_checkpoint_metadata(filename::String, checkpoint_name::String,
                                  pdomain::ParallelDomain, time::Float64, timestep::Int)
    
    metadata = Dict(
        "checkpoint_name" => checkpoint_name,
        "time" => time,
        "timestep" => timestep,
        "num_processes" => pdomain.num_procs,
        "domain_decomposition" => Dict(
            "processor_grid" => pdomain.proc_grid,
            "decomposition_strategy" => pdomain.decomposition_strategy
        ),
        "timestamp" => string(now())
    )
    
    # Write JSON metadata (simplified)
    open(filename, "w") do io
        for (key, value) in metadata
            println(io, "\"$key\": $value,")
        end
    end
end

"""
    cleanup_old_checkpoints(checkpointer::ParallelCheckpointer, checkpoint_dir::String)

Remove old checkpoint files to save disk space.
"""
function cleanup_old_checkpoints(checkpointer::ParallelCheckpointer, checkpoint_dir::String)
    if checkpointer.checkpoint_counter[] > checkpointer.max_checkpoints_to_keep
        # Find old checkpoint files to remove
        checkpoint_files = readdir(checkpoint_dir)
        checkpoint_pattern = r".*_checkpoint_(\d+)_.*"
        
        # Extract checkpoint numbers
        checkpoint_numbers = Int[]
        for file in checkpoint_files
            match_result = match(checkpoint_pattern, file)
            if match_result !== nothing
                push!(checkpoint_numbers, parse(Int, match_result[1]))
            end
        end
        
        # Keep only the most recent checkpoints
        if length(unique(checkpoint_numbers)) > checkpointer.max_checkpoints_to_keep
            sorted_checkpoints = sort(unique(checkpoint_numbers))
            old_checkpoints = sorted_checkpoints[1:end-checkpointer.max_checkpoints_to_keep]
            
            # Remove old checkpoint files
            for old_checkpoint in old_checkpoints
                old_pattern = @sprintf(".*_checkpoint_%06d_.*", old_checkpoint)
                for file in checkpoint_files
                    if occursin(Regex(old_pattern), file)
                        old_file = joinpath(checkpoint_dir, file)
                        try
                            rm(old_file)
                        catch e
                            @warn "Could not remove old checkpoint file $old_file: $e"
                        end
                    end
                end
            end
            
            @info "Cleaned up $(length(old_checkpoints)) old checkpoint(s)"
        end
    end
end

"""
    read_checkpoint(checkpointer::ParallelCheckpointer, checkpoint_name::String) -> Dict

Read parallel checkpoint for restart.
"""
function read_checkpoint(checkpointer::ParallelCheckpointer, checkpoint_name::String)
    checkpoint_dir = joinpath(checkpointer.config.output_directory, "checkpoints")
    proc_checkpoint_file = joinpath(checkpoint_dir, "$(checkpoint_name)_p$(checkpointer.pdomain.rank).jld2")
    
    if !isfile(proc_checkpoint_file)
        throw(ArgumentError("Checkpoint file not found: $proc_checkpoint_file"))
    end
    
    return read_checkpoint_data(proc_checkpoint_file)
end

"""
    read_checkpoint_data(filename::String) -> Dict

Read checkpoint data from file.
"""
function read_checkpoint_data(filename::String)
    # Simplified text-based reading (in practice, would use JLD2.jl)
    data = Dict{String, Any}()
    current_key = ""
    
    open(filename, "r") do io
        for line in eachline(io)
            line = strip(line)
            if startswith(line, "[") && endswith(line, "]")
                current_key = line[2:end-1]
            elseif occursin("=", line) && !startswith(line, "dims =") && !startswith(line, "data =")
                parts = split(line, " = ", limit=2)
                if length(parts) == 2
                    key, value_str = parts
                    # Parse value based on type
                    if occursin(".", value_str) || occursin("e", lowercase(value_str))
                        data[key] = parse(Float64, value_str)
                    elseif all(isdigit, replace(value_str, "-" => ""))
                        data[key] = parse(Int, value_str)
                    else
                        data[key] = value_str
                    end
                end
            elseif startswith(line, "data =")
                # Parse array data (simplified)
                data_str = line[7:end]
                # Would parse actual array data here
                data[current_key] = zeros(10, 10)  # Placeholder
            end
        end
    end
    
    return data
end

"""
    collect_solution_on_root(local_result::NSResult, pdomain::ParallelDomain) -> Union{NSResult, Nothing}

Collect distributed solution on root process for analysis or output.
"""
function collect_solution_on_root(local_result::NSResult, pdomain::ParallelDomain)
    if is_root(pdomain.mpi_ctx)
        # Root process collects solution from all processes
        global_u = gather_to_root(vec(local_result.u), pdomain.mpi_ctx)
        global_v = gather_to_root(vec(local_result.v), pdomain.mpi_ctx)
        global_p = gather_to_root(vec(local_result.p), pdomain.mpi_ctx)
        
        global_w = if local_result.w !== nothing
            gather_to_root(vec(local_result.w), pdomain.mpi_ctx)
        else
            nothing
        end
        
        # Reconstruct global solution (simplified - would need proper domain mapping)
        total_points = length(global_u)
        side_length = round(Int, total_points^(1/pdomain.local_domain.dim))
        
        if pdomain.local_domain.dim == 2
            global_solution = NSResult(
                u = reshape(global_u, side_length, side_length),
                v = reshape(global_v, side_length, side_length),
                w = global_w !== nothing ? reshape(global_w, side_length, side_length) : nothing,
                p = reshape(global_p, side_length, side_length),
                x = local_result.x,  # Would need to reconstruct global coordinates
                y = local_result.y,
                z = local_result.z,
                converged = local_result.converged,
                iterations = local_result.iterations,
                residual_norm = local_result.residual_norm,
                solve_time = local_result.solve_time,
                convergence_history = local_result.convergence_history,
                multidomain = pdomain.global_domain,
                options = local_result.options
            )
        else
            # 3D case
            global_solution = NSResult(
                u = reshape(global_u, side_length, side_length, side_length),
                v = reshape(global_v, side_length, side_length, side_length),
                w = global_w !== nothing ? reshape(global_w, side_length, side_length, side_length) : nothing,
                p = reshape(global_p, side_length, side_length, side_length),
                x = local_result.x,
                y = local_result.y,
                z = local_result.z,
                converged = local_result.converged,
                iterations = local_result.iterations,
                residual_norm = local_result.residual_norm,
                solve_time = local_result.solve_time,
                convergence_history = local_result.convergence_history,
                multidomain = pdomain.global_domain,
                options = local_result.options
            )
        end
        
        return global_solution
    else
        # Non-root processes just send their data
        gather_to_root(vec(local_result.u), pdomain.mpi_ctx)
        gather_to_root(vec(local_result.v), pdomain.mpi_ctx)
        gather_to_root(vec(local_result.p), pdomain.mpi_ctx)
        if local_result.w !== nothing
            gather_to_root(vec(local_result.w), pdomain.mpi_ctx)
        end
        
        return nothing
    end
end

# Export public interface
export ParallelIOConfig, create_parallel_io_config
export ParallelVTKWriter, export_parallel_vtk
export ParallelCheckpointer, write_checkpoint, read_checkpoint
export collect_solution_on_root