#ifndef XF_SOLVER_CHOLESKY_OPTIMIZED_HPP
#define XF_SOLVER_CHOLESKY_OPTIMIZED_HPP

#include <complex>
#include <cstdio>

#include "ap_fixed.h"
#include "hls_stream.h"
#include "hls_x_complex.h"
#include "utils/std_complex_utils.h"
#include "utils/x_matrix_utils.hpp"

namespace xf {
namespace solver {

// ===================================================================================================================
// OPTIMIZATION 1: Enhanced traits for arch0 with optimized parameters
// Default traits: route every intermediate value through the input/output
// types unchanged and select the arch0 (basic) implementation with
// conservative (no-unroll) settings.
template <bool LowerTriangularL, int RowsColsA, typename InputType, typename OutputType>
struct choleskyTraitsOptimized {
    typedef InputType PROD_T;        // product of two L entries
    typedef InputType ACCUM_T;       // running inner-product accumulator
    typedef InputType ADD_T;         // result of the A - sum subtraction
    typedef InputType DIAG_T;        // diagonal value (square-root output)
    typedef InputType RECIP_DIAG_T;  // reciprocal of the diagonal
    typedef InputType OFF_DIAG_T;    // off-diagonal value (post-division)
    typedef OutputType L_OUTPUT_T;   // element type written into L
    static const int ARCH = 0;                                 // arch0 (basic) implementation
    static const int INNER_II = 1;                             // target II for the inner loops
    static const int UNROLL_FACTOR = 1;                        // no unrolling by default
    static const int UNROLL_DIM = (LowerTriangularL ? 1 : 2);  // dimension an unroll would act on
    static const int ARCH2_ZERO_LOOP = true;
};

// Specialization for complex ap_fixed inputs and outputs. Each intermediate
// type is widened just enough to hold the worst case of its stage, with
// AP_RND_CONV/AP_SAT guarding rounding and overflow.
template <bool LowerTriangularL,
          int RowsColsA,
          int W1, int I1, ap_q_mode Q1, ap_o_mode O1, int N1,
          int W2, int I2, ap_q_mode Q2, ap_o_mode O2, int N2>
struct choleskyTraitsOptimized<LowerTriangularL,
                               RowsColsA,
                               hls::x_complex<ap_fixed<W1, I1, Q1, O1, N1>>,
                               hls::x_complex<ap_fixed<W2, I2, Q2, O2, N2>>> {
    // Product of two inputs: both total and integer widths double.
    typedef hls::x_complex<ap_fixed<W1 + W1, I1 + I1, AP_RND_CONV, AP_SAT, 0>> PROD_T;
    // Accumulator: product width grown by log2(RowsColsA) bits for the summation.
    typedef hls::x_complex<ap_fixed<(W1 + W1) + BitWidth<RowsColsA>::Value,
                                    (I1 + I1) + BitWidth<RowsColsA>::Value,
                                    AP_RND_CONV, AP_SAT, 0>> ACCUM_T;
    // One extra bit absorbs the A - sum subtraction.
    typedef hls::x_complex<ap_fixed<W1 + 1, I1 + 1, AP_RND_CONV, AP_SAT, 0>> ADD_T;
    typedef hls::x_complex<ap_fixed<(W1 + 1) * 2, I1 + 1, AP_RND_CONV, AP_SAT, 0>> DIAG_T;
    typedef hls::x_complex<ap_fixed<(W1 + 1) * 2, I1 + 1, AP_RND_CONV, AP_SAT, 0>> OFF_DIAG_T;
    // Reciprocal of the diagonal: the diagonal is real, so plain ap_fixed.
    typedef ap_fixed<2 + (W2 - I2) + W2, 2 + (W2 - I2), AP_RND_CONV, AP_SAT, 0> RECIP_DIAG_T;
    typedef hls::x_complex<ap_fixed<W2, I2, AP_RND_CONV, AP_SAT, 0>> L_OUTPUT_T;
    static const int ARCH = 0;           // arch0 (basic) implementation
    static const int INNER_II = 1;       // inner-loop initiation interval target
    static const int UNROLL_FACTOR = 2;  // matches the UNROLL factor applied in sum_loop
    static const int UNROLL_DIM = (LowerTriangularL ? 1 : 2);
    static const int ARCH2_ZERO_LOOP = true;
};

// ===================================================================================================================
// OPTIMIZATION 2: Optimized helper functions with inline directives

// Guarded square root: writes sqrt(a) into b and returns 0. If a is negative
// (the input matrix is not positive definite) b is set to zero and 1 is
// returned so the caller can flag the failure.
template <typename T_IN, typename T_OUT>
int cholesky_sqrt_op_opt(T_IN a, T_OUT& b) {
#pragma HLS INLINE
    const T_IN zero = 0;
    if (a < zero) {
        b = zero;
        return 1;
    }
    b = x_sqrt(a);
    return 0;
}

// Guarded square root for complex data. For a Hermitian positive-definite
// input the diagonal is real, so only the real part is inspected and the
// imaginary part of the result is always zero. Returns 1 when the real part
// is negative (decomposition failure), 0 otherwise.
template <typename T_IN, typename T_OUT>
int cholesky_sqrt_op_opt(hls::x_complex<T_IN> din, hls::x_complex<T_OUT>& dout) {
#pragma HLS INLINE
    const T_IN zero = 0;
    const T_IN re = din.real();
    dout.imag(zero);
    if (re < zero) {
        dout.real(zero);
        return 1;
    }
    dout.real(x_sqrt(re));
    return 0;
}

// 1/sqrt(x) for ap_fixed: take the square root in the input format, cast it
// to the result format, then divide. Kept out-of-line (INLINE off) so the
// divider can be shared rather than duplicated at each call site.
template <int W1, int I1, ap_q_mode Q1, ap_o_mode O1, int N1,
          int W2, int I2, ap_q_mode Q2, ap_o_mode O2, int N2>
void cholesky_rsqrt_opt(ap_fixed<W1, I1, Q1, O1, N1> x,
                        ap_fixed<W2, I2, Q2, O2, N2>& res) {
#pragma HLS INLINE off
    const ap_fixed<W2, I2, Q2, O2, N2> one = 1;
    ap_fixed<W1, I1, Q1, O1, N1> root;
    ap_fixed<W2, I2, Q2, O2, N2> root_cast;
    root = x_sqrt(x);
    root_cast = root;
    res = one / root_cast;
}

// Scale a complex value by a real factor: both components are multiplied by
// B independently (no cross terms are needed for a real multiplier).
template <typename AType, typename BType, typename CType>
void cholesky_prod_sum_mult_opt(hls::x_complex<AType> A, BType B, hls::x_complex<CType>& C) {
#pragma HLS INLINE
    C.imag(A.imag() * B);
    C.real(A.real() * B);
}

// ===================================================================================================================
// OPTIMIZATION 3: Highly optimized choleskyBasic for arch0
/**
 * @brief Basic (arch0) Cholesky factorization: computes L with A = L * L^H
 *        for a Hermitian positive-definite matrix A.
 *
 * Column-by-column algorithm: for each column j the diagonal
 * L[j][j] = sqrt(A[j][j] - sum_k |L[j][k]|^2) is computed first, then each
 * off-diagonal L[i][j] = (A[i][j] - sum_k L[i][k]*conj(L[j][k])) / L[j][j],
 * and finally the opposite triangle of the output is zeroed.
 *
 * @tparam LowerTriangularL  true: result stored in the lower triangle;
 *                           false: conjugated result stored in the upper triangle
 * @tparam RowsColsA         matrix dimension
 * @tparam CholeskyTraits    supplies the widened internal fixed-point types
 * @param  A                 input Hermitian positive-definite matrix
 * @param  L                 output triangular factor
 * @return 0 on success, 1 if a negative value reached a square root
 *         (input not positive definite)
 */
template <bool LowerTriangularL, int RowsColsA, typename CholeskyTraits, 
          class InputType, class OutputType>
int choleskyBasicOptimized(const InputType A[RowsColsA][RowsColsA], 
                           OutputType L[RowsColsA][RowsColsA]) {
#pragma HLS INLINE off
    
    int return_code = 0;

    // Internal working variables in the trait-selected (widened) formats.
    typename CholeskyTraits::PROD_T prod;
    typename CholeskyTraits::ACCUM_T sum[RowsColsA];
    typename CholeskyTraits::ACCUM_T A_cast_to_sum;
    typename CholeskyTraits::ACCUM_T prod_cast_to_sum;
    typename CholeskyTraits::ADD_T A_minus_sum;
    typename CholeskyTraits::DIAG_T new_L_diag;
    typename CholeskyTraits::OFF_DIAG_T new_L_off_diag;
    typename CholeskyTraits::OFF_DIAG_T L_cast_to_new_L_off_diag;
    typename CholeskyTraits::L_OUTPUT_T new_L;
    OutputType retrieved_L;
    
    // Local copy of L so reads of previously computed columns do not contend
    // with the output array; partitioned for two accesses per cycle.
    OutputType L_internal[RowsColsA][RowsColsA];
    #pragma HLS ARRAY_PARTITION variable=L_internal cyclic factor=2 dim=2
    
    // Accumulator array partitioned to match the factor-2 unroll below.
    #pragma HLS ARRAY_PARTITION variable=sum cyclic factor=2 dim=1
    
    // Dual-port BRAM so two L_internal reads can be issued concurrently.
    #pragma HLS BIND_STORAGE variable=L_internal type=ram_2p impl=bram

// MAIN COLUMN LOOP - Process each column
col_loop:
    for (int j = 0; j < RowsColsA; j++) {
        // Initialize sum for this column
        sum[j] = 0;

        // =====================================================================
        // DIAGONAL ELEMENT CALCULATION
        // =====================================================================
        // sum = |L[j][0]|^2 + |L[j][1]|^2 + ... + |L[j][j-1]|^2
    diag_loop:
        for (int k = 0; k < j; k++) {
            #pragma HLS PIPELINE II=1
            // NOTE(review): pragma arguments built from the template parameter
            // (RowsColsA-1, RowsColsA/2) require Vitis HLS; classic Vivado HLS
            // expects literal constants here - confirm against the target tool.
            #pragma HLS LOOP_TRIPCOUNT min=0 max=RowsColsA-1 avg=RowsColsA/2
            
            // Read previously computed L value from the stored triangle.
            if (LowerTriangularL == true) {
                retrieved_L = L_internal[j][k];
            } else {
                retrieved_L = L_internal[k][j];
            }
            
            // Accumulate: sum += conj(L[j][k]) * L[j][k] = |L[j][k]|^2
            sum[j] += hls::x_conj(retrieved_L) * retrieved_L;
        }
        
        // Diagonal: L[j][j] = sqrt(A[j][j] - sum)
        A_cast_to_sum = A[j][j];
        A_minus_sum = A_cast_to_sum - sum[j];
        
        if (cholesky_sqrt_op_opt(A_minus_sum, new_L_diag)) {
            // BUG FIX: the synthesis guard macro is __SYNTHESIS__ (double
            // underscores); the old _SYNTHESIS_ spelling never matched, so
            // this printf was not actually excluded from synthesis builds.
            #ifndef __SYNTHESIS__
            printf("ERROR: Negative value under square root at diagonal [%d][%d]\n", j, j);
            #endif
            return_code = 1;
        }
        
        new_L = new_L_diag;
        
        // Store diagonal value into both the output and the working copy.
        if (LowerTriangularL == true) {
            L_internal[j][j] = new_L;
            L[j][j] = new_L;
        } else {
            L_internal[j][j] = hls::x_conj(new_L);
            L[j][j] = hls::x_conj(new_L);
        }

        // =====================================================================
        // OFF-DIAGONAL ELEMENTS CALCULATION
        // =====================================================================
        // For each row below the diagonal: L[i][j] = (A[i][j] - sum) / L[j][j]
    off_diag_loop:
        for (int i = j+1; i < RowsColsA; i++) {
            #pragma HLS LOOP_TRIPCOUNT min=1 max=RowsColsA-1 avg=RowsColsA/2
            
            // Seed the accumulator with the A element for this position.
            if (LowerTriangularL == true) {
                sum[j] = A[i][j];
            } else {
                sum[j] = hls::x_conj(A[j][i]);
            }
            
            // Inner product: sum = A[i][j] - sum_k L[i][k] * conj(L[j][k])
        sum_loop:
            for (int k = 0; k < j; k++) {
                // Critical inner loop: pipelined with II=1 and unrolled by 2
                // (factor matches CholeskyTraits::UNROLL_FACTOR and the
                // cyclic partitioning above).
                #pragma HLS PIPELINE II=1
                #pragma HLS LOOP_TRIPCOUNT min=0 max=RowsColsA-1 avg=RowsColsA/2
                #pragma HLS UNROLL factor=2
                
                // sum -= L[i][k] * conj(L[j][k]) (negated product, then add)
                if (LowerTriangularL == true) {
                    prod = -L_internal[i][k] * hls::x_conj(L_internal[j][k]);
                } else {
                    prod = -hls::x_conj(L_internal[k][i]) * L_internal[k][j];
                }
                
                prod_cast_to_sum = prod;
                sum[j] += prod_cast_to_sum;
            }
            
            // Divide by the diagonal: L[i][j] = sum / L[j][j]
            new_L_off_diag = sum[j];
            L_cast_to_new_L_off_diag = L_internal[j][j];
            
            // The diagonal of a Cholesky factor is real, so a real division
            // suffices (cheaper than a full complex division).
            new_L_off_diag = new_L_off_diag / hls::x_real(L_cast_to_new_L_off_diag);
            
            new_L = new_L_off_diag;
            
            // Store result into both the output and the working copy.
            if (LowerTriangularL == true) {
                L[i][j] = new_L;
                L_internal[i][j] = new_L;
            } else {
                L[j][i] = hls::x_conj(new_L);
                L_internal[j][i] = hls::x_conj(new_L);
            }
        }
        
        // =====================================================================
        // ZERO UPPER/LOWER TRIANGLE
        // =====================================================================
        // The unused triangle of the output is explicitly zeroed.
    zero_loop:
        for (int i = 0; i < j; i++) {
            #pragma HLS PIPELINE II=1
            #pragma HLS LOOP_TRIPCOUNT min=0 max=RowsColsA-1 avg=RowsColsA/2
            
            if (LowerTriangularL == true) {
                L[i][j] = 0;  // Zero upper triangle
            } else {
                L[j][i] = 0;  // Zero lower triangle
            }
        }
    }
    
    return return_code;
}

// ===================================================================================================================
// OPTIMIZATION 13: Top-level wrapper with optimized traits selection
// Dispatch shim: the optimized design always selects the arch0 (basic)
// implementation, so this wrapper simply forwards its arguments to
// choleskyBasicOptimized and returns its status code.
template <bool LowerTriangularL, int RowsColsA, typename CholeskyTraits, 
          class InputType, class OutputType>
int choleskyTopOptimized(const InputType A[RowsColsA][RowsColsA], 
                         OutputType L[RowsColsA][RowsColsA]) {
#pragma HLS INLINE off
    const int status = choleskyBasicOptimized<LowerTriangularL, RowsColsA, CholeskyTraits,
                                              InputType, OutputType>(A, L);
    return status;
}

/**
 * @brief Optimized Cholesky decomposition (arch0) for streamed complex fixed-point matrices
 * 
 * OPTIMIZATIONS APPLIED:
 * 1. Array partitioning (cyclic factor=2) for L_internal and sum arrays
 * 2. Dual-port BRAM binding for concurrent reads
 * 3. Pipeline all loops with II=1 target
 * 4. Unroll inner sum_loop by factor=2
 * 5. Optimized loop bounds (start at j+1 for off-diagonal)
 * 6. Separate pipelined zeroing loop
 * 7. Inline optimization for helper functions
 * 8. Enhanced traits with optimal parameters
 * 9. Real division optimization for diagonal (always real)
 * 10. Better loop trip count hints for HLS
 * 
 * @tparam LowerTriangularL   When false generates result in upper triangle
 * @tparam RowsColsA          Matrix dimensions (typically 8 for competition)
 * @tparam InputType          Complex fixed-point input type
 * @tparam OutputType         Complex fixed-point output type
 * @tparam TRAITS             choleskyTraitsOptimized class
 * 
 * @param matrixAStrm         Input stream of Hermitian positive definite matrix
 * @param matrixLStrm         Output stream of triangular matrix
 * @param return_code         Output parameter for return code (0=Success, 1=Failure)
 */
template <bool LowerTriangularL,
          int RowsColsA,
          class InputType,
          class OutputType,
          typename TRAITS = choleskyTraitsOptimized<LowerTriangularL, RowsColsA, 
                                                     InputType, OutputType>>
void choleskyOptimized(hls::stream<InputType>& matrixAStrm, 
                      hls::stream<OutputType>& matrixLStrm,
                      int& return_code) {
#pragma HLS INLINE off
    
    // Local matrix buffers, partitioned along dim 2 so two elements of a row
    // can be accessed per cycle.
    InputType A[RowsColsA][RowsColsA];
    OutputType L[RowsColsA][RowsColsA];
    #pragma HLS ARRAY_PARTITION variable=A cyclic factor=2 dim=2
    #pragma HLS ARRAY_PARTITION variable=L cyclic factor=2 dim=2

    // Drain the input stream (row-major) into the local buffer.
a_row_loop:
    for (int row = 0; row < RowsColsA; row++) {
    a_col_loop:
        for (int col = 0; col < RowsColsA; col++) {
            #pragma HLS PIPELINE II=1
            #pragma HLS LOOP_FLATTEN
            A[row][col] = matrixAStrm.read();
        }
    }

    // Factorize; return_code is 0 on success, 1 on a non-PD input.
    return_code = choleskyTopOptimized<LowerTriangularL, RowsColsA, TRAITS, 
                                       InputType, OutputType>(A, L);

    // Emit the triangular result (row-major) onto the output stream.
l_row_loop:
    for (int row = 0; row < RowsColsA; row++) {
    l_col_loop:
        for (int col = 0; col < RowsColsA; col++) {
            #pragma HLS PIPELINE II=1
            #pragma HLS LOOP_FLATTEN
            matrixLStrm.write(L[row][col]);
        }
    }
}

// ===================================================================================================================
// BACKWARD COMPATIBILITY: Wrapper to match original function signature
/**
 * @brief Backward-compatible entry point matching the stock xf::solver
 *        cholesky signature (status returned by value, not by reference).
 *
 * The previous implementation duplicated the entire body of
 * choleskyOptimized verbatim (buffers, pragmas, read/compute/write loops);
 * it now delegates to it so the two paths cannot drift apart.
 *
 * @param matrixAStrm  input stream of the Hermitian positive-definite matrix
 * @param matrixLStrm  output stream of the triangular factor
 * @return 0 on success, 1 if the input was not positive definite
 */
template <bool LowerTriangularL,
          int RowsColsA,
          class InputType,
          class OutputType,
          typename TRAITS = choleskyTraitsOptimized<LowerTriangularL, RowsColsA, 
                                                     InputType, OutputType>>
int cholesky(hls::stream<InputType>& matrixAStrm, 
             hls::stream<OutputType>& matrixLStrm) {
    int return_code = 0;
    choleskyOptimized<LowerTriangularL, RowsColsA, InputType, OutputType, TRAITS>(
        matrixAStrm, matrixLStrm, return_code);
    return return_code;
}

// Drop-in alias so code written against the stock xf::solver choleskyTraits
// name resolves to the optimized trait set defined in this header.
template <bool LowerTriangularL, int RowsColsA, typename InputType, typename OutputType>
using choleskyTraits = choleskyTraitsOptimized<LowerTriangularL, RowsColsA, InputType, OutputType>;

} // end namespace solver
} // end namespace xf

#endif // XF_SOLVER_CHOLESKY_OPTIMIZED_HPP