#include "GPU_FormUnbalance.h"
#include <AnalysisModel.h>
#include <LinearSOE.h>
#include <FE_Element.h>
#include <DOF_Group.h>
#include <Element.h>
#include <Node.h>
#include <Vector.h>
#include <ID.h>
#include <FE_EleIter.h>
#include <DOF_GrpIter.h>
#include <algorithm>  // std::max
#include <chrono>
#include <cstring>    // memset, strstr
#include <iostream>
#include <vector>     // std::vector host staging buffers


// CUDA error checking macro.
// Evaluates `call` exactly once; on failure logs file/line/message and makes
// the enclosing function `return -1` (so it may only be used inside functions
// returning int — true of every call site in this file). The local is named
// `err_` and the argument is parenthesized so the macro can neither shadow nor
// mis-parse identifiers/expressions at the call site.
#define CUDA_CHECK(call) do { \
    cudaError_t err_ = (call); \
    if (err_ != cudaSuccess) { \
        std::cerr << "CUDA error at " << __FILE__ << ":" << __LINE__ << " - " << cudaGetErrorString(err_) << std::endl; \
        return -1; \
    } \
} while(0)

// CUSPARSE error checking macro (same contract as CUDA_CHECK).
#define CUSPARSE_CHECK(call) do { \
    cusparseStatus_t status_ = (call); \
    if (status_ != CUSPARSE_STATUS_SUCCESS) { \
        std::cerr << "CUSPARSE error at " << __FILE__ << ":" << __LINE__ << std::endl; \
        return -1; \
    } \
} while(0)

// CUBLAS error checking macro (same contract as CUDA_CHECK).
#define CUBLAS_CHECK(call) do { \
    cublasStatus_t status_ = (call); \
    if (status_ != CUBLAS_STATUS_SUCCESS) { \
        std::cerr << "CUBLAS error at " << __FILE__ << ":" << __LINE__ << std::endl; \
        return -1; \
    } \
} while(0)

//==============================================================================
// CUDA Kernels
//==============================================================================

/**
 * Kernel: per-element internal-force (residual) computation.
 *
 * Launch layout: one block per element, blockDim.x >= 8 (the host wrapper
 * launches 32 threads). Dynamic shared memory must hold, in order (doubles):
 *   coords (maxNodesPerElement*3) | disp (maxDOFsPerElement) |
 *   residual (maxDOFsPerElement)  | B-matrix scratch (maxDOFsPerElement*6) |
 *   stress scratch (6)
 * The B-matrix/stress slots are reserved by the launcher for future element
 * types; the brick path below does not use them.
 *
 * Writes the per-element residual to d_residuals (assembled into the global
 * vector by a separate kernel). Requires SM60+ (atomicAdd on double).
 */
__global__ void computeElementResidualsKernel(
    const double* __restrict__ d_coords,
    const int* __restrict__ d_connectivity,
    const int* __restrict__ d_types,
    const double* __restrict__ d_properties,
    const double* __restrict__ d_displacements,
    const int* __restrict__ d_dofMaps,
    double* __restrict__ d_residuals,
    int numElements,
    int maxNodesPerElement,
    int maxDOFsPerElement
) {
    int elementID = blockIdx.x;
    int threadID = threadIdx.x;

    // Uniform per block (depends only on blockIdx), so returning here cannot
    // desynchronize the __syncthreads() calls below.
    if (elementID >= numElements) return;

    // Carve up dynamic shared memory (layout must match the host launcher).
    extern __shared__ double sharedMem[];
    double* elementCoords = sharedMem;
    double* elementDisp = elementCoords + maxNodesPerElement * 3;
    double* elementResidual = elementDisp + maxDOFsPerElement;
    // Remaining slots (B matrix + 6 stress components) are reserved, unused.

    int elemOffset = elementID * maxNodesPerElement;
    int dofOffset = elementID * maxDOFsPerElement;
    int propOffset = elementID * 10; // up to 10 material properties/element

    // Cooperative load of nodal coordinates. Padding slots (connectivity < 0)
    // are zeroed so no thread ever reads uninitialized shared memory.
    for (int i = threadID; i < maxNodesPerElement * 3; i += blockDim.x) {
        int globalNodeID = d_connectivity[elemOffset + i / 3];
        elementCoords[i] = (globalNodeID >= 0)
                         ? d_coords[globalNodeID * 3 + (i % 3)]
                         : 0.0;
    }

    // Cooperative load of element displacements (unmapped DOFs -> 0).
    for (int i = threadID; i < maxDOFsPerElement; i += blockDim.x) {
        int globalDOF = d_dofMaps[dofOffset + i];
        elementDisp[i] = (globalDOF >= 0) ? d_displacements[globalDOF] : 0.0;
    }

    __syncthreads();

    // Zero the shared residual accumulator.
    for (int i = threadID; i < maxDOFsPerElement; i += blockDim.x) {
        elementResidual[i] = 0.0;
    }

    __syncthreads();

    int elementType = d_types[elementID];

    // Type 1: 8-node trilinear brick. One Gauss point per thread (2x2x2
    // rule, all weights equal to 1).
    if (elementType == 1 && threadID < 8) {

        // Gauss point coordinates in natural space (+/- 1/sqrt(3)).
        const double gaussPts[8][3] = {
            {-0.577350269, -0.577350269, -0.577350269},
            { 0.577350269, -0.577350269, -0.577350269},
            { 0.577350269,  0.577350269, -0.577350269},
            {-0.577350269,  0.577350269, -0.577350269},
            {-0.577350269, -0.577350269,  0.577350269},
            { 0.577350269, -0.577350269,  0.577350269},
            { 0.577350269,  0.577350269,  0.577350269},
            {-0.577350269,  0.577350269,  0.577350269}
        };

        double xi = gaussPts[threadID][0];
        double eta = gaussPts[threadID][1];
        double zeta = gaussPts[threadID][2];

        // Shape function derivatives w.r.t. natural coordinates.
        // (The shape functions themselves are not needed for the stiffness
        // residual, only their derivatives.)
        double dN_dxi[8], dN_deta[8], dN_dzeta[8];

        dN_dxi[0] = -0.125 * (1 - eta) * (1 - zeta);
        dN_dxi[1] =  0.125 * (1 - eta) * (1 - zeta);
        dN_dxi[2] =  0.125 * (1 + eta) * (1 - zeta);
        dN_dxi[3] = -0.125 * (1 + eta) * (1 - zeta);
        dN_dxi[4] = -0.125 * (1 - eta) * (1 + zeta);
        dN_dxi[5] =  0.125 * (1 - eta) * (1 + zeta);
        dN_dxi[6] =  0.125 * (1 + eta) * (1 + zeta);
        dN_dxi[7] = -0.125 * (1 + eta) * (1 + zeta);

        dN_deta[0] = -0.125 * (1 - xi) * (1 - zeta);
        dN_deta[1] = -0.125 * (1 + xi) * (1 - zeta);
        dN_deta[2] =  0.125 * (1 + xi) * (1 - zeta);
        dN_deta[3] =  0.125 * (1 - xi) * (1 - zeta);
        dN_deta[4] = -0.125 * (1 - xi) * (1 + zeta);
        dN_deta[5] = -0.125 * (1 + xi) * (1 + zeta);
        dN_deta[6] =  0.125 * (1 + xi) * (1 + zeta);
        dN_deta[7] =  0.125 * (1 - xi) * (1 + zeta);

        dN_dzeta[0] = -0.125 * (1 - xi) * (1 - eta);
        dN_dzeta[1] = -0.125 * (1 + xi) * (1 - eta);
        dN_dzeta[2] = -0.125 * (1 + xi) * (1 + eta);
        dN_dzeta[3] = -0.125 * (1 - xi) * (1 + eta);
        dN_dzeta[4] =  0.125 * (1 - xi) * (1 - eta);
        dN_dzeta[5] =  0.125 * (1 + xi) * (1 - eta);
        dN_dzeta[6] =  0.125 * (1 + xi) * (1 + eta);
        dN_dzeta[7] =  0.125 * (1 - xi) * (1 + eta);

        // Jacobian of the isoparametric map (3x3, row-wise).
        double J[9] = {0};
        for (int i = 0; i < 8; i++) {
            J[0] += dN_dxi[i]   * elementCoords[i*3];     // dx/dxi
            J[1] += dN_dxi[i]   * elementCoords[i*3+1];   // dy/dxi
            J[2] += dN_dxi[i]   * elementCoords[i*3+2];   // dz/dxi
            J[3] += dN_deta[i]  * elementCoords[i*3];     // dx/deta
            J[4] += dN_deta[i]  * elementCoords[i*3+1];   // dy/deta
            J[5] += dN_deta[i]  * elementCoords[i*3+2];   // dz/deta
            J[6] += dN_dzeta[i] * elementCoords[i*3];     // dx/dzeta
            J[7] += dN_dzeta[i] * elementCoords[i*3+1];   // dy/dzeta
            J[8] += dN_dzeta[i] * elementCoords[i*3+2];   // dz/dzeta
        }

        double detJ = J[0] * (J[4]*J[8] - J[5]*J[7]) -
                      J[1] * (J[3]*J[8] - J[5]*J[6]) +
                      J[2] * (J[3]*J[7] - J[4]*J[6]);

        // Degenerate Gauss points are skipped, but we must NOT return here:
        // detJ differs per thread, and a divergent early return would leave
        // the block-wide __syncthreads() below unreached by some threads
        // (undefined behavior / hang).
        if (fabs(detJ) >= 1e-12) {

            // Inverse Jacobian (adjugate / det).
            double invJ[9];
            double invDetJ = 1.0 / detJ;
            invJ[0] = (J[4]*J[8] - J[5]*J[7]) * invDetJ;
            invJ[1] = (J[2]*J[7] - J[1]*J[8]) * invDetJ;
            invJ[2] = (J[1]*J[5] - J[2]*J[4]) * invDetJ;
            invJ[3] = (J[5]*J[6] - J[3]*J[8]) * invDetJ;
            invJ[4] = (J[0]*J[8] - J[2]*J[6]) * invDetJ;
            invJ[5] = (J[2]*J[3] - J[0]*J[5]) * invDetJ;
            invJ[6] = (J[3]*J[7] - J[4]*J[6]) * invDetJ;
            invJ[7] = (J[1]*J[6] - J[0]*J[7]) * invDetJ;
            invJ[8] = (J[0]*J[4] - J[1]*J[3]) * invDetJ;

            // Cartesian shape function derivatives.
            double dN_dx[8], dN_dy[8], dN_dz[8];
            for (int i = 0; i < 8; i++) {
                dN_dx[i] = invJ[0] * dN_dxi[i] + invJ[1] * dN_deta[i] + invJ[2] * dN_dzeta[i];
                dN_dy[i] = invJ[3] * dN_dxi[i] + invJ[4] * dN_deta[i] + invJ[5] * dN_dzeta[i];
                dN_dz[i] = invJ[6] * dN_dxi[i] + invJ[7] * dN_deta[i] + invJ[8] * dN_dzeta[i];
            }

            // Small-strain vector: ex, ey, ez, gxy, gyz, gxz (engineering shear).
            double strain[6] = {0};
            for (int i = 0; i < 8; i++) {
                strain[0] += dN_dx[i] * elementDisp[i*3];
                strain[1] += dN_dy[i] * elementDisp[i*3+1];
                strain[2] += dN_dz[i] * elementDisp[i*3+2];
                strain[3] += dN_dy[i] * elementDisp[i*3] + dN_dx[i] * elementDisp[i*3+1];
                strain[4] += dN_dz[i] * elementDisp[i*3+1] + dN_dy[i] * elementDisp[i*3+2];
                strain[5] += dN_dx[i] * elementDisp[i*3+2] + dN_dz[i] * elementDisp[i*3];
            }

            // Isotropic linear elasticity from the first two properties.
            double E = d_properties[propOffset];     // Young's modulus
            double nu = d_properties[propOffset+1];  // Poisson's ratio
            double lambda = E * nu / ((1 + nu) * (1 - 2*nu)); // Lame lambda
            double mu = E / (2 * (1 + nu));                   // shear modulus

            double localStress[6];
            localStress[0] = (lambda + 2*mu) * strain[0] + lambda * strain[1] + lambda * strain[2];
            localStress[1] = lambda * strain[0] + (lambda + 2*mu) * strain[1] + lambda * strain[2];
            localStress[2] = lambda * strain[0] + lambda * strain[1] + (lambda + 2*mu) * strain[2];
            localStress[3] = mu * strain[3];
            localStress[4] = mu * strain[4];
            localStress[5] = mu * strain[5];

            // Accumulate R += B^T * stress * |detJ| for this Gauss point.
            // Eight Gauss-point threads write concurrently -> shared-memory
            // atomics (double atomicAdd: SM60+).
            for (int i = 0; i < 8; i++) {
                double Bu_stress = dN_dx[i] * localStress[0] + dN_dy[i] * localStress[3] + dN_dz[i] * localStress[5];
                atomicAdd(&elementResidual[i*3], Bu_stress * fabs(detJ));

                double Bv_stress = dN_dy[i] * localStress[1] + dN_dx[i] * localStress[3] + dN_dz[i] * localStress[4];
                atomicAdd(&elementResidual[i*3+1], Bv_stress * fabs(detJ));

                double Bw_stress = dN_dz[i] * localStress[2] + dN_dy[i] * localStress[4] + dN_dx[i] * localStress[5];
                atomicAdd(&elementResidual[i*3+2], Bw_stress * fabs(detJ));
            }
        }
    }

    __syncthreads();

    // Publish the per-element residual to global memory.
    for (int i = threadID; i < maxDOFsPerElement; i += blockDim.x) {
        d_residuals[dofOffset + i] = elementResidual[i];
    }
}

/**
 * Kernel: scatter applied nodal loads into the global residual vector.
 *
 * Launch layout: one block per node, one thread per nodal DOF
 * (blockDim.x == maxDOFsPerNode). d_nodalLoads and d_nodalDOFMaps share the
 * same node-major layout; entries mapped to a negative equation number are
 * skipped. Uses atomicAdd(double) -> requires SM60+.
 */
__global__ void computeNodalUnbalanceKernel(
    const double* __restrict__ d_nodalLoads,
    const int* __restrict__ d_nodalDOFMaps,
    double* __restrict__ d_globalResidual,
    int numNodes,
    int maxDOFsPerNode
) {
    const int node = blockIdx.x;
    const int dof = threadIdx.x;

    if (node >= numNodes || dof >= maxDOFsPerNode) return;

    // Loads and the DOF map use the same flat node-major indexing.
    const int slot = node * maxDOFsPerNode + dof;

    const int eqn = d_nodalDOFMaps[slot];
    if (eqn < 0) return; // DOF not in the global system

    atomicAdd(&d_globalResidual[eqn], d_nodalLoads[slot]);
}

/**
 * Kernel: assemble per-element residuals into the global residual vector.
 *
 * Launch layout: one block per element, one thread per local DOF
 * (blockDim.x == maxDOFsPerElement). d_assemblyOffsets is part of the
 * interface but unused by this flat-layout implementation. Local DOFs mapped
 * to a negative equation number are skipped. atomicAdd(double): SM60+.
 */
__global__ void assembleGlobalResidualKernel(
    const double* __restrict__ d_elementResiduals,
    const int* __restrict__ d_assemblyIndices,
    const int* __restrict__ d_assemblyOffsets,
    double* __restrict__ d_globalResidual,
    int numElements,
    int maxDOFsPerElement
) {
    const int elem = blockIdx.x;
    const int ldof = threadIdx.x;

    if (elem >= numElements || ldof >= maxDOFsPerElement) return;

    const int slot = elem * maxDOFsPerElement + ldof;

    const int eqn = d_assemblyIndices[slot];
    if (eqn < 0) return; // DOF not in the global system

    // Internal forces oppose the applied loads, hence the sign flip.
    atomicAdd(&d_globalResidual[eqn], -d_elementResiduals[slot]);
}

//==============================================================================
// Host interface functions
//==============================================================================

/**
 * Host wrapper: launch the element-residual kernel on `stream`.
 * One block per element, 32 threads per block (the brick path uses 8 of
 * them, one per Gauss point; all 32 help with cooperative loads).
 * No-op for an empty model: a zero-dimension grid is an invalid launch
 * configuration in CUDA and would raise a spurious error.
 */
extern "C" void launch_computeElementResiduals(
    const double* d_coords, const int* d_connectivity, const int* d_types,
    const double* d_properties, const double* d_displacements,
    const int* d_dofMaps, double* d_residuals,
    int numElements, int maxNodesPerElement, int maxDOFsPerElement,
    cudaStream_t stream
) {
    if (numElements <= 0) return;

    dim3 grid(numElements);
    dim3 block(32);

    // Dynamic shared memory: coords + disp + residual + B-matrix scratch +
    // 6 stress components — this layout is mirrored inside the kernel.
    size_t sharedMemSize = (size_t)(maxNodesPerElement * 3 + maxDOFsPerElement + maxDOFsPerElement +
                                    maxDOFsPerElement * 6 + 6) * sizeof(double);

    computeElementResidualsKernel<<<grid, block, sharedMemSize, stream>>>(
        d_coords, d_connectivity, d_types, d_properties, d_displacements,
        d_dofMaps, d_residuals, numElements, maxNodesPerElement, maxDOFsPerElement
    );
}

/**
 * Host wrapper: launch the nodal-load kernel on `stream`.
 * One block per node, one thread per nodal DOF.
 * No-op when numNodes or maxDOFsPerNode is zero — a zero-dimension grid or
 * block is an invalid launch configuration in CUDA.
 */
extern "C" void launch_computeNodalUnbalance(
    const double* d_nodalLoads, const int* d_nodalDOFMaps,
    double* d_globalResidual, int numNodes, int maxDOFsPerNode,
    cudaStream_t stream
) {
    if (numNodes <= 0 || maxDOFsPerNode <= 0) return;

    dim3 grid(numNodes);
    dim3 block(maxDOFsPerNode);

    computeNodalUnbalanceKernel<<<grid, block, 0, stream>>>(
        d_nodalLoads, d_nodalDOFMaps, d_globalResidual, numNodes, maxDOFsPerNode
    );
}

/**
 * Host wrapper: launch the residual-assembly kernel on `stream`.
 * One block per element, one thread per local DOF.
 * No-op for zero elements/DOFs — a zero-dimension grid or block is an
 * invalid launch configuration in CUDA.
 */
extern "C" void launch_assembleGlobalResidual(
    const double* d_elementResiduals, const int* d_assemblyIndices,
    const int* d_assemblyOffsets, double* d_globalResidual,
    int numElements, int maxDOFsPerElement, cudaStream_t stream
) {
    if (numElements <= 0 || maxDOFsPerElement <= 0) return;

    dim3 grid(numElements);
    dim3 block(maxDOFsPerElement);

    assembleGlobalResidualKernel<<<grid, block, 0, stream>>>(
        d_elementResiduals, d_assemblyIndices, d_assemblyOffsets,
        d_globalResidual, numElements, maxDOFsPerElement
    );
}

//==============================================================================
// GPU_FormUnbalance Class Implementation
//==============================================================================

// Constructor: caches the capacity limits, zeroes the device-pointer and
// stream bookkeeping structs, then acquires all GPU resources (streams,
// events, CUBLAS/CUSPARSE handles, device buffers).
// NOTE(review): a failed initializeGPU() only logs — the object is left
// partially initialized and later calls will fail; consider surfacing this
// to callers.
GPU_FormUnbalance::GPU_FormUnbalance(int maxElements, int maxDOFs, int maxNodes)
    : maxElements(maxElements), maxDOFs(maxDOFs), maxNodes(maxNodes),
      currentElements(0), currentDOFs(0), currentNodes(0),
      profilingEnabled(false), lastComputeTime(0.0), lastTransferTime(0.0),
      cublasHandle(nullptr), cusparseHandle(nullptr)
{
    // Zero all device pointers/streams/events so cleanup of a half-built
    // object (if initialization fails below) is safe.
    memset(&gpuData, 0, sizeof(GPUData));
    memset(&streams, 0, sizeof(GPUStreams));
    
    if (initializeGPU() != 0) {
        std::cerr << "Failed to initialize GPU resources" << std::endl;
    }
}

// Destructor: releases all device memory, streams, events and library handles.
GPU_FormUnbalance::~GPU_FormUnbalance() {
    cleanupGPU();
}

// Acquires per-instance CUDA resources in dependency order: streams and
// events first, then the CUBLAS/CUSPARSE handles bound to the compute
// stream, then the device buffers. Returns 0 on success, -1 on any
// CUDA/CUBLAS/CUSPARSE failure (via the *_CHECK macros); resources created
// before a failure are reclaimed later by cleanupGPU(), which the
// destructor always calls.
int GPU_FormUnbalance::initializeGPU() {
    // Separate streams so host<->device copies can overlap kernel execution.
    CUDA_CHECK(cudaStreamCreate(&streams.computeStream));
    CUDA_CHECK(cudaStreamCreate(&streams.transferStream));
    CUDA_CHECK(cudaEventCreate(&streams.computeEvent));
    CUDA_CHECK(cudaEventCreate(&streams.transferEvent));
    
    // Events used for GPU-side profiling of formUnbalance().
    CUDA_CHECK(cudaEventCreate(&startEvent));
    CUDA_CHECK(cudaEventCreate(&stopEvent));
    
    // Bind CUBLAS work to the compute stream so it serializes with kernels.
    CUBLAS_CHECK(cublasCreate(&cublasHandle));
    CUBLAS_CHECK(cublasSetStream(cublasHandle, streams.computeStream));
    
    // Same for CUSPARSE.
    CUSPARSE_CHECK(cusparseCreate(&cusparseHandle));
    CUSPARSE_CHECK(cusparseSetStream(cusparseHandle, streams.computeStream));
    
    // Device buffers sized from maxElements/maxDOFs/maxNodes.
    return allocateGPUMemory();
}

/**
 * Release all GPU resources: device buffers, streams, events and the
 * CUBLAS/CUSPARSE handles. Every handle is nulled after destruction so the
 * function is idempotent — calling it twice (e.g. an explicit cleanup
 * followed by the destructor) no longer destroys stale handles.
 */
void GPU_FormUnbalance::cleanupGPU() {
    freeGPUMemory();
    
    if (streams.computeStream)  { cudaStreamDestroy(streams.computeStream);  streams.computeStream = nullptr; }
    if (streams.transferStream) { cudaStreamDestroy(streams.transferStream); streams.transferStream = nullptr; }
    if (streams.computeEvent)   { cudaEventDestroy(streams.computeEvent);    streams.computeEvent = nullptr; }
    if (streams.transferEvent)  { cudaEventDestroy(streams.transferEvent);   streams.transferEvent = nullptr; }
    if (startEvent)             { cudaEventDestroy(startEvent);              startEvent = nullptr; }
    if (stopEvent)              { cudaEventDestroy(stopEvent);               stopEvent = nullptr; }
    
    if (cublasHandle)   { cublasDestroy(cublasHandle);     cublasHandle = nullptr; }
    if (cusparseHandle) { cusparseDestroy(cusparseHandle); cusparseHandle = nullptr; }
}

/**
 * Allocate all device buffers, sized by the current capacity limits.
 * Sizes assume 8-node brick elements (24 DOFs/element), up to 10 material
 * properties per element and up to 6 DOFs per node.
 * Returns 0 on success. On any cudaMalloc failure every buffer allocated so
 * far is released via freeGPUMemory() (the original code leaked partial
 * allocations) and -1 is returned.
 */
int GPU_FormUnbalance::allocateGPUMemory() {
    // Table of (destination pointer, byte count) pairs so failure handling
    // lives in one place.
    struct AllocSpec { void** ptr; size_t bytes; };
    const AllocSpec specs[] = {
        { (void**)&gpuData.d_elementCoords,       (size_t)maxElements * 8 * 3 * sizeof(double) }, // 8 nodes x 3 coords
        { (void**)&gpuData.d_elementConnectivity, (size_t)maxElements * 8 * sizeof(int) },
        { (void**)&gpuData.d_elementTypes,        (size_t)maxElements * sizeof(int) },
        { (void**)&gpuData.d_elementProperties,   (size_t)maxElements * 10 * sizeof(double) },    // 10 material properties
        { (void**)&gpuData.d_elementDOFMaps,      (size_t)maxElements * 24 * sizeof(int) },       // 24 DOFs per element
        { (void**)&gpuData.d_globalCoords,        (size_t)maxNodes * 3 * sizeof(double) },
        { (void**)&gpuData.d_globalDisplacements, (size_t)maxDOFs * sizeof(double) },
        { (void**)&gpuData.d_globalResidual,      (size_t)maxDOFs * sizeof(double) },
        { (void**)&gpuData.d_elementResiduals,    (size_t)maxElements * 24 * sizeof(double) },
        { (void**)&gpuData.d_workSpace,           (size_t)maxDOFs * sizeof(double) },
        { (void**)&gpuData.d_nodalLoads,          (size_t)maxNodes * 6 * sizeof(double) },        // 6 DOFs per node
        { (void**)&gpuData.d_nodalDOFMaps,        (size_t)maxNodes * 6 * sizeof(int) },
        { (void**)&gpuData.d_assemblyIndices,     (size_t)maxElements * 24 * sizeof(int) },
        { (void**)&gpuData.d_assemblyOffsets,     (size_t)(maxElements + 1) * sizeof(int) },
    };
    
    for (const AllocSpec& spec : specs) {
        cudaError_t err = cudaMalloc(spec.ptr, spec.bytes);
        if (err != cudaSuccess) {
            std::cerr << "CUDA error at " << __FILE__ << ":" << __LINE__
                      << " - " << cudaGetErrorString(err) << std::endl;
            freeGPUMemory(); // release the buffers allocated before the failure
            return -1;
        }
    }
    
    return 0;
}

/**
 * Release every device buffer owned by gpuData and zero the struct so the
 * pointers cannot be freed twice. Safe to call on an empty/partially
 * allocated struct.
 */
void GPU_FormUnbalance::freeGPUMemory() {
    void* buffers[] = {
        gpuData.d_elementCoords,
        gpuData.d_elementConnectivity,
        gpuData.d_elementTypes,
        gpuData.d_elementProperties,
        gpuData.d_elementDOFMaps,
        gpuData.d_globalCoords,
        gpuData.d_globalDisplacements,
        gpuData.d_globalResidual,
        gpuData.d_elementResiduals,
        gpuData.d_workSpace,
        gpuData.d_nodalLoads,
        gpuData.d_nodalDOFMaps,
        gpuData.d_assemblyIndices,
        gpuData.d_assemblyOffsets,
    };
    
    for (void* buf : buffers) {
        if (buf) cudaFree(buf);
    }
    
    memset(&gpuData, 0, sizeof(GPUData));
}

/**
 * Form the unbalance (RHS) vector on the GPU.
 *
 * Pipeline: stage model data (async H2D copies on transferStream) ->
 * zero the global residual -> element-residual kernel -> assembly kernel ->
 * nodal-load kernel (all on computeStream) -> copy the result into the SOE
 * RHS (D2H on transferStream).
 *
 * CUDA gives no implicit ordering between streams, so events are used to
 * (a) make computeStream wait for the H2D copies and (b) make transferStream
 * wait for the kernels before the D2H copy — the original code raced on both.
 *
 * Returns 0 on success, -1 on any failure.
 */
int GPU_FormUnbalance::formUnbalance(AnalysisModel* theAnalysisModel, LinearSOE* theSOE) {
    if (!theAnalysisModel || !theSOE) {
        std::cerr << "ERROR: Null AnalysisModel or LinearSOE" << std::endl;
        return -1;
    }
    
    auto startTime = std::chrono::high_resolution_clock::now();
    
    if (profilingEnabled) {
        CUDA_CHECK(cudaEventRecord(startEvent, streams.computeStream));
    }
    
    // Stage element/nodal data (issues async H2D copies on transferStream).
    if (transferDataToGPU(theAnalysisModel) != 0) {
        std::cerr << "ERROR: Failed to transfer data to GPU" << std::endl;
        return -1;
    }
    
    // Kernels run on computeStream; make them wait for the H2D copies
    // issued on transferStream to complete.
    CUDA_CHECK(cudaEventRecord(streams.transferEvent, streams.transferStream));
    CUDA_CHECK(cudaStreamWaitEvent(streams.computeStream, streams.transferEvent, 0));
    
    // Zero global residual vector.
    CUDA_CHECK(cudaMemsetAsync(gpuData.d_globalResidual, 0, currentDOFs * sizeof(double), streams.computeStream));
    
    // Per-element internal forces.
    if (computeElementResidualsGPU() != 0) {
        std::cerr << "ERROR: Failed to compute element residuals on GPU" << std::endl;
        return -1;
    }
    
    // Scatter element residuals into the global vector.
    if (assembleGlobalResidualGPU() != 0) {
        std::cerr << "ERROR: Failed to assemble global residual on GPU" << std::endl;
        return -1;
    }
    
    // Add applied nodal loads.
    if (computeNodalUnbalanceGPU() != 0) {
        std::cerr << "ERROR: Failed to compute nodal unbalance on GPU" << std::endl;
        return -1;
    }
    
    // The D2H copy in transferResultFromGPU runs on transferStream; it must
    // not start before the compute-stream kernels have finished.
    CUDA_CHECK(cudaEventRecord(streams.computeEvent, streams.computeStream));
    CUDA_CHECK(cudaStreamWaitEvent(streams.transferStream, streams.computeEvent, 0));
    
    // Copy the residual into the LinearSOE RHS vector.
    if (transferResultFromGPU(theSOE) != 0) {
        std::cerr << "ERROR: Failed to transfer result from GPU" << std::endl;
        return -1;
    }
    
    // Drain the compute stream before reporting success.
    CUDA_CHECK(cudaStreamSynchronize(streams.computeStream));
    
    if (profilingEnabled) {
        CUDA_CHECK(cudaEventRecord(stopEvent, streams.computeStream));
        CUDA_CHECK(cudaEventSynchronize(stopEvent));
        
        float gpuTime;
        CUDA_CHECK(cudaEventElapsedTime(&gpuTime, startEvent, stopEvent));
        lastComputeTime = gpuTime;
    }
    
    auto endTime = std::chrono::high_resolution_clock::now();
    auto totalTime = std::chrono::duration_cast<std::chrono::microseconds>(endTime - startTime).count() / 1000.0;
    
    if (profilingEnabled) {
        std::cout << "GPU formUnbalance: " << lastComputeTime << " ms (total: " << totalTime << " ms)" << std::endl;
    }
    
    return 0;
}

/**
 * Size this step's transfers (element, node and equation counts), grow the
 * device buffers if needed, then stage element and nodal data onto the GPU
 * (async copies on transferStream). Returns 0 on success, -1 otherwise.
 */
int GPU_FormUnbalance::transferDataToGPU(AnalysisModel* theAnalysisModel) {
    // Count FE elements by exhausting the iterator.
    int nElems = 0;
    FE_EleIter& elements = theAnalysisModel->getFEs();
    while (elements() != 0)
        ++nElems;
    currentElements = nElems;
    
    // Count DOF groups (one per node) the same way.
    int nGroups = 0;
    DOF_GrpIter& groups = theAnalysisModel->getDOFs();
    while (groups() != 0)
        ++nGroups;
    currentNodes = nGroups;
    
    currentDOFs = theAnalysisModel->getNumEqn();
    
    // Grow device buffers if the model outgrew the current capacities.
    if (reallocateIfNeeded(currentElements, currentDOFs, currentNodes) != 0)
        return -1;
    
    // Stage element-level data (coords, connectivity, DOF maps, ...).
    if (updateElementData(theAnalysisModel) != 0)
        return -1;
    
    // Stage nodal loads and nodal DOF maps.
    return updateNodalData(theAnalysisModel);
}

/**
 * Flatten per-element data (type, connectivity, coordinates, DOF maps,
 * material properties) plus the current displacement vector into host
 * staging buffers, then issue async H2D copies on transferStream.
 * Returns 0 on success, -1 on a CUDA error.
 *
 * NOTE: callers must order transferStream against computeStream before
 * launching kernels that read these buffers.
 */
int GPU_FormUnbalance::updateElementData(AnalysisModel* theAnalysisModel) {
    // Host staging buffers. Index maps default to -1 ("invalid") rather than
    // value-initialized 0: 0 is a VALID node/equation index, so slots that
    // are never filled below (null elements skipped by `continue`, elements
    // with fewer than 8 nodes / 24 DOFs) must not silently alias node 0 or
    // equation 0 in the kernels.
    std::vector<double> h_elementCoords(currentElements * 8 * 3, 0.0);
    std::vector<int> h_elementConnectivity(currentElements * 8, -1);
    std::vector<int> h_elementTypes(currentElements, 0); // type 0 => kernel skips
    std::vector<double> h_elementProperties(currentElements * 10, 0.0);
    std::vector<int> h_elementDOFMaps(currentElements * 24, -1);
    std::vector<double> h_globalDisplacements(currentDOFs, 0.0);
    std::vector<int> h_assemblyIndices(currentElements * 24, -1);
    
    // Current displacement state of the model.
    const Vector& X = theAnalysisModel->getCurrentDisp();
    for (int i = 0; i < currentDOFs && i < X.Size(); i++) {
        h_globalDisplacements[i] = X(i);
    }
    
    // Walk the FE elements and flatten their data into the staging buffers.
    FE_EleIter& eleIter1 = theAnalysisModel->getFEs();
    int elemIdx = 0;
    FE_Element* elePtr;
    
    while ((elePtr = eleIter1()) != 0 && elemIdx < currentElements) {
        Element* element = elePtr->getElement();
        if (!element) continue; // slot keeps its "invalid" defaults
        
        // Classify the element (only bricks, type 1, are computed on GPU).
        const char* className = element->getClassType();
        int elemType = 1; // Default to brick element
        if (strstr(className, "Brick") || strstr(className, "brick")) {
            elemType = 1;
        }
        h_elementTypes[elemIdx] = elemType;
        
        // Nodal connectivity and coordinates (up to 8 nodes).
        const ID& nodes = element->getExternalNodes();
        int numNodes = nodes.Size();
        for (int i = 0; i < 8 && i < numNodes; i++) {
            h_elementConnectivity[elemIdx * 8 + i] = nodes(i);
            Node* node = theAnalysisModel->getDomain()->getNode(nodes(i));
            if (node) {
                const Vector& coords = node->getCrds();
                for (int j = 0; j < 3 && j < coords.Size(); j++) {
                    h_elementCoords[elemIdx * 8 * 3 + i * 3 + j] = coords(j);
                }
            }
        }
        
        // Local-to-global equation numbers (up to 24 DOFs), used both for
        // gathering displacements and for scattering residuals.
        const ID& elementID = elePtr->getID();
        for (int i = 0; i < 24 && i < elementID.Size(); i++) {
            h_elementDOFMaps[elemIdx * 24 + i] = elementID(i);
            h_assemblyIndices[elemIdx * 24 + i] = elementID(i);
        }
        
        // Material properties: placeholder linear-elastic values. A full
        // integration would pull these from the element's material objects.
        h_elementProperties[elemIdx * 10 + 0] = 30000.0; // E
        h_elementProperties[elemIdx * 10 + 1] = 0.3;     // nu
        
        elemIdx++;
    }
    
    // Async H2D copies (skipped for empty models — zero-byte copies from a
    // possibly-null data() pointer serve no purpose).
    if (currentElements > 0) {
        CUDA_CHECK(cudaMemcpyAsync(gpuData.d_elementCoords, h_elementCoords.data(),
                                  currentElements * 8 * 3 * sizeof(double),
                                  cudaMemcpyHostToDevice, streams.transferStream));
        
        CUDA_CHECK(cudaMemcpyAsync(gpuData.d_elementConnectivity, h_elementConnectivity.data(),
                                  currentElements * 8 * sizeof(int),
                                  cudaMemcpyHostToDevice, streams.transferStream));
        
        CUDA_CHECK(cudaMemcpyAsync(gpuData.d_elementTypes, h_elementTypes.data(),
                                  currentElements * sizeof(int),
                                  cudaMemcpyHostToDevice, streams.transferStream));
        
        CUDA_CHECK(cudaMemcpyAsync(gpuData.d_elementProperties, h_elementProperties.data(),
                                  currentElements * 10 * sizeof(double),
                                  cudaMemcpyHostToDevice, streams.transferStream));
        
        CUDA_CHECK(cudaMemcpyAsync(gpuData.d_elementDOFMaps, h_elementDOFMaps.data(),
                                  currentElements * 24 * sizeof(int),
                                  cudaMemcpyHostToDevice, streams.transferStream));
        
        CUDA_CHECK(cudaMemcpyAsync(gpuData.d_assemblyIndices, h_assemblyIndices.data(),
                                  currentElements * 24 * sizeof(int),
                                  cudaMemcpyHostToDevice, streams.transferStream));
    }
    
    if (currentDOFs > 0) {
        CUDA_CHECK(cudaMemcpyAsync(gpuData.d_globalDisplacements, h_globalDisplacements.data(),
                                  currentDOFs * sizeof(double),
                                  cudaMemcpyHostToDevice, streams.transferStream));
    }
    
    return 0;
}

/**
 * Stage nodal unbalanced loads and nodal DOF->equation maps into host
 * buffers and issue async H2D copies on transferStream. Unfilled map
 * entries stay at -1 ("not in the system"); unfilled loads stay at 0.
 * Returns 0 on success, -1 on a CUDA error.
 */
int GPU_FormUnbalance::updateNodalData(AnalysisModel* theAnalysisModel) {
    const int dofsPerNode = 6;
    std::vector<double> h_loads(currentNodes * dofsPerNode, 0.0);
    std::vector<int> h_dofMap(currentNodes * dofsPerNode, -1);
    
    // One DOF group per node.
    DOF_GrpIter& groups = theAnalysisModel->getDOFs();
    DOF_Group* group;
    int slot = 0;
    
    while ((group = groups()) != 0 && slot < currentNodes) {
        const int base = slot * dofsPerNode;
        
        // Applied loads (simplified — would need load-pattern integration).
        Node* node = group->getNode();
        if (node) {
            const Vector* loads = node->getUnbalancedLoad();
            if (loads) {
                const int n = (loads->Size() < dofsPerNode) ? loads->Size() : dofsPerNode;
                for (int i = 0; i < n; i++)
                    h_loads[base + i] = (*loads)(i);
            }
        }
        
        // DOF -> global equation mapping.
        const ID& eqns = group->getID();
        const int n = (eqns.Size() < dofsPerNode) ? eqns.Size() : dofsPerNode;
        for (int i = 0; i < n; i++)
            h_dofMap[base + i] = eqns(i);
        
        ++slot;
    }
    
    // Async H2D copies on the transfer stream.
    CUDA_CHECK(cudaMemcpyAsync(gpuData.d_nodalLoads, h_loads.data(),
                              currentNodes * dofsPerNode * sizeof(double),
                              cudaMemcpyHostToDevice, streams.transferStream));
    
    CUDA_CHECK(cudaMemcpyAsync(gpuData.d_nodalDOFMaps, h_dofMap.data(),
                              currentNodes * dofsPerNode * sizeof(int),
                              cudaMemcpyHostToDevice, streams.transferStream));
    
    return 0;
}

// Launch the element-residual kernel on the compute stream.
// Brick elements: 8 nodes and 24 DOFs (3 per node) per element.
// Returns 0 on success, -1 if the launch was rejected.
int GPU_FormUnbalance::computeElementResidualsGPU() {
    const int nodesPerElem = 8;
    const int dofsPerElem = 24;
    
    launch_computeElementResiduals(
        gpuData.d_elementCoords, gpuData.d_elementConnectivity, gpuData.d_elementTypes,
        gpuData.d_elementProperties, gpuData.d_globalDisplacements,
        gpuData.d_elementDOFMaps, gpuData.d_elementResiduals,
        currentElements, nodesPerElem, dofsPerElem, streams.computeStream);
    
    if (!checkGPUError("computeElementResidualsGPU"))
        return -1;
    return 0;
}

// Launch the nodal-load kernel (adds applied loads into the global
// residual) on the compute stream. Returns 0 on success, -1 on a rejected
// launch.
int GPU_FormUnbalance::computeNodalUnbalanceGPU() {
    const int dofsPerNode = 6;
    
    launch_computeNodalUnbalance(
        gpuData.d_nodalLoads, gpuData.d_nodalDOFMaps,
        gpuData.d_globalResidual, currentNodes, dofsPerNode,
        streams.computeStream);
    
    if (!checkGPUError("computeNodalUnbalanceGPU"))
        return -1;
    return 0;
}

// Launch the assembly kernel (scatters per-element residuals into the
// global residual vector) on the compute stream. Returns 0 on success,
// -1 on a rejected launch.
int GPU_FormUnbalance::assembleGlobalResidualGPU() {
    const int dofsPerElem = 24;
    
    launch_assembleGlobalResidual(
        gpuData.d_elementResiduals, gpuData.d_assemblyIndices,
        gpuData.d_assemblyOffsets, gpuData.d_globalResidual,
        currentElements, dofsPerElem, streams.computeStream);
    
    if (!checkGPUError("assembleGlobalResidualGPU"))
        return -1;
    return 0;
}

/**
 * Copy the assembled global residual from the device into the LinearSOE
 * right-hand-side vector. The residual is produced by kernels on
 * computeStream but copied back on transferStream, so computeStream is
 * synchronized first — without that the D2H copy could read a
 * partially-written residual. Returns 0 on success, -1 on a CUDA error.
 */
int GPU_FormUnbalance::transferResultFromGPU(LinearSOE* theSOE) {
    // Ensure all compute-stream kernels writing d_globalResidual are done
    // before the copy on transferStream reads it.
    CUDA_CHECK(cudaStreamSynchronize(streams.computeStream));
    
    // Get the RHS vector from LinearSOE and size it to the system.
    Vector& B = theSOE->getB();
    if (B.Size() != currentDOFs) {
        B.resize(currentDOFs);
    }
    
    // D2H copy of the residual, then block until it lands.
    std::vector<double> h_result(currentDOFs);
    CUDA_CHECK(cudaMemcpyAsync(h_result.data(), gpuData.d_globalResidual,
                              currentDOFs * sizeof(double),
                              cudaMemcpyDeviceToHost, streams.transferStream));
    
    CUDA_CHECK(cudaStreamSynchronize(streams.transferStream));
    
    // Copy into the OpenSees Vector.
    for (int i = 0; i < currentDOFs; i++) {
        B(i) = h_result[i];
    }
    
    return 0;
}

/**
 * Grow the device buffers when the model exceeds any current capacity.
 * Capacities only ever grow (each is raised to at least its old value).
 * Returns 0 when no reallocation is needed or it succeeds, -1 on failure.
 */
int GPU_FormUnbalance::reallocateIfNeeded(int newElements, int newDOFs, int newNodes) {
    const bool fits = newElements <= maxElements
                   && newDOFs <= maxDOFs
                   && newNodes <= maxNodes;
    if (fits)
        return 0;
    
    std::cout << "Reallocating GPU memory: Elements " << maxElements << "->" << newElements
              << ", DOFs " << maxDOFs << "->" << newDOFs
              << ", Nodes " << maxNodes << "->" << newNodes << std::endl;
    
    // Drop the old buffers before growing the capacities.
    freeGPUMemory();
    
    if (newElements > maxElements) maxElements = newElements;
    if (newDOFs > maxDOFs)         maxDOFs = newDOFs;
    if (newNodes > maxNodes)       maxNodes = newNodes;
    
    return allocateGPUMemory();
}

// Pop CUDA's sticky error state (cudaGetLastError reads AND clears it) and
// report any pending launch/runtime error tagged with the caller-supplied
// operation name. Returns true when no error is pending.
bool GPU_FormUnbalance::checkGPUError(const char* operation) {
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return true;
    
    std::cerr << "CUDA error in " << operation << ": " << cudaGetErrorString(status) << std::endl;
    return false;
}