#include "GPUSolver.h"
#include "GPUElementSupport.h"
#include "GPUElementKernels.h"
#include "GPUConcreteKernels.h"
#include "GPUMatrixAssembly.h"
#include "GPUMaterialExtractor.h"  // 材料参数提取器
#include <cassert>
#include <cstring>
#include <iostream>
#include <Graph.h>
#include <Vertex.h>
#include <VertexIter.h>
#include <ID.h>
#include <Matrix.h>
#include <Domain.h>
#include <Element.h>
#include <Node.h>
#include <NodeIter.h>
#include <ElementIter.h>
#include <DOF_Group.h>
#include <AnalysisModel.h>
#include <FE_Element.h>
#include <FE_EleIter.h>
#include <cmath>
#include <cstdio>
#include <ctime>
#include <cusparse.h>
#include <cuda_runtime.h>
#include <cusolverSp.h>
#include <fstream>
#include <algorithm>
#include <set>
#include <map>
#include <chrono>
#include <cublas_v2.h>
#include <vector>
#include <string>
#include <sstream>
#include <iomanip>
#include <stdexcept>  // std::runtime_error (thrown by CHECK_CUDA_MALLOC); avoid relying on transitive includes

// ============================================================
// Global flag for GPU solver mode
// ============================================================
bool g_useGPUSolver = false;  // Global flag: whether the GPU solver path is active
GPUSolver* g_gpuSolverInstance = nullptr;  // Global GPU solver instance pointer (consumed by the commitState callback below)

// ============================================================
// C-style callback for Domain::commit() to avoid header dependency
// ============================================================
extern "C" {
	// Forwards to the active GPU solver's commitState() when the GPU path is
	// enabled; a no-op otherwise.
	void gpuSolver_commitState() {
		if (!g_useGPUSolver || g_gpuSolverInstance == nullptr)
			return;
		g_gpuSolverInstance->commitState();
	}
}

// ============================================================
// Performance optimization: Conditional debug output
// ============================================================
#if GPU_ENABLE_DEBUG_OUTPUT
    #define DEBUG_COUT std::cout
#else
    // Null output stream that discards all output. overflow() returns the
    // character (any value other than EOF signals success), so the stream
    // never enters a fail state. `override` added: this overrides the virtual
    // std::streambuf::overflow, and the keyword lets the compiler verify it.
    namespace {
        class NullBuffer : public std::streambuf {
        public:
            int overflow(int c) override { return c; }
        };
        static NullBuffer nullBuffer;
        static std::ostream nullStream(&nullBuffer);
    }
    #define DEBUG_COUT nullStream
#endif

// Forward declarations (to avoid circular header dependencies)
class FourNodeQuad;
class NDMaterial;
class Response;
class Information;
// The real implementations live in FastVectorOps.cu; no stub implementations are needed here.

// ====== Jacobi Preconditioner CUDA Function Declaration ======
extern "C" {
    // Device-side helpers implemented in a separate .cu file. Parameter names
    // suggest d_z = d_M_inv ⊙ d_r elementwise — confirm in the implementation.
    void jacobiPrecondition(int n, const double* d_M_inv, const double* d_r, double* d_z);
    void jacobiPrecondition_fp32(int n, const float* d_M_inv, const float* d_r, float* d_z);
    // Elementwise precision converters between double and float device arrays
    void convertDoubleToFloat(int n, const double* d_input, float* d_output);
    void convertFloatToDouble(int n, const float* d_input, double* d_output);
}
// ====== End of Preconditioner Declaration ======

// ====== GPU Matrix Assembly Debug Output Control System Implementation ======

// Static variable definitions (debug disabled, all modules off, ERROR level by default)
bool GPUSolver::debugEnabled = false;
bool GPUSolver::moduleEnabled[5] = {false, false, false, false, false};
GPUSolver::DebugLevel GPUSolver::currentDebugLevel = GPUSolver::DebugLevel::ERROR;

// ====== Persistent GPU Resources Static Variables ======
// Library handles shared across all GPUSolver instances (reference-counted via s_instanceCount)
cublasHandle_t GPUSolver::s_cublasHandle = nullptr;
cusparseHandle_t GPUSolver::s_cusparseHandle = nullptr;
bool GPUSolver::s_handlesInitialized = false;
int GPUSolver::s_instanceCount = 0;
// ====== End of Persistent GPU Resources Static Variables ======

// ====== GPU Vector Operations Static Variables ======
// Per-operation timing data (one slot per vector-operation type)
double GPUSolver::vectorOpStartTimes[6] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
double GPUSolver::vectorOpTotalTimes[6] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
int GPUSolver::vectorOpCounts[6] = {0, 0, 0, 0, 0, 0};
bool GPUSolver::vectorOpTimingEnabled = true;

// Vector-operation debug control (separate from the main module flags above)
bool GPUSolver::vectorOpDebugModules[5] = {false, false, false, false, false};
int GPUSolver::vectorOpDebugLevel = static_cast<int>(GPUSolver::DebugLevel::INFO);

// CUBLAS vs. custom fast-op performance comparison data
double GPUSolver::lastCUBLASTime[6] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
double GPUSolver::lastFastOpTime[6] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
int GPUSolver::operationComparisonCount[6] = {0, 0, 0, 0, 0, 0};

// Helper function implementations

// Map a DebugModule enumerator to its human-readable name ("UNKNOWN" for
// unrecognized values).
const char* GPUSolver::getModuleName(DebugModule module) {
    if (module == DebugModule::MATRIX_ASSEMBLY)   return "MATRIX_ASSEMBLY";
    if (module == DebugModule::GPU_COMPUTE)       return "GPU_COMPUTE";
    if (module == DebugModule::MEMORY_MANAGEMENT) return "MEMORY_MANAGEMENT";
    if (module == DebugModule::DATA_TRANSFER)     return "DATA_TRANSFER";
    if (module == DebugModule::ERROR_HANDLING)    return "ERROR_HANDLING";
    return "UNKNOWN";
}

// Map a DebugLevel enumerator to its human-readable name ("UNKNOWN" for
// unrecognized values).
const char* GPUSolver::getLevelName(DebugLevel level) {
    if (level == DebugLevel::ERROR)   return "ERROR";
    if (level == DebugLevel::WARNING) return "WARNING";
    if (level == DebugLevel::INFO)    return "INFO";
    if (level == DebugLevel::DEBUG)   return "DEBUG";
    if (level == DebugLevel::VERBOSE) return "VERBOSE";
    return "UNKNOWN";
}

// Public interface implementations

// Master debug switch. Enabling also turns on every module and resets the
// verbosity to WARNING so the default output volume stays low.
void GPUSolver::enableDebug(bool enabled) {
    debugEnabled = enabled;
    if (!enabled) {
        DEBUG_COUT << "[GPUSolver] Debug output disabled" << std::endl;
        return;
    }
    for (bool& moduleFlag : moduleEnabled)
        moduleFlag = true;
    currentDebugLevel = DebugLevel::WARNING; // Default level changed to WARNING to reduce output
    DEBUG_COUT << "[GPUSolver] Debug output enabled with default settings (WARNING level)" << std::endl;
}

// Turn off the master debug switch; per-module flags and the verbosity level
// are left untouched.
void GPUSolver::disableDebug() {
    debugEnabled = false;
}

// Enable/disable debug output for a single module selected by index (0..4).
// Out-of-range indices are silently ignored.
void GPUSolver::setModuleDebugEnabled(int moduleIndex, bool enabled) {
    const bool inRange = (moduleIndex >= 0) && (moduleIndex < 5);
    if (!inRange)
        return;
    moduleEnabled[moduleIndex] = enabled;
    DEBUG_COUT << "[GPUSolver] Module " << moduleIndex << " debug " 
              << (enabled ? "enabled" : "disabled") << std::endl;
}

// Set the global verbosity from an int in [0,4] (cast directly onto
// DebugLevel). Values outside the range are silently ignored.
void GPUSolver::setDebugVerbosity(int level) {
    if (level < 0 || level > 4)
        return;
    currentDebugLevel = static_cast<DebugLevel>(level);
    DEBUG_COUT << "[GPUSolver] Debug level set to " << level << std::endl;
}

// Switch every debug module on.
void GPUSolver::enableAllDebugModules() {
    std::fill(moduleEnabled, moduleEnabled + 5, true);
    DEBUG_COUT << "[GPUSolver] All debug modules enabled" << std::endl;
}

// Switch every debug module off.
void GPUSolver::disableAllDebugModules() {
    std::fill(moduleEnabled, moduleEnabled + 5, false);
    DEBUG_COUT << "[GPUSolver] All debug modules disabled" << std::endl;
}

// ====== End of Debug Output Control System Implementation ======

// ====== Enhanced CUDA Error Detection and Recovery System ======

// Process-wide GPU error counters, bucketed by originating library
// (CUDA runtime, cuSPARSE, cuSOLVER) plus a running total.
struct GPUErrorStatistics {
    static int cudaErrorCount;
    static int cusparseErrorCount;
    static int cusolverErrorCount;
    static int totalErrorCount;

    static void incrementCudaError()     { ++cudaErrorCount;     ++totalErrorCount; }
    static void incrementCusparseError() { ++cusparseErrorCount; ++totalErrorCount; }
    static void incrementCusolverError() { ++cusolverErrorCount; ++totalErrorCount; }

    // Emit the current counters through the error-handling debug channel.
    static void printStatistics() {
        GPU_DEBUG_PRINT(GPUSolver::DebugModule::ERROR_HANDLING, GPUSolver::DebugLevel::INFO,
            "GPU Error Statistics - CUDA: " << cudaErrorCount <<
            ", CUSPARSE: " << cusparseErrorCount <<
            ", CUSOLVER: " << cusolverErrorCount <<
            ", Total: " << totalErrorCount);
    }
};

// Static counter definitions (all counters start at zero)
int GPUErrorStatistics::cudaErrorCount = 0;
int GPUErrorStatistics::cusparseErrorCount = 0;
int GPUErrorStatistics::cusolverErrorCount = 0;
int GPUErrorStatistics::totalErrorCount = 0;

// Enhanced CUDA error-check macro — records error statistics and applies a
// recovery strategy. Wrapped in do { ... } while (0) so that an invocation
// followed by ';' expands to exactly one statement; the previous bare { }
// block left a dangling ';' that broke unbraced if/else usage
// (`if (c) CHECK_CUDA(x); else ...`).
#define CHECK_CUDA_ENHANCED(func) \
    do { \
        cudaError_t status = (func); \
        if (status != cudaSuccess) { \
            GPUErrorStatistics::incrementCudaError(); \
            GPU_DEBUG_PRINT(GPUSolver::DebugModule::ERROR_HANDLING, GPUSolver::DebugLevel::ERROR, \
                "CUDA Error in " << __FILE__ << ":" << __LINE__ << " - " << #func << \
                " failed with error: " << cudaGetErrorString(status) << " (code: " << status << ")"); \
            \
            /* Recovery strategy: on allocation failure, drain pending GPU work */ \
            /* so a subsequent cleanup/retry has the best chance of succeeding. */ \
            if (status == cudaErrorMemoryAllocation) { \
                GPU_DEBUG_PRINT(GPUSolver::DebugModule::ERROR_HANDLING, GPUSolver::DebugLevel::WARNING, \
                    "Memory allocation failed - attempting GPU memory cleanup"); \
                cudaDeviceSynchronize(); \
                /* A dedicated memory-cleanup hook could be invoked here. */ \
            } \
        } \
    } while (0)

// Enhanced CUSPARSE error-check macro — records error statistics and decodes
// the status into a readable name. Wrapped in do { ... } while (0) so the
// expansion plus the caller's ';' forms a single statement and stays safe
// inside unbraced if/else bodies (the bare { } form did not).
#define CHECK_CUSPARSE_ENHANCED(func) \
    do { \
        cusparseStatus_t status = (func); \
        if (status != CUSPARSE_STATUS_SUCCESS) { \
            GPUErrorStatistics::incrementCusparseError(); \
            const char* errorName = "UNKNOWN"; \
            switch(status) { \
                case CUSPARSE_STATUS_NOT_INITIALIZED: errorName = "NOT_INITIALIZED"; break; \
                case CUSPARSE_STATUS_ALLOC_FAILED: errorName = "ALLOC_FAILED"; break; \
                case CUSPARSE_STATUS_INVALID_VALUE: errorName = "INVALID_VALUE"; break; \
                case CUSPARSE_STATUS_ARCH_MISMATCH: errorName = "ARCH_MISMATCH"; break; \
                case CUSPARSE_STATUS_MAPPING_ERROR: errorName = "MAPPING_ERROR"; break; \
                case CUSPARSE_STATUS_EXECUTION_FAILED: errorName = "EXECUTION_FAILED"; break; \
                case CUSPARSE_STATUS_INTERNAL_ERROR: errorName = "INTERNAL_ERROR"; break; \
                case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: errorName = "MATRIX_TYPE_NOT_SUPPORTED"; break; \
                case CUSPARSE_STATUS_ZERO_PIVOT: errorName = "ZERO_PIVOT"; break; \
                default: break; \
            } \
            GPU_DEBUG_PRINT(GPUSolver::DebugModule::ERROR_HANDLING, GPUSolver::DebugLevel::ERROR, \
                "CUSPARSE Error in " << __FILE__ << ":" << __LINE__ << " - " << #func << \
                " failed with error: " << errorName << " (code: " << status << ")"); \
        } \
    } while (0)

// Enhanced CUSOLVER error-check macro — records error statistics and decodes
// the status into a readable name. Wrapped in do { ... } while (0) so the
// expansion plus the caller's ';' forms a single statement and stays safe
// inside unbraced if/else bodies (the bare { } form did not).
#define CHECK_CUSOLVER_ENHANCED(func) \
    do { \
        cusolverStatus_t status = (func); \
        if (status != CUSOLVER_STATUS_SUCCESS) { \
            GPUErrorStatistics::incrementCusolverError(); \
            const char* errorName = "UNKNOWN"; \
            switch(status) { \
                case CUSOLVER_STATUS_NOT_INITIALIZED: errorName = "NOT_INITIALIZED"; break; \
                case CUSOLVER_STATUS_ALLOC_FAILED: errorName = "ALLOC_FAILED"; break; \
                case CUSOLVER_STATUS_INVALID_VALUE: errorName = "INVALID_VALUE"; break; \
                case CUSOLVER_STATUS_ARCH_MISMATCH: errorName = "ARCH_MISMATCH"; break; \
                case CUSOLVER_STATUS_MAPPING_ERROR: errorName = "MAPPING_ERROR"; break; \
                case CUSOLVER_STATUS_EXECUTION_FAILED: errorName = "EXECUTION_FAILED"; break; \
                case CUSOLVER_STATUS_INTERNAL_ERROR: errorName = "INTERNAL_ERROR"; break; \
                case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: errorName = "MATRIX_TYPE_NOT_SUPPORTED"; break; \
                case CUSOLVER_STATUS_NOT_SUPPORTED: errorName = "NOT_SUPPORTED"; break; \
                case CUSOLVER_STATUS_ZERO_PIVOT: errorName = "ZERO_PIVOT"; break; \
                case CUSOLVER_STATUS_INVALID_LICENSE: errorName = "INVALID_LICENSE"; break; \
                default: break; \
            } \
            GPU_DEBUG_PRINT(GPUSolver::DebugModule::ERROR_HANDLING, GPUSolver::DebugLevel::ERROR, \
                "CUSOLVER Error in " << __FILE__ << ":" << __LINE__ << " - " << #func << \
                " failed with error: " << errorName << " (code: " << status << ")"); \
        } \
    } while (0)

// Backward-compatible aliases for the legacy macro names
#define CHECK_CUDA(func) CHECK_CUDA_ENHANCED(func)
#define CHECK_CUSPARSE(func) CHECK_CUSPARSE_ENHANCED(func)  
#define CHECK_CUSOLVER(func) CHECK_CUSOLVER_ENHANCED(func)

// GPU memory allocation + monitoring macro — performs cudaMalloc, records the
// allocation with GPUMemoryMonitor on success, and throws std::runtime_error
// on failure. Wrapped in do { ... } while (0) so the expansion plus the
// caller's ';' forms a single statement and stays safe in unbraced if/else.
#define CHECK_CUDA_MALLOC(ptr, size) \
    do { \
        cudaError_t status = cudaMalloc(ptr, size); \
        if (status != cudaSuccess) { \
            GPUErrorStatistics::incrementCudaError(); \
            GPU_DEBUG_PRINT(GPUSolver::DebugModule::ERROR_HANDLING, GPUSolver::DebugLevel::ERROR, \
                "cudaMalloc failed in " << __FILE__ << ":" << __LINE__ << \
                " - size: " << size << " bytes, error: " << cudaGetErrorString(status)); \
            throw std::runtime_error("CUDA memory allocation failed"); \
        } else { \
            GPUMemoryMonitor::recordAllocation(*ptr, size); \
        } \
    } while (0)

// GPU memory release + monitoring macro — frees device memory via cudaFree,
// records the deallocation with GPUMemoryMonitor, and always nulls out `ptr`
// afterwards. A null `ptr` is a no-op. Wrapped in do { ... } while (0) so the
// expansion plus the caller's ';' forms a single statement and stays safe in
// unbraced if/else bodies.
#define CHECK_CUDA_FREE(ptr) \
    do { \
        if (ptr != nullptr) { \
            void* _temp_ptr = ptr; \
            cudaError_t status = cudaFree(_temp_ptr); \
            if (status != cudaSuccess) { \
                GPUErrorStatistics::incrementCudaError(); \
                GPU_DEBUG_PRINT(GPUSolver::DebugModule::ERROR_HANDLING, GPUSolver::DebugLevel::ERROR, \
                    "cudaFree failed in " << __FILE__ << ":" << __LINE__ << \
                    " - ptr: " << _temp_ptr << ", error: " << cudaGetErrorString(status)); \
            } else { \
                GPUMemoryMonitor::recordDeallocation(_temp_ptr); \
            } \
            ptr = nullptr; \
        } \
    } while (0)

// ====== GPU Error Monitoring Interface Implementation ======

// Returns the total number of GPU errors recorded (CUDA + CUSPARSE + CUSOLVER).
int GPUSolver::getGPUErrorCount() {
    return GPUErrorStatistics::totalErrorCount;
}

// Returns the number of CUDA runtime errors recorded.
int GPUSolver::getCudaErrorCount() {
    return GPUErrorStatistics::cudaErrorCount;
}

// Returns the number of cuSPARSE errors recorded.
int GPUSolver::getCusparseErrorCount() {
    return GPUErrorStatistics::cusparseErrorCount;
}

// Returns the number of cuSOLVER errors recorded.
int GPUSolver::getCusolverErrorCount() {
    return GPUErrorStatistics::cusolverErrorCount;
}

// Print the per-library GPU error counters through the debug channel.
void GPUSolver::printGPUErrorStatistics() {
    GPUErrorStatistics::printStatistics();
}

// Zero every GPU error counter, then log that the statistics were reset.
void GPUSolver::resetGPUErrorStatistics() {
    GPUErrorStatistics::totalErrorCount =
        GPUErrorStatistics::cudaErrorCount =
        GPUErrorStatistics::cusparseErrorCount =
        GPUErrorStatistics::cusolverErrorCount = 0;
    GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::INFO, "GPU error statistics reset");
}

// ====== End of GPU Error Monitoring Interface Implementation ======

// ====== Performance Timing System Implementation ======

// Performance-timing statistics holder: one slot per GPUSolver::TimerType
// category. All state is static (process-wide); start/stop logic lives in
// GPUSolver::startTimer / stopTimer below.
struct GPUPerformanceTimer {
    static constexpr int NUM_TIMER_TYPES = 7;  // Must match the TimerType enum count (ERROR_RECOVERY added)
    static bool timingEnabled;                 // Master switch for all timing
    static std::chrono::high_resolution_clock::time_point startTimes[NUM_TIMER_TYPES];  // Start point of the active session per slot
    static double elapsedTimes[NUM_TIMER_TYPES];           // Time of the current (most recent) timing session
    static double totalElapsedTimes[NUM_TIMER_TYPES];      // Accumulated total time per slot
    static int callCounts[NUM_TIMER_TYPES];                // Completed start/stop cycles per slot
    static bool timerActive[NUM_TIMER_TYPES];              // Whether a session is currently running per slot
    
    // Human-readable name for a TimerType value ("Unknown Timer" otherwise).
    static const char* getTimerName(GPUSolver::TimerType timerType) {
        switch (timerType) {
            case GPUSolver::TimerType::TOTAL_SOLVE: return "Total Solve";
            case GPUSolver::TimerType::MEMORY_TRANSFER: return "Memory Transfer";
            case GPUSolver::TimerType::GPU_COMPUTATION: return "GPU Computation";
            case GPUSolver::TimerType::MATRIX_ASSEMBLY: return "Matrix Assembly";
            case GPUSolver::TimerType::ERROR_HANDLING: return "Error Handling";
            case GPUSolver::TimerType::DATA_SYNCHRONIZATION: return "Data Synchronization";
            case GPUSolver::TimerType::ERROR_RECOVERY: return "Error Recovery";
            default: return "Unknown Timer";
        }
    }
    
    // Print one line per used timer (total, calls, average, and percentage of
    // the TOTAL_SOLVE accumulated time), followed by a grand total.
    static void printDetailedStatistics() {
        if (!timingEnabled) {
            printf("[PERF] Performance timing is disabled\n");
            return;
        }

        printf("\n=== GPU Performance Timing Statistics ===\n");

        double totalTime = 0.0;
        for (int i = 0; i < NUM_TIMER_TYPES; i++) {
            if (callCounts[i] > 0) {
                double avgTime = totalElapsedTimes[i] / callCounts[i];
                double percentage = 0.0;
                // Percentages are relative to the TOTAL_SOLVE accumulated time
                if (totalElapsedTimes[static_cast<int>(GPUSolver::TimerType::TOTAL_SOLVE)] > 0) {
                    percentage = (totalElapsedTimes[i] / totalElapsedTimes[static_cast<int>(GPUSolver::TimerType::TOTAL_SOLVE)]) * 100.0;
                }

                printf("  %-25s: Total=%.6fs, Calls=%d, Avg=%.6fs, Percentage=%.2f%%\n",
                    getTimerName(static_cast<GPUSolver::TimerType>(i)),
                    totalElapsedTimes[i],
                    callCounts[i],
                    avgTime,
                    percentage);

                // NOTE(review): totalTime sums every slot including TOTAL_SOLVE,
                // so the grand total below double-counts work already covered by
                // TOTAL_SOLVE — confirm this is intended.
                totalTime += totalElapsedTimes[i];
            }
        }

        printf("  Total Measured Time: %.6fs\n", totalTime);
        printf("=== End of Performance Statistics ===\n\n");
        fflush(stdout);
    }
};

// Static member definitions
bool GPUPerformanceTimer::timingEnabled = true;  // Performance profiling enabled by default
std::chrono::high_resolution_clock::time_point GPUPerformanceTimer::startTimes[GPUPerformanceTimer::NUM_TIMER_TYPES];
double GPUPerformanceTimer::elapsedTimes[GPUPerformanceTimer::NUM_TIMER_TYPES] = {0.0};
double GPUPerformanceTimer::totalElapsedTimes[GPUPerformanceTimer::NUM_TIMER_TYPES] = {0.0};
int GPUPerformanceTimer::callCounts[GPUPerformanceTimer::NUM_TIMER_TYPES] = {0};
bool GPUPerformanceTimer::timerActive[GPUPerformanceTimer::NUM_TIMER_TYPES] = {false};

// Performance timing interface implementation

// Begin timing a category. No-op when timing is disabled or the index is out
// of range; logs a warning when the timer is already running.
void GPUSolver::startTimer(TimerType timerType) {
    if (!GPUPerformanceTimer::timingEnabled)
        return;

    const int slot = static_cast<int>(timerType);
    if (slot < 0 || slot >= GPUPerformanceTimer::NUM_TIMER_TYPES)
        return;

    if (GPUPerformanceTimer::timerActive[slot]) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
            "Timer " << GPUPerformanceTimer::getTimerName(timerType) << " is already active");
        return;
    }

    GPUPerformanceTimer::timerActive[slot] = true;
    GPUPerformanceTimer::startTimes[slot] = std::chrono::high_resolution_clock::now();

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::VERBOSE,
        "Started timer: " << GPUPerformanceTimer::getTimerName(timerType));
}

// Stop a running timer: record the session time, accumulate the total, bump
// the call count, and mark the slot inactive. Logs a warning when the timer
// was not running.
void GPUSolver::stopTimer(TimerType timerType) {
    if (!GPUPerformanceTimer::timingEnabled)
        return;

    const int slot = static_cast<int>(timerType);
    if (slot < 0 || slot >= GPUPerformanceTimer::NUM_TIMER_TYPES)
        return;

    if (!GPUPerformanceTimer::timerActive[slot]) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
            "Timer " << GPUPerformanceTimer::getTimerName(timerType) << " is not active");
        return;
    }

    const auto now = std::chrono::high_resolution_clock::now();
    const double seconds =
        std::chrono::duration<double>(now - GPUPerformanceTimer::startTimes[slot]).count();

    GPUPerformanceTimer::elapsedTimes[slot] = seconds;
    GPUPerformanceTimer::totalElapsedTimes[slot] += seconds;
    GPUPerformanceTimer::callCounts[slot]++;
    GPUPerformanceTimer::timerActive[slot] = false;

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::VERBOSE,
        "Stopped timer: " << GPUPerformanceTimer::getTimerName(timerType) << 
        " - Elapsed: " << GPUPerformanceTimer::elapsedTimes[slot] << "s");
}

// Return the most recent single-session time (seconds) for a timer, or 0.0
// for an out-of-range index.
double GPUSolver::getElapsedTime(TimerType timerType) {
    const int slot = static_cast<int>(timerType);
    const bool valid = (slot >= 0) && (slot < GPUPerformanceTimer::NUM_TIMER_TYPES);
    return valid ? GPUPerformanceTimer::elapsedTimes[slot] : 0.0;
}

// Return the accumulated total time (seconds) for a timer, or 0.0 for an
// out-of-range index.
double GPUSolver::getTotalElapsedTime(TimerType timerType) {
    const int slot = static_cast<int>(timerType);
    const bool valid = (slot >= 0) && (slot < GPUPerformanceTimer::NUM_TIMER_TYPES);
    return valid ? GPUPerformanceTimer::totalElapsedTimes[slot] : 0.0;
}

// Print the detailed per-timer performance report to stdout.
void GPUSolver::printTimingStatistics() {
    GPUPerformanceTimer::printDetailedStatistics();
}

// Zero all per-timer statistics and deactivate every timer, then log the reset.
void GPUSolver::resetTimingStatistics() {
    const int n = GPUPerformanceTimer::NUM_TIMER_TYPES;
    std::fill(GPUPerformanceTimer::elapsedTimes, GPUPerformanceTimer::elapsedTimes + n, 0.0);
    std::fill(GPUPerformanceTimer::totalElapsedTimes, GPUPerformanceTimer::totalElapsedTimes + n, 0.0);
    std::fill(GPUPerformanceTimer::callCounts, GPUPerformanceTimer::callCounts + n, 0);
    std::fill(GPUPerformanceTimer::timerActive, GPUPerformanceTimer::timerActive + n, false);
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, "Performance timing statistics reset");
}

// Globally enable or disable performance timing and log the new state.
void GPUSolver::enablePerformanceTiming(bool enabled) {
    GPUPerformanceTimer::timingEnabled = enabled;
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
        (enabled ? "Performance timing enabled" : "Performance timing disabled"));
}

// Convenience wrapper: turn performance timing off.
void GPUSolver::disablePerformanceTiming() {
    enablePerformanceTiming(false);
}

// Average seconds per completed start/stop cycle for a timer; 0.0 when the
// index is invalid or the timer was never used.
double GPUSolver::getAverageTime(TimerType timerType) {
    const int slot = static_cast<int>(timerType);
    if (slot < 0 || slot >= GPUPerformanceTimer::NUM_TIMER_TYPES)
        return 0.0;
    const int calls = GPUPerformanceTimer::callCounts[slot];
    return (calls > 0) ? GPUPerformanceTimer::totalElapsedTimes[slot] / calls : 0.0;
}

// ====== End of Performance Timing System Implementation ======

// ====== GPU Memory Monitoring System Implementation ======

// GPU memory monitoring statistics holder. All state is static (process-wide);
// tracking is driven by CHECK_CUDA_MALLOC / CHECK_CUDA_FREE, which call
// recordAllocation() / recordDeallocation() below.
struct GPUMemoryMonitor {
    static bool monitoringEnabled;        // Master switch; when false, record* calls are no-ops
    static size_t totalAllocated;          // Currently allocated GPU memory (bytes)
    static size_t peakUsage;              // Peak GPU memory usage (bytes)
    static int allocationCount;           // Number of GPU allocations recorded
    static int deallocationCount;         // Number of GPU deallocations recorded
    static std::vector<void*> allocatedPointers;    // Live allocation pointers, for leak detection
    static std::vector<size_t> allocationSizes;     // Allocation sizes, parallel to allocatedPointers
    
    // Record an allocation of `size` bytes at `ptr`; updates totals and peak.
    static void recordAllocation(void* ptr, size_t size) {
        if (!monitoringEnabled) return;
        
        totalAllocated += size;
        allocationCount++;
        
        if (totalAllocated > peakUsage) {
            peakUsage = totalAllocated;
        }
        
        // Remember the pointer so leaks can be detected later
        allocatedPointers.push_back(ptr);
        allocationSizes.push_back(size);
        
        GPU_DEBUG_PRINT(GPUSolver::DebugModule::MEMORY_MANAGEMENT, GPUSolver::DebugLevel::VERBOSE,
            "GPU memory allocated: " << size << " bytes, ptr=" << ptr << 
            ", total=" << totalAllocated << " bytes");
    }
    
    // Record a deallocation. Linear search through the records (O(n) per free).
    // NOTE(review): deallocationCount is incremented even when the pointer was
    // never recorded, so counts can exceed matched allocations — confirm intended.
    static void recordDeallocation(void* ptr) {
        if (!monitoringEnabled) return;
        
        // Find and remove the matching pointer record
        for (size_t i = 0; i < allocatedPointers.size(); i++) {
            if (allocatedPointers[i] == ptr) {
                size_t freedSize = allocationSizes[i];
                totalAllocated -= freedSize;
                deallocationCount++;
                
                allocatedPointers.erase(allocatedPointers.begin() + i);
                allocationSizes.erase(allocationSizes.begin() + i);
                
                GPU_DEBUG_PRINT(GPUSolver::DebugModule::MEMORY_MANAGEMENT, GPUSolver::DebugLevel::VERBOSE,
                    "GPU memory freed: " << freedSize << " bytes, ptr=" << ptr << 
                    ", total=" << totalAllocated << " bytes");
                return;
            }
        }
        
        // Pointer not found in the records — count it and log a warning
        deallocationCount++;
        GPU_DEBUG_PRINT(GPUSolver::DebugModule::ERROR_HANDLING, GPUSolver::DebugLevel::WARNING,
            "GPU memory deallocation: pointer not found in allocation records: " << ptr);
    }
    
    // True when at least one recorded allocation has not been freed
    static bool hasMemoryLeaks() {
        return !allocatedPointers.empty();
    }
    
    // Memory efficiency = peak tracked usage as a percentage of total device
    // memory. Returns 100.0 when nothing was ever allocated and -1.0 when the
    // device query fails.
    static double calculateEfficiency() {
        if (peakUsage == 0) return 100.0;
        
        // Query device memory info
        size_t free, total;
        cudaError_t result = cudaMemGetInfo(&free, &total);
        if (result != cudaSuccess) {
            GPU_DEBUG_PRINT(GPUSolver::DebugModule::ERROR_HANDLING, GPUSolver::DebugLevel::WARNING,
                "Failed to query GPU memory info: " << cudaGetErrorString(result));
            return -1.0;  // cannot be computed
        }
        
        // Efficiency = (peak tracked usage / total device memory) * 100
        double efficiency = (double)peakUsage / (double)total * 100.0;
        return efficiency;
    }
    
    // Print the full monitoring report: totals, peak, counts, leak warning,
    // efficiency, and the device's current free/used/total memory.
    static void printDetailedStatistics() {
        if (!monitoringEnabled) {
            GPU_DEBUG_PRINT(GPUSolver::DebugModule::MEMORY_MANAGEMENT, GPUSolver::DebugLevel::INFO,
                "GPU memory monitoring is disabled");
            return;
        }
        
        GPU_DEBUG_PRINT(GPUSolver::DebugModule::MEMORY_MANAGEMENT, GPUSolver::DebugLevel::INFO,
            "=== GPU Memory Monitoring Statistics ===");
            
        GPU_DEBUG_PRINT(GPUSolver::DebugModule::MEMORY_MANAGEMENT, GPUSolver::DebugLevel::INFO,
            "Current allocated: " << (totalAllocated / 1024.0 / 1024.0) << " MB (" << totalAllocated << " bytes)");
            
        GPU_DEBUG_PRINT(GPUSolver::DebugModule::MEMORY_MANAGEMENT, GPUSolver::DebugLevel::INFO,
            "Peak usage: " << (peakUsage / 1024.0 / 1024.0) << " MB (" << peakUsage << " bytes)");
            
        GPU_DEBUG_PRINT(GPUSolver::DebugModule::MEMORY_MANAGEMENT, GPUSolver::DebugLevel::INFO,
            "Allocation count: " << allocationCount << ", Deallocation count: " << deallocationCount);
            
        if (allocationCount != deallocationCount) {
            GPU_DEBUG_PRINT(GPUSolver::DebugModule::ERROR_HANDLING, GPUSolver::DebugLevel::WARNING,
                "Memory leak detected: " << (allocationCount - deallocationCount) << " unreleased allocations");
        }
        
        double efficiency = calculateEfficiency();
        if (efficiency >= 0) {
            GPU_DEBUG_PRINT(GPUSolver::DebugModule::MEMORY_MANAGEMENT, GPUSolver::DebugLevel::INFO,
                "GPU memory efficiency: " << efficiency << "%");
        }
        
        // Query and report the device's current memory state
        size_t free, total;
        cudaError_t result = cudaMemGetInfo(&free, &total);
        if (result == cudaSuccess) {
            size_t used = total - free;
            GPU_DEBUG_PRINT(GPUSolver::DebugModule::MEMORY_MANAGEMENT, GPUSolver::DebugLevel::INFO,
                "GPU device memory: Total=" << (total / 1024.0 / 1024.0) << " MB, " << 
                "Used=" << (used / 1024.0 / 1024.0) << " MB, " <<
                "Free=" << (free / 1024.0 / 1024.0) << " MB");
        }
        
        GPU_DEBUG_PRINT(GPUSolver::DebugModule::MEMORY_MANAGEMENT, GPUSolver::DebugLevel::INFO,
            "=== End of GPU Memory Statistics ===");
    }
    
    // Print one line per leaked allocation plus the leaked total
    static void reportLeaks() {
        if (allocatedPointers.empty()) {
            GPU_DEBUG_PRINT(GPUSolver::DebugModule::MEMORY_MANAGEMENT, GPUSolver::DebugLevel::INFO,
                "No GPU memory leaks detected");
            return;
        }
        
        GPU_DEBUG_PRINT(GPUSolver::DebugModule::ERROR_HANDLING, GPUSolver::DebugLevel::ERROR,
            "=== GPU Memory Leak Report ===");
            
        GPU_DEBUG_PRINT(GPUSolver::DebugModule::ERROR_HANDLING, GPUSolver::DebugLevel::ERROR,
            "Total leaked allocations: " << allocatedPointers.size());
            
        size_t totalLeaked = 0;
        for (size_t i = 0; i < allocatedPointers.size(); i++) {
            totalLeaked += allocationSizes[i];
            GPU_DEBUG_PRINT(GPUSolver::DebugModule::ERROR_HANDLING, GPUSolver::DebugLevel::ERROR,
                "Leak #" << (i+1) << ": ptr=" << allocatedPointers[i] << 
                ", size=" << allocationSizes[i] << " bytes");
        }
        
        GPU_DEBUG_PRINT(GPUSolver::DebugModule::ERROR_HANDLING, GPUSolver::DebugLevel::ERROR,
            "Total leaked memory: " << (totalLeaked / 1024.0 / 1024.0) << " MB (" << totalLeaked << " bytes)");
            
        GPU_DEBUG_PRINT(GPUSolver::DebugModule::ERROR_HANDLING, GPUSolver::DebugLevel::ERROR,
            "=== End of Leak Report ===");
    }
};

// Static member definitions (monitoring disabled by default)
bool GPUMemoryMonitor::monitoringEnabled = false;
size_t GPUMemoryMonitor::totalAllocated = 0;
size_t GPUMemoryMonitor::peakUsage = 0;
int GPUMemoryMonitor::allocationCount = 0;
int GPUMemoryMonitor::deallocationCount = 0;
std::vector<void*> GPUMemoryMonitor::allocatedPointers;
std::vector<size_t> GPUMemoryMonitor::allocationSizes;

// GPU memory monitoring public interface implementation

// Returns the currently allocated (tracked) GPU memory in bytes.
size_t GPUSolver::getTotalGPUMemoryAllocated() {
    return GPUMemoryMonitor::totalAllocated;
}

// Returns the peak tracked GPU memory usage in bytes.
size_t GPUSolver::getPeakGPUMemoryUsage() {
    return GPUMemoryMonitor::peakUsage;
}

// Returns the number of tracked GPU memory allocations.
int GPUSolver::getGPUMemoryAllocationCount() {
    return GPUMemoryMonitor::allocationCount;
}

// Returns peak tracked usage as a percentage of total device memory
// (-1.0 when the device query fails; see GPUMemoryMonitor::calculateEfficiency).
double GPUSolver::getGPUMemoryEfficiency() {
    return GPUMemoryMonitor::calculateEfficiency();
}

// Print the full GPU memory monitoring report through the debug channel.
void GPUSolver::printGPUMemoryStatistics() {
    GPUMemoryMonitor::printDetailedStatistics();
}

// Clear all memory-monitoring counters and drop the allocation records, then
// log the reset. Does not change the monitoring enabled/disabled state.
void GPUSolver::resetGPUMemoryStatistics() {
    GPUMemoryMonitor::allocatedPointers.clear();
    GPUMemoryMonitor::allocationSizes.clear();
    GPUMemoryMonitor::totalAllocated = GPUMemoryMonitor::peakUsage = 0;
    GPUMemoryMonitor::allocationCount = GPUMemoryMonitor::deallocationCount = 0;

    GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO, 
        "GPU memory monitoring statistics reset");
}

// Turn GPU memory allocation tracking on/off and log the new state.
void GPUSolver::enableGPUMemoryMonitoring(bool enabled) {
    GPUMemoryMonitor::monitoringEnabled = enabled;
    GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO,
        (enabled ? "GPU memory monitoring enabled" : "GPU memory monitoring disabled"));
}

// Convenience wrapper: turn GPU memory monitoring off.
void GPUSolver::disableGPUMemoryMonitoring() {
    enableGPUMemoryMonitoring(false);
}

// Returns true when at least one tracked allocation has not been freed.
bool GPUSolver::detectGPUMemoryLeaks() {
    return GPUMemoryMonitor::hasMemoryLeaks();
}

// Print a per-allocation leak report through the debug channel.
void GPUSolver::reportGPUMemoryLeaks() {
    GPUMemoryMonitor::reportLeaks();
}

// Extended GPU memory monitoring interface implementation

// Log that the tracking system is initialized, then enable monitoring.
void GPUSolver::initializeGPUMemoryTracking() {
    GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO, 
        "GPU memory tracking system initialized");
    enableGPUMemoryMonitoring(true);
}

// Alias for getGPUMemoryAllocationCount(): number of tracked allocations.
int GPUSolver::getTotalAllocations() {
    return GPUMemoryMonitor::allocationCount;
}

// Alias for getTotalGPUMemoryAllocated(): currently tracked bytes on the GPU.
size_t GPUSolver::getCurrentAllocatedMemory() {
    return GPUMemoryMonitor::totalAllocated;
}

// Alias for getPeakGPUMemoryUsage(): peak tracked bytes on the GPU.
size_t GPUSolver::getPeakMemoryUsage() {
    return GPUMemoryMonitor::peakUsage;
}

// ====== End of GPU Memory Monitoring System Implementation ======

// ====== End of Enhanced CUDA Error Detection and Recovery System ======

// Forward declaration: device-side conjugate-gradient solver for a CSR system
// A x = b with n unknowns and nnz nonzeros, iterating to tolerance `tol` or at
// most `maxit` iterations. All d_* pointers are device memory.
// NOTE(review): return value semantics (status vs. iteration count) are not
// visible here — confirm in the implementing .cu file.
int cgSolveGPU(
	int n, int nnz,
	const double* d_val, const int* d_rowPtr, const int* d_colInd,
	const double* d_b,
	double* d_x,
	double tol,
	int maxit
);


// Constructor: initialize every member, create the persistent CUDA library
// handles, and enable the debug / monitoring subsystems.
//
// Side effects:
//  - sets g_useGPUSolver / g_gpuSolverInstance so Domain::commit() can call
//    back into this instance via gpuSolver_commitState()
//  - creates a cusparse handle + matrix descriptor and a persistent cuSolver
//    handle (all destroyed in ~GPUSolver)
//  - allocates the VectorOpManager and initializes it for vectors of up to
//    100000 entries, falling back to CUBLAS mode on failure
GPUSolver::GPUSolver()
	: LinearSOE(LinSOE_TAGS_GPUSolver),           // base-class tag
	// --- Core linear system: CSR matrix, RHS and solution on the device ---
	size(0),                                      // number of equations
	nnz(0),                                       // number of non-zeros
	handle(nullptr),                              // cusparse handle
	descrA(nullptr),                              // sparse matrix descriptor
	m_cusolverHandle(nullptr),                    // persistent cuSolver handle
	buffer(nullptr),                              // cusparse scratch buffer
	bufferSize(0),
	m_matrixOnGPU(false),                         // device copy of A valid?
	m_rhsOnGPU(false),                            // device copy of B valid?
	d_rowPtr(nullptr), d_colInd(nullptr),         // CSR structure on device
	d_val(nullptr), d_B(nullptr), d_X(nullptr),   // CSR values, RHS, solution
	// --- GPU matrix assembly buffers ---
	numElements(0),
	maxDOFPerElement(0),
	d_elementStiffness(nullptr),                  // per-element stiffness
	d_elementResidual(nullptr),                   // per-element residual
	d_elementDOF(nullptr),                        // per-element DOF mapping
	d_elementTypes(nullptr),
	d_elementData(nullptr),
	// --- Phase 2: nonlinear materials, dual (committed/trial) state buffers ---
	d_materialStates_committed(nullptr),
	d_materialStates_trial(nullptr),
	d_strainTotal(nullptr),                       // committed strain
	d_strainCurrent_trial(nullptr),               // trial strain
	numGaussPoints(0),
	d_tempMatrix(nullptr),                        // scratch matrix workspace
	d_tempVector(nullptr),                        // scratch vector workspace
	// --- Element statistics and CUDA streams ---
	numSupportedElements(0),
	numUnsupportedElements(0),
	computeStream(0),
	assemblyStream(0),
	// --- Phase 1 optimization: DOF mapping cache ---
	m_dofMappingCached(false),
	m_numFE(0),
	m_maxFEDOF(0),
	d_feDOFMapping(nullptr),
	d_feDOFCount(nullptr),
	d_feToElementMap(nullptr),
	// --- Phase 2 optimization: GPU node coordinate update ---
	d_initialNodeCoords(nullptr),
	d_currentNodeCoords(nullptr),
	m_numNodes(0),
	m_initialCoordsCached(false),
	// --- CPU-GPU node data synchronization ---
	d_nodeDisplacements(nullptr),
	d_nodeVelocities(nullptr),
	d_nodeAccelerations(nullptr),
	nodeDataSynchronized(false),
	automaticSyncEnabled(true),                   // auto-sync on by default
	lastSyncTime(0.0),
	nodeDataSize(0),
	h_tempNodeBuffer(nullptr),                    // host staging buffer
	// --- Material state synchronization ---
	d_materialStress(nullptr),
	d_materialStrain(nullptr),
	d_materialProperties(nullptr),
	d_materialStateMap(nullptr),
	materialDataSynchronized(false),
	materialSyncTime(0.0),
	maxMaterialStates(0),
	materialStateSize(0),
	materialPropertySize(0),
	h_tempMaterialBuffer(nullptr),
	// --- Element geometry update (large-deformation support) ---
	d_elementCoordinates(nullptr),
	d_deformedCoordinates(nullptr),
	d_deformationGradient(nullptr),
	d_elementConnectivity(nullptr),
	d_referenceGeometry(nullptr),
	elementGeometryUpdated(false),
	largeDeformationEnabled(false),               // large deformation off by default
	geometryUpdateTime(0.0),
	updatedElementCount(0),
	maxGeometryNodes(0),
	h_tempGeometryBuffer(nullptr),
	// --- Enhanced debug information ---
	detailedLoggingEnabled(false),
	debugOutputFile(""),
	debugFileStream(nullptr),
	totalDataSyncEvents(0),
	totalMemoryOperations(0),
	totalKernelLaunches(0),
	totalDataTransferTime(0.0),
	totalKernelExecutionTime(0.0),
	// --- GPU vector operations ---
	vectorOpManager(nullptr),
	vectorOpsInitialized(false),
	cublassFallbackEnabled(false),
	vectorOpPerformanceComparison(false),
	vectorOpOptimizationLevel(1),                 // default: optimized mode
	maxVectorSize(0),
	totalVectorOperations(0),
	fastVectorOperations(0),                      // custom-kernel op count
	cublasVectorOperations(0),                    // CUBLAS op count
	totalVectorOpTime(0.0),
	totalCUBLASTime(0.0),
	vectorOpErrorCount(0),
	vectorKernelErrors(0),
	vectorMemoryErrors(0),
	vectorValidationErrors(0),
	// --- Jacobi preconditioner ---
	usePreconditioner(true),                      // Jacobi preconditioning on by default
	d_M_inv(nullptr),
	d_z(nullptr),                                 // PCG z-vector
	preconditionerBuilt(false),
	// --- Warm start ---
	warmStartEnabled(true),
	d_X_prev(nullptr),                            // previous solution
	hasWarmStartData(false),
	warmStartCounter(0),
	coldStartCounter(0),
	// --- Persistent CG work vectors / cusparse buffer ---
	d_r_persistent(nullptr),
	d_p_persistent(nullptr),
	d_Ap_persistent(nullptr),
	dBuffer_persistent(nullptr),
	persistentSize(0),
	persistentBufferSize(0)
#if GPU_USE_MIXED_PRECISION
	// --- FP32 mirrors for the mixed-precision path ---
	, d_val_fp32(nullptr),
	d_B_fp32(nullptr),
	d_X_fp32(nullptr),
	d_M_inv_fp32(nullptr),
	d_r_persistent_fp32(nullptr),
	d_p_persistent_fp32(nullptr),
	d_Ap_persistent_fp32(nullptr),
	d_z_fp32(nullptr),
	d_X_prev_fp32(nullptr)
#endif
{
	// Track live instances so that the last destructor can tear down the
	// shared global CUDA handles.
	s_instanceCount++;

	// Enable the global GPU-mode flags used by the gpuSolver_commitState()
	// callback declared at the top of this file.
	g_useGPUSolver = true;
	g_gpuSolverInstance = this;
	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
		"GPUSolver instance created, total instances: " << s_instanceCount << ", GPU mode enabled");

	// Create the cusparse handle used for all sparse matrix operations.
	// FIX: the return status was previously ignored here while every other
	// cusparse call in this constructor is wrapped in CHECK_CUSPARSE; a
	// failed handle creation would otherwise surface only later as a
	// confusing downstream error.
	CHECK_CUSPARSE(cusparseCreate(&handle));

	// Create the persistent matrix descriptor and cuSolver handle (avoids
	// creating/destroying them on every solve()).
	CHECK_CUSPARSE(cusparseCreateMatDescr(&descrA));
	CHECK_CUSPARSE(cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL));
	CHECK_CUSPARSE(cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO));
	CHECK_CUSOLVER(cusolverSpCreate(&m_cusolverHandle));
	GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO,
					"Persistent cuSolver handle created successfully");

	// Initialize the CUDA memory monitoring system.
	initializeGPUMemoryTracking();

	// Initialize the enhanced debug logging system.
	initializeDebugLogging();

	// Enable verbose debugging and performance monitoring (for profiling).
	enableDebug(true);
	enableAllDebugModules();
	setDebugVerbosity(static_cast<int>(DebugLevel::INFO));
	enablePerformanceTiming(true);
	enableGPUMemoryMonitoring(true);

	// Create and initialize the GPU vector-operation manager. The default
	// capacity is 100000 entries; on failure CUBLAS fallback mode is enabled
	// so vector math still works.
	vectorOpManager = new VectorOpManager();
	int maxVecSize = 100000;
	int result = initializeVectorOps(maxVecSize);
	if (result == 0) {
		GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, 
		                "GPU Vector Operations Manager initialized successfully, max vector size: " << maxVecSize);
		// Enable the custom-kernel vs CUBLAS performance comparison.
		enableVectorOpPerformanceComparison(true);
		GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, 
		                "Vector operation performance comparison enabled");
		// Propagate the default optimization level to the manager.
		setVectorOpOptimizationLevel(vectorOpOptimizationLevel);
		GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, 
		                "Vector operation optimization level set to: " << vectorOpOptimizationLevel);
	} else {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
		                "Failed to initialize GPU Vector Operations Manager, error code: " << result);
		enableCUBLASFallback(true);
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING, 
		                "Fallback to CUBLAS mode enabled due to initialization failure");
	}

	// Exercise the debug output system once so misconfiguration shows early.
	GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO, "GPUSolver constructor called with full debugging enabled");
	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, "Performance timing and GPU monitoring enabled");
	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG, "cusparse handle created");
}

// Destructor: print final statistics, release all GPU buffers, and tear down
// the CUDA library handles. The cleanup order is deliberate: reports first
// (they read state about to be freed), then per-feature buffers, then the
// shared/global handles, and finally the persistent cusparse/cusolver handles.
GPUSolver::~GPUSolver()
{
	GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO, "GPUSolver destructor called");

	// Clear the global instance pointer so gpuSolver_commitState() stops
	// routing commit callbacks into a destroyed object.
	if (g_gpuSolverInstance == this) {
		g_gpuSolverInstance = nullptr;
		g_useGPUSolver = false;
	}
	// ====== End of global pointer cleanup ======

	// Print the final performance report (always to stdout, not DEBUG_COUT).
	if (GPUPerformanceTimer::timingEnabled) {
		printf("\n");
		printf("==========================================\n");
		printf("  GPUSolver Final Performance Report\n");
		printf("==========================================\n");
		printTimingStatistics();
	}

	// Print the GPU memory usage report for the session.
	if (GPUMemoryMonitor::monitoringEnabled) {
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO,
			"GPUSolver session completed - generating memory usage report");
		printGPUMemoryStatistics();

		// Leak checking is temporarily disabled to avoid false positives.
		// TODO: fix the allocation/deallocation matching in the monitor.
		/*
		if (detectGPUMemoryLeaks()) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
				"GPU memory leaks detected during session cleanup");
			reportGPUMemoryLeaks();
		}
		*/
	}

	// Print the GPU error statistics report.
	if (GPUErrorStatistics::totalErrorCount > 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING, 
			"GPUSolver session completed with errors detected");
		printGPUErrorStatistics();
	} else {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::INFO, 
			"GPUSolver session completed with no GPU errors detected");
	}
	
	// Release the GPU matrix-assembly buffers (element stiffness, DOF maps,
	// CUDA streams, cached coordinates).
	freeGPUMatrixMemory();

	// Release CPU-GPU node data synchronization buffers.
	freeNodeDataBuffers();

	// Release material-state synchronization buffers.
	freeMaterialStateBuffers();

	// Release nonlinear material state variables (committed/trial buffers).
	freeNonlinearMaterialStates();
	
	// Release geometry-update synchronization buffers.
	freeGeometryBuffers();
	
	// Finish enhanced debug logging (closes debugFileStream if open).
	finalizeDebugLogging();
	
	// Clean up the GPU vector-operation manager: print its reports first,
	// then cleanup and delete the instance.
	if (vectorOpManager != nullptr) {
		// Performance comparison report (only if comparison was enabled).
		if (vectorOpPerformanceComparison) {
			GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, 
				"GPU Vector Operations session completed - generating performance comparison report");
			printVectorOpPerformanceComparison();
		}
		
		// Timing statistics for all vector operations.
		printVectorOpTimingStatistics();
		
		// Error statistics, only when errors actually occurred.
		if (vectorOpErrorCount > 0) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING, 
				"Vector operations completed with " << vectorOpErrorCount << " errors detected");
			printVectorOpErrorStatistics();
		} else {
			GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, 
				"Vector operations completed with no errors detected");
		}
		
		// Release the manager's internal GPU resources before deleting it.
		int result = cleanupVectorOps();
		if (result == 0) {
			GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO, 
				"GPU Vector Operations Manager cleanup completed successfully");
		} else {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
				"GPU Vector Operations Manager cleanup failed, error code: " << result);
		}
		
		// Destroy the manager instance itself.
		delete vectorOpManager;
		vectorOpManager = nullptr;
		vectorOpsInitialized = false;
		
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
			"VectorOpManager instance destroyed");
	}

	// Release preconditioner resources (Jacobi inverse diagonal + PCG z).
	if (d_M_inv) {
		cudaFree(d_M_inv);
		d_M_inv = nullptr;
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
			"Jacobi preconditioner matrix d_M_inv freed");
	}
	if (d_z) {
		cudaFree(d_z);
		d_z = nullptr;
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
			"PCG z-vector d_z freed");
	}
	// ====== End of Preconditioner Cleanup ======

	// Release warm-start resources.
	if (d_X_prev) {
		cudaFree(d_X_prev);
		d_X_prev = nullptr;
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
			"Warm start previous solution d_X_prev freed");
	}

	// Report how often warm vs cold starts were used this session.
	if (warmStartCounter > 0 || coldStartCounter > 0) {
		GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
			"Warm start statistics: warm=" << warmStartCounter <<
			", cold=" << coldStartCounter);
	}
	// ====== End of Warm Start Cleanup ======

	// Release the per-instance persistent CG work vectors and the
	// persistent cusparse scratch buffer.
	if (d_r_persistent) {
		cudaFree(d_r_persistent);
		d_r_persistent = nullptr;
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
			"Persistent work vector d_r_persistent freed");
	}
	if (d_p_persistent) {
		cudaFree(d_p_persistent);
		d_p_persistent = nullptr;
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
			"Persistent work vector d_p_persistent freed");
	}
	if (d_Ap_persistent) {
		cudaFree(d_Ap_persistent);
		d_Ap_persistent = nullptr;
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
			"Persistent work vector d_Ap_persistent freed");
	}
	if (dBuffer_persistent) {
		cudaFree(dBuffer_persistent);
		dBuffer_persistent = nullptr;
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
			"Persistent cusparse buffer dBuffer_persistent freed");
	}

	// Decrement the live-instance counter.
	s_instanceCount--;

	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
		"GPUSolver instance destroyed, remaining instances: " << s_instanceCount);

	// If this was the last instance, also destroy the class-wide (static)
	// CUBLAS/CUSPARSE handles shared across instances.
	if (s_instanceCount == 0 && s_handlesInitialized) {
		GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
			"Cleaning up global CUDA handles (last instance)");

		if (s_cublasHandle) {
			cublasDestroy(s_cublasHandle);
			s_cublasHandle = nullptr;
			GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
				"Global CUBLAS handle destroyed");
		}
		if (s_cusparseHandle) {
			cusparseDestroy(s_cusparseHandle);
			s_cusparseHandle = nullptr;
			GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
				"Global CUSPARSE handle destroyed");
		}

		s_handlesInitialized = false;
	}
	// ====== End of Persistent GPU Resources Cleanup ======

	// Release the core CSR / RHS / solution device buffers.
	freeGPU();

	// Destroy the per-instance CUSPARSE descriptor and handle.
	if (descrA) cusparseDestroyMatDescr(descrA);
	if (handle) cusparseDestroy(handle);

	// Destroy the persistent cuSolver handle created in the constructor.
	if (m_cusolverHandle) {
		cusolverSpDestroy(m_cusolverHandle);
		m_cusolverHandle = nullptr;
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO,
						"Persistent cuSolver handle destroyed successfully");
	}
	// ====== End of cuSolver handle destruction ======

	GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG, "GPU resources freed successfully");
}

// ====== GPU Vector Operations Method Implementations ======

/**
 * Initialize the GPU vector-operation subsystem.
 *
 * @param maxVectorSize  largest vector length the manager must support
 * @return 0 on success; -1 when no manager exists; otherwise the manager's
 *         own error code (vectorOpsInitialized stays false in that case)
 */
int GPUSolver::initializeVectorOps(int maxVectorSize) {
    if (vectorOpManager == nullptr)
        return -1;

    // Remember the capacity regardless of whether initialization succeeds,
    // matching the original behavior.
    this->maxVectorSize = maxVectorSize;

    const int status = vectorOpManager->initialize(maxVectorSize);
    if (status != 0)
        return status;

    vectorOpsInitialized = true;
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, 
                   "Vector operations initialized with max size: " << maxVectorSize);
    return status;
}

/**
 * Release the GPU vector-operation subsystem's resources.
 *
 * @return 0 on success; -1 when no manager exists; otherwise the manager's
 *         cleanup error code (vectorOpsInitialized is left unchanged then)
 */
int GPUSolver::cleanupVectorOps() {
    if (vectorOpManager == nullptr)
        return -1;

    const int status = vectorOpManager->cleanup();
    if (status != 0)
        return status;

    vectorOpsInitialized = false;
    GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO, 
                   "Vector operations cleanup completed");
    return status;
}

/**
 * Forward the requested optimization level to the vector-op manager and,
 * on success, remember it locally.
 *
 * @param level  optimization level understood by VectorOpManager
 * @return 0 on success; -1 when no manager exists; else the manager's code
 */
int GPUSolver::setVectorOpOptimizationLevel(int level) {
    if (vectorOpManager == nullptr)
        return -1;

    const int status = vectorOpManager->setOptimizationLevel(level);
    if (status != 0)
        return status;

    vectorOpOptimizationLevel = level;
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, 
                   "Vector operation optimization level set to: " << level);
    return status;
}

/**
 * Enable or disable the CUBLAS fallback path for vector operations.
 * Always succeeds.
 */
int GPUSolver::enableCUBLASFallback(bool enabled) {
    cublassFallbackEnabled = enabled;
    const char* stateText = enabled ? "enabled" : "disabled";
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, 
                   "CUBLAS fallback mode " << stateText);
    return 0;
}

/**
 * Enable or disable custom-kernel vs CUBLAS performance comparison.
 * Always succeeds.
 */
int GPUSolver::enableVectorOpPerformanceComparison(bool enabled) {
    vectorOpPerformanceComparison = enabled;
    const char* stateText = enabled ? "enabled" : "disabled";
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, 
                   "Vector operation performance comparison " << stateText);
    return 0;
}

/**
 * Print the custom-kernel vs CUBLAS operation counters (framework version;
 * prints nothing but a notice when comparison is disabled).
 */
void GPUSolver::printVectorOpPerformanceComparison() {
    // Guard clause: comparison must have been enabled.
    if (!vectorOpPerformanceComparison) {
        GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, 
                       "Vector operation performance comparison is disabled");
        return;
    }

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
                   "=== Vector Operation Performance Comparison ===");
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
                   "Fast operations: " << fastVectorOperations);
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
                   "CUBLAS operations: " << cublasVectorOperations);
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
                   "Total vector operations: " << totalVectorOperations);
}

/**
 * Print vector-operation error statistics.
 *
 * Framework placeholder: currently only emits the section header; detailed
 * per-category statistics are planned for a later phase.
 */
void GPUSolver::printVectorOpErrorStatistics() {
    GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::INFO,
                   "=== Vector Operation Error Statistics ===");
    // Framework implementation -- detailed statistics to be completed later.
}

/**
 * Print per-operation timing statistics for the vector-op subsystem
 * (framework version). Operations with zero calls are skipped, which also
 * guards the average against division by zero.
 */
void GPUSolver::printVectorOpTimingStatistics() {
    if (!vectorOpTimingEnabled) {
        GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, 
                       "Vector operation timing is disabled");
        return;
    }

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
                   "=== Vector Operation Timing Statistics ===");

    // Six tracked operation categories (see vectorOpCounts declaration).
    constexpr int kNumOpCategories = 6;
    for (int op = 0; op < kNumOpCategories; ++op) {
        if (vectorOpCounts[op] <= 0)
            continue;
        const double mean = vectorOpTotalTimes[op] / vectorOpCounts[op];
        GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
                       "Operation " << op << ": Total=" << vectorOpTotalTimes[op] << "s, " <<
                       "Calls=" << vectorOpCounts[op] << ", " <<
                       "Avg=" << mean << "s");
    }
}

// Returns the number of equations in the system (set by setSize()).
int GPUSolver::getNumEqn(void) const { return size; }

// Build the CSR sparsity pattern from the DOF graph, allocate host and
// device storage, and enable GPU assembly mode.
//
// Steps: (1) clear warm-start data on size change, (2) collect the adjacency
// of each vertex into a per-row sorted set (plus the diagonal), (3) compact
// into rowPtr/colInd/val, (4) allocate GPU buffers and upload the structure
// (values are uploaded later), (5) initialize geometry/material data.
// Returns 0 on success, -1 if element initialization fails.
int GPUSolver::setSize(Graph& theGraph)
{
	int oldSize = size;
	size = theGraph.getNumVertex();
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, "setSize called with size = " << size);

	// Warm start: a previous solution is meaningless for a different-sized
	// system, so drop it when the matrix dimension changes.
	if (size != oldSize && oldSize != 0) {
		if (d_X_prev) {
			cudaFree(d_X_prev);
			d_X_prev = nullptr;
		}
		hasWarmStartData = false;

		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO,
			"Matrix size changed (" << oldSize << " -> " << size << "), warm start data cleared");
	}
	
	// Collect the sparsity pattern: one sorted set of column indices per row.
	// NOTE(review): pattern[row].insert(row) assumes every vertex tag lies in
	// [0, size) — out-of-range tags would index past the vector; confirm the
	// graph guarantees this.
	std::vector<std::set<int>> pattern(size);
	VertexIter& theVertices = theGraph.getVertices();
	Vertex* vertexPtr = nullptr;
	while ((vertexPtr = theVertices()) != nullptr) {
		int row = vertexPtr->getTag();
		const ID& adj = vertexPtr->getAdjacency();
		for (int j = 0; j < adj.Size(); ++j) {
			int col = adj(j);
			if (row >= 0 && row < size && col >= 0 && col < size)
				pattern[row].insert(col);
		}
		pattern[row].insert(row);  // always keep the diagonal entry
	}
	// Assemble rowPtr as the prefix sum of per-row counts.
	rowPtr.assign(size + 1, 0);
	for (int i = 0; i < size; ++i)
		rowPtr[i + 1] = rowPtr[i] + pattern[i].size();
	nnz = rowPtr[size];
	colInd.resize(nnz);
	val.resize(nnz, 0.0);
	// Fill colInd; std::set iteration yields ascending column indices.
	for (int i = 0; i < size; ++i) {
		int idx = rowPtr[i];
		for (int col : pattern[i]) {
			colInd[idx++] = col;
		}
	}
	// Initialize the solution and RHS vectors.
	X.resize(size); X.Zero();
	B.resize(size); B.Zero();
	// --- debug output (disabled) ---
	// printf("[DEBUG] setSize: size=%d nnz=%d\n", size, nnz);
	// for (int i = 0; i < size+1; ++i) printf("rowPtr[%d]=%d ", i, rowPtr[i]);
	// printf("\n");
	// for (int k = 0; k < nnz; ++k) printf("colInd[%d]=%d val[%d]=%g ", k, colInd[k], k, val[k]);
	// printf("\n");
	// --- validity assertions (debug builds only) ---
	assert(rowPtr.size() == size + 1);
	assert(colInd.size() == nnz);
	assert(val.size() == nnz);
	for (int i = 0; i < size; ++i) {
		assert(rowPtr[i + 1] >= rowPtr[i]);
		for (int k = rowPtr[i]; k < rowPtr[i + 1]; ++k) {
			assert(colInd[k] >= 0 && colInd[k] < size);
		}
	}
	allocateGPU();
	// The structure (rowPtr/colInd) is static, so it is uploaded once here;
	// only values are re-uploaded later.
	CHECK_CUDA(cudaMemcpy(d_rowPtr, rowPtr.data(), (size + 1) * sizeof(int), cudaMemcpyHostToDevice));
	CHECK_CUDA(cudaMemcpy(d_colInd, colInd.data(), nnz * sizeof(int), cudaMemcpyHostToDevice));

	// Stage 1: initialize only geometry and material data here (no DOF
	// extraction — that is deferred to the first formTangent_GPU() call to
	// avoid an ordering problem with the analysis model).
	int elementInitResult = initializeGeometryAndMaterial();
	if (elementInitResult != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"Failed to initialize geometry and material data");
		return -1;
	}

	// Enable GPU assembly mode so CPU addA() calls are skipped (avoids
	// assembling the matrix twice).
	m_matrixOnGPU = true;
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"GPU assembly mode enabled, CPU addA() will be skipped");

	DEBUG_COUT << "\n[OPTIMIZATION] GPU mode enabled early to prevent CPU waste\n";
	DEBUG_COUT << "All Element::getTangent() calls will be discarded.\n";
	DEBUG_COUT << "Matrix will be assembled on GPU in solve().\n\n";

	return 0;
}

// Scatter-add an element matrix m (scaled by fact) into the global CSR
// matrix at the equation numbers given by id.
//
// In GPU-assembly mode this is a no-op: the matrix is assembled on the
// device in solve(), so the CPU-computed element matrix is discarded (with a
// one-time warning about the wasted Element::getTangent() work).
int GPUSolver::addA(const Matrix& m, const ID& id, double fact)
{
	// GPU-assembly mode: skip CPU assembly to avoid assembling twice.
	if (m_matrixOnGPU) {
		static int skip_count = 0;
		static int total_skipped_elements = 0;
		skip_count++;
		total_skipped_elements++;  // cumulative number of skipped element matrices

		if (skip_count <= 3) {
			DEBUG_COUT << "[DEBUG] addA() skipped (GPU already assembled), skip_count="
			          << skip_count << "\n";
		}

		// Performance warning: every addA() call means the CPU already
		// computed an element stiffness that is now thrown away. Emitted
		// exactly once, at the 10000th skip.
		if (skip_count == 10000) {
			DEBUG_COUT << "\n⚠️  [PERFORMANCE WARNING] ⚠️\n";
			DEBUG_COUT << "CPU Element::getTangent() was called " << total_skipped_elements << " times\n";
			DEBUG_COUT << "but results were discarded because GPU assembly is enabled!\n";
			DEBUG_COUT << "Estimated wasted CPU time: ~"
			          << (total_skipped_elements * 30 / 1000.0) << " ms\n";
			DEBUG_COUT << "This is an OpenSees framework limitation.\n";
			DEBUG_COUT << "===================================================\n\n";
		}

		return 0;
	}

	// Debug: trace addA() invocations (first five, then every 100th).
	static int addA_call_count = 0;
	addA_call_count++;
	if (addA_call_count <= 5 || addA_call_count % 100 == 0) {
		DEBUG_COUT << "[DEBUG] addA() called, count=" << addA_call_count
		          << " matrix_size=" << id.Size() << "x" << id.Size() << "\n";
		// NOTE(review): DEBUG_COUT goes through std::cout; fflush(stdout)
		// only flushes it when stdio synchronization is on — confirm intent.
		fflush(stdout);
	}
	// ====== End of debug code ======

	// Storage-format diagnostics: detect whether only one triangle is stored.
	static bool first_time = true;
	static int upper_count = 0, lower_count = 0, diag_count = 0;

	int n = id.Size();
	for (int i = 0; i < n; ++i) {
		int row = id(i);
		if (row < 0 || row >= size) continue;  // negative id = constrained DOF
		for (int j = 0; j < n; ++j) {
			int col = id(j);
			if (col < 0 || col >= size) continue;

			// Count upper-triangle, lower-triangle and diagonal entries
			// (first assembly pass only).
			if (first_time && std::abs(m(i, j)) > 1e-15) {
				if (row < col) upper_count++;
				else if (row > col) lower_count++;
				else diag_count++;
			}

			// Linear search for colInd == col within row's CSR range
			// rowPtr[row] .. rowPtr[row+1].
			for (int k = rowPtr[row]; k < rowPtr[row + 1]; ++k) {
				if (colInd[k] == col) {
					val[k] += m(i, j) * fact;
					break;
				}
			}
		}
	}

	// Print the storage-format diagnosis once, after enough entries were seen.
	if (first_time && (upper_count + lower_count + diag_count) > 100) {
		printf("\n[GPUSolver Storage Format Debug]\n");
		printf("  Diagonal elements:  %d\n", diag_count);
		printf("  Lower triangle:     %d\n", lower_count);
		printf("  Upper triangle:     %d\n", upper_count);
		printf("  Total non-zeros:    %d\n", upper_count + lower_count + diag_count);

		if (upper_count > 0 && lower_count > 0) {
			printf("  => FULL symmetric matrix storage\n");
			printf("  => Memory waste: ~50%% (storing both triangles)\n");
			printf("  => Optimization potential: Use triangular storage only\n");
		} else if (lower_count > 0 && upper_count == 0) {
			printf("  => Lower triangular storage only\n");
			printf("  => Storage format: OPTIMAL for symmetric matrices\n");
		} else if (upper_count > 0 && lower_count == 0) {
			printf("  => Upper triangular storage only\n");
			printf("  => Storage format: OPTIMAL for symmetric matrices\n");
		}
		printf("\n");
		first_time = false;
	}

	// GPU mode never reaches here (early return above), so no flag reset.
	return 0;
}

// Accumulate a scaled element vector into the global RHS B at the equation
// numbers given by id (entries with out-of-range ids are ignored).
int GPUSolver::addB(const Vector& v, const ID& id, double fact)
{
	const int count = id.Size();
	for (int idx = 0; idx < count; ++idx) {
		const int loc = id(idx);
		if (loc < 0 || loc >= size)
			continue;  // constrained / out-of-range DOF
		B[loc] += v(idx) * fact;
	}
	// Host-side B changed; it must be re-uploaded before the next solve.
	m_rhsOnGPU = false;
	return 0;
}

// Overwrite the global RHS with fact * v (entries beyond min(v.Size(), size)
// are zeroed).
int GPUSolver::setB(const Vector& v, double fact)
{
	B.Zero();
	const int limit = std::min(v.Size(), size);
	for (int i = 0; i < limit; ++i)
		B[i] = v(i) * fact;
	// Host-side B changed; it must be re-uploaded before the next solve.
	m_rhsOnGPU = false;
	return 0;
}

// Zero the coefficient matrix A.
void GPUSolver::zeroA(void)
{
	// GPU assembly mode: d_val is cleared on the device in formTangent_GPU,
	// so the host copy is left alone and the residency flag is kept set.
	if (m_matrixOnGPU)
		return;

	// CPU mode: clear the host CSR values and mark the device copy stale.
	for (double& entry : val)
		entry = 0.0;
	m_matrixOnGPU = false;
}

// Zero the right-hand side B and mark the device copy stale.
void GPUSolver::zeroB(void)
{
	B.Zero();
	m_rhsOnGPU = false;  // host B changed; re-upload required before solving
}

// Set a single entry of the solution vector X (silently ignores an
// out-of-range location).
void GPUSolver::setX(int loc, double value)
{
	const bool inRange = (loc >= 0 && loc < size);
	if (inRange)
		X[loc] = value;
}

// Copy the first min(x.Size(), size) entries of x into the solution vector X.
void GPUSolver::setX(const Vector& x)
{
	const int limit = std::min(x.Size(), size);
	for (int i = 0; i < limit; ++i)
		X[i] = x(i);
}

// Returns a reference to the host-side solution vector X.
const Vector& GPUSolver::getX(void) { return X; }

// Returns a reference to the host-side right-hand-side vector B.
const Vector& GPUSolver::getB(void) { return B; }

// Returns the norm of the host-side right-hand side (delegates to Vector).
double GPUSolver::normRHS(void)
{
	return B.Norm();
}

// Export the assembled global stiffness matrix (host-side CSR copy) as a
// dense whitespace-separated text file.
//
// Improvements:
//  - streams one dense row at a time, so peak memory is O(size) instead of
//    the previous O(size^2) full dense copy
//  - writes values with 17 significant digits so doubles round-trip exactly
//    (the default ostream precision of 6 silently truncated them)
//
// NOTE(review): this exports the host val[] array; in GPU assembly mode that
// copy may be stale relative to d_val — confirm callers download first.
void GPUSolver::exportGlobalStiffnessMatrix(const char* filename)
{
	std::ofstream fout(filename);
	if (!fout) return;  // silently skip if the file cannot be opened (as before)
	fout << std::setprecision(17);  // max_digits10 for IEEE-754 double

	std::vector<double> denseRow(size, 0.0);
	for (int i = 0; i < size; ++i) {
		std::fill(denseRow.begin(), denseRow.end(), 0.0);
		// Scatter row i of the CSR matrix; duplicate column entries (if any)
		// accumulate, matching the previous += behavior.
		for (int k = rowPtr[i]; k < rowPtr[i + 1]; ++k)
			denseRow[colInd[k]] += val[k];
		for (int j = 0; j < size; ++j) {
			fout << denseRow[j];
			if (j < size - 1) fout << " ";
		}
		fout << "\n";
	}
	fout.close();
}

// Allocate the device-side CSR arrays plus the RHS and solution vectors for
// the current size/nnz.
//
// Fix: release any previously allocated buffers first, so that repeated
// setSize() calls (e.g. after the model/graph changes) no longer leak the
// old device allocations. cudaFree(nullptr) is a documented no-op, so the
// free is also safe on the very first call when all pointers are null.
void GPUSolver::allocateGPU()
{
	freeGPU();  // drop buffers from a previous setSize(), if any

	CHECK_CUDA_MALLOC((void**)&d_rowPtr, (size + 1) * sizeof(int));
	CHECK_CUDA_MALLOC((void**)&d_colInd, nnz * sizeof(int));
	CHECK_CUDA_MALLOC((void**)&d_val, nnz * sizeof(double));
	CHECK_CUDA_MALLOC((void**)&d_B, size * sizeof(double));
	CHECK_CUDA_MALLOC((void**)&d_X, size * sizeof(double));
}

// Release the core device buffers (CSR structure/values, RHS, solution) and
// the cusparse scratch buffer. Called from the destructor; safe to call when
// nothing was allocated (CUDA frees of null pointers are no-ops).
void GPUSolver::freeGPU()
{
	CHECK_CUDA_FREE(d_rowPtr);
	CHECK_CUDA_FREE(d_colInd);
	CHECK_CUDA_FREE(d_val);
	CHECK_CUDA_FREE(d_B);
	CHECK_CUDA_FREE(d_X);
	if (buffer) CHECK_CUDA_FREE(buffer);
	bufferSize = 0;  // scratch buffer gone; force re-query of required size
}

// ====== GPU Matrix Assembly Memory Management ======

// Allocate the device buffers used by GPU matrix assembly (per-element
// stiffness/residual, DOF maps, type/data arrays, scratch space) and create
// the compute/assembly CUDA streams.
//
// Fix: all byte-size computations now cast to size_t BEFORE multiplying.
// Previously `numElements * maxDOFPerElement * maxDOFPerElement` was
// evaluated in int arithmetic (only the final * sizeof(double) widened it),
// which silently overflows for large models and would request a wrong,
// possibly tiny or negative-wrapped, allocation size.
//
// Returns 0 on success, -1 on invalid parameters or allocation failure.
int GPUSolver::allocateGPUMatrixMemory()
{
	GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO, 
					"allocateGPUMatrixMemory() called, numElements=" << numElements 
					<< " maxDOFPerElement=" << maxDOFPerElement);

	// Release any previously allocated assembly memory first.
	freeGPUMatrixMemory();

	// Validate parameters before computing sizes.
	if (numElements <= 0 || maxDOFPerElement <= 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
						"Invalid parameters: numElements=" << numElements 
						<< " maxDOFPerElement=" << maxDOFPerElement);
		return -1;
	}

	// Widen once; every product below is then done in size_t.
	const size_t nElem = static_cast<size_t>(numElements);
	const size_t maxDOF = static_cast<size_t>(maxDOFPerElement);

	try {
		// Per-element stiffness matrices (maxDOF x maxDOF doubles each).
		size_t stiffnessSize = nElem * maxDOF * maxDOF * sizeof(double);
		CHECK_CUDA_MALLOC((void**)&d_elementStiffness, stiffnessSize);
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG, 
						"Allocated element stiffness buffer: " << stiffnessSize << " bytes");

		// Per-element residual vectors.
		size_t residualSize = nElem * maxDOF * sizeof(double);
		CHECK_CUDA_MALLOC((void**)&d_elementResidual, residualSize);
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG, 
						"Allocated element residual buffer: " << residualSize << " bytes");

		// Per-element DOF mapping (up to maxDOF equation numbers each).
		size_t dofSize = nElem * maxDOF * sizeof(int);
		CHECK_CUDA_MALLOC((void**)&d_elementDOF, dofSize);
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG, 
						"Allocated element DOF mapping: " << dofSize << " bytes");

		// Element type array.
		size_t typesSize = nElem * sizeof(int);
		CHECK_CUDA_MALLOC((void**)&d_elementTypes, typesSize);

		// Element data buffer (array of GPUElementData structs).
		size_t dataSize = nElem * sizeof(GPUElementData);
		CHECK_CUDA_MALLOC((void**)&d_elementData, dataSize);
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
						"Allocated element data buffer: " << dataSize << " bytes ("
						<< numElements << " x " << sizeof(GPUElementData) << " bytes/element)");

		// Scratch space (sized from the global system dimension).
		size_t tempMatrixSize = static_cast<size_t>(size) * maxDOF * sizeof(double);
		CHECK_CUDA_MALLOC((void**)&d_tempMatrix, tempMatrixSize);

		size_t tempVectorSize = static_cast<size_t>(size) * sizeof(double);
		CHECK_CUDA_MALLOC((void**)&d_tempVector, tempVectorSize);

		// CUDA streams for overlapping element compute and assembly.
		CHECK_CUDA(cudaStreamCreate(&computeStream));
		CHECK_CUDA(cudaStreamCreate(&assemblyStream));
		

		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO, 
						"GPU matrix assembly memory allocated successfully");

		// Zero-initialize the accumulation buffers.
		CHECK_CUDA(cudaMemset(d_elementStiffness, 0, stiffnessSize));
		CHECK_CUDA(cudaMemset(d_elementResidual, 0, residualSize));

		return 0;
	}
	catch (...) {
		// CHECK_* macros may throw on failure; roll back partial allocations.
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
						"Exception during GPU matrix assembly memory allocation");
		freeGPUMatrixMemory();
		return -1;
	}
}

// Release every GPU resource owned by the matrix-assembly path.
// Safe to call repeatedly: each handle/pointer is reset after release.
void GPUSolver::freeGPUMatrixMemory()
{
	GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG, 
					"freeGPUMatrixMemory() called");

	// Destroy the CUDA streams first, then null the handles so a second
	// invocation becomes a no-op.
	if (computeStream != 0) {
		CHECK_CUDA(cudaStreamDestroy(computeStream));
		computeStream = 0;
	}
	if (assemblyStream != 0) {
		CHECK_CUDA(cudaStreamDestroy(assemblyStream));
		assemblyStream = 0;
	}

	// Buffers tracked by the allocation monitor: release through the macro.
	CHECK_CUDA_FREE(d_elementStiffness);
	CHECK_CUDA_FREE(d_elementResidual);
	CHECK_CUDA_FREE(d_elementDOF);
	CHECK_CUDA_FREE(d_elementTypes);
	CHECK_CUDA_FREE(d_elementData);
	CHECK_CUDA_FREE(d_tempMatrix);
	CHECK_CUDA_FREE(d_tempVector);

	// Phase 1 optimization: DOF-mapping cache. These buffers were obtained
	// via plain CHECK_CUDA(cudaMalloc) and are unknown to the monitor, so
	// they are freed directly to avoid "pointer not found" warnings.
	if (d_feDOFMapping != nullptr) {
		cudaFree(d_feDOFMapping);
		d_feDOFMapping = nullptr;
	}
	if (d_feDOFCount != nullptr) {
		cudaFree(d_feDOFCount);
		d_feDOFCount = nullptr;
	}
	if (d_feToElementMap != nullptr) {
		cudaFree(d_feToElementMap);
		d_feToElementMap = nullptr;
	}
	m_dofMappingCached = false;

	// Phase 2 optimization: node-coordinate cache (also unmonitored).
	if (d_initialNodeCoords != nullptr) {
		cudaFree(d_initialNodeCoords);
		d_initialNodeCoords = nullptr;
	}
	if (d_currentNodeCoords != nullptr) {
		cudaFree(d_currentNodeCoords);
		d_currentNodeCoords = nullptr;
	}
	if (d_nodeDisplacementsCache != nullptr) {
		cudaFree(d_nodeDisplacementsCache);
		d_nodeDisplacementsCache = nullptr;
	}
	m_initialCoordsCached = false;

	GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO,
					"GPU matrix assembly memory freed successfully");
}

// ====== Phase 1 Optimization: DOF Mapping Cache ======

/**
 * @brief Extract and cache the DOF mapping on the GPU (one-time operation).
 *
 * Optimization strategy:
 * - The DOF topology is invariant during the analysis, so the mapping only
 *   needs to be extracted once.
 * - The mapping is cached in GPU memory, removing the per-step CPU walk
 *   over the FE_Element list.
 * - Expected benefit: eliminates ~2.8 ms of CPU overhead per step.
 *
 * @return 0 on success (or when already cached), -1 if theModel is NULL.
 */
int GPUSolver::extractAndCacheDOFMapping()
{
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"extractAndCacheDOFMapping() - caching DOF mapping on GPU");

	// Already cached: the topology has not changed, nothing to redo.
	if (m_dofMappingCached) {
		GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG,
						"DOF mapping already cached, skipping");
		return 0;
	}

	// The AnalysisModel supplies the FE_Element list; bail out if absent.
	if (theModel == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"theModel is NULL - cannot extract DOF mapping");
		return -1;
	}

	using namespace std::chrono;
	auto t_start = high_resolution_clock::now();

	// ====== Step 1: count FE_Elements and the maximum DOF count per element ======
	FE_EleIter& theEles = theModel->getFEs();
	FE_Element* elePtr;
	m_numFE = 0;
	m_maxFEDOF = 0;
	while ((elePtr = theEles()) != 0) {
		m_numFE++;
		int dof = elePtr->getID().Size();
		if (dof > m_maxFEDOF) m_maxFEDOF = dof;
	}

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"Found " << m_numFE << " FE_Elements, maxDOF=" << m_maxFEDOF);

	// ====== Step 2: build an Element tag -> index lookup table ======
	// Only GPU-supported elements receive an index; the ordering matches the
	// one used when element data is extracted from the Domain.
	std::map<int, int> elementTagToIndex;
	Domain* theDomain = theModel->getDomainPtr();
	if (theDomain != nullptr) {
		ElementIter& elemIter = theDomain->getElements();
		Element* elem;
		int elemIdx = 0;
		while ((elem = elemIter()) != 0) {
			if (isElementSupported(elem->getClassTag())) {
				elementTagToIndex[elem->getTag()] = elemIdx;
				elemIdx++;
			}
		}
	}

	// ====== Step 3: allocate CPU staging buffers ======
	// Rows are padded to m_maxFEDOF; unused slots stay -1 and unmapped
	// FE_Elements keep -1 in h_feToElementMap.
	std::vector<int> h_feDOFMapping(m_numFE * m_maxFEDOF, -1);
	std::vector<int> h_feDOFCount(m_numFE, 0);
	std::vector<int> h_feToElementMap(m_numFE, -1);

	// ====== Step 4: extract the DOF mapping and the FE -> Element mapping ======
	FE_EleIter& theEles2 = theModel->getFEs();
	int feIdx = 0;
	while ((elePtr = theEles2()) != 0) {
		const ID& elemID = elePtr->getID();
		int elemDOF = elemID.Size();
		h_feDOFCount[feIdx] = elemDOF;

		// Resolve the Element index corresponding to this FE_Element.
		Element* elem = elePtr->getElement();
		if (elem != nullptr) {
			int elemTag = elem->getTag();
			auto it = elementTagToIndex.find(elemTag);
			if (it != elementTagToIndex.end()) {
				h_feToElementMap[feIdx] = it->second;
			}
		}

		// Copy the equation numbers (entries of -1 denote constrained DOFs).
		int dofOffset = feIdx * m_maxFEDOF;
		for (int i = 0; i < elemDOF; i++) {
			h_feDOFMapping[dofOffset + i] = elemID(i);
		}

		feIdx++;
	}

	auto t_extract = high_resolution_clock::now();
	double t_extract_ms = duration<double, std::milli>(t_extract - t_start).count();

	// ====== Step 5: allocate GPU memory and upload ======
	size_t dofSize = m_numFE * m_maxFEDOF * sizeof(int);
	size_t countSize = m_numFE * sizeof(int);
	size_t mapSize = m_numFE * sizeof(int);

	CHECK_CUDA(cudaMalloc(&d_feDOFMapping, dofSize));
	CHECK_CUDA(cudaMalloc(&d_feDOFCount, countSize));
	CHECK_CUDA(cudaMalloc(&d_feToElementMap, mapSize));

	CHECK_CUDA(cudaMemcpy(d_feDOFMapping, h_feDOFMapping.data(), dofSize, cudaMemcpyHostToDevice));
	CHECK_CUDA(cudaMemcpy(d_feDOFCount, h_feDOFCount.data(), countSize, cudaMemcpyHostToDevice));
	CHECK_CUDA(cudaMemcpy(d_feToElementMap, h_feToElementMap.data(), mapSize, cudaMemcpyHostToDevice));

	auto t_transfer = high_resolution_clock::now();
	double t_transfer_ms = duration<double, std::milli>(t_transfer - t_extract).count();

	// ====== Step 6: mark the cache valid ======
	m_dofMappingCached = true;

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"DOF mapping cached on GPU successfully (Extract: " << t_extract_ms
					<< "ms, Transfer: " << t_transfer_ms << "ms)");

	return 0;
}

// ====== End of Phase 1 Optimization ======

// ====== Phase 2 Optimization: GPU Node Coordinate Update ======

/**
 * @brief Cache initial node coordinates on the GPU (one-time operation).
 *
 * The reference geometry never changes during the analysis, so it is
 * extracted once and kept resident on the device. The companion
 * updateNodeCoordinatesOnGPU() then computes the current coordinates on
 * the GPU each step.
 *
 * @return 0 on success (or when already cached), -1 on error.
 */
int GPUSolver::cacheInitialNodeCoordinates()
{
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"cacheInitialNodeCoordinates() - caching initial coordinates on GPU");

	// Nothing to do once the reference coordinates are resident on the GPU.
	if (m_initialCoordsCached) {
		GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG,
						"Initial coordinates already cached, skipping");
		return 0;
	}

	if (theModel == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"theModel is NULL - cannot extract initial coordinates");
		return -1;
	}

	Domain* domain = theModel->getDomainPtr();
	if (domain == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"Domain is NULL - cannot extract initial coordinates");
		return -1;
	}

	// Pass 1: count the nodes so the host buffer can be sized up front.
	m_numNodes = 0;
	{
		NodeIter& countIt = domain->getNodes();
		while (countIt() != 0)
			++m_numNodes;
	}

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"Found " << m_numNodes << " nodes in domain");

	// Pass 2: gather reference coordinates, padded to xyz (z = 0 for 2D).
	std::vector<double> hostCoords(m_numNodes * 3, 0.0);
	NodeIter& nodeIt = domain->getNodes();
	Node* node = nullptr;
	for (int idx = 0; (node = nodeIt()) != 0; ++idx) {
		const Vector& crds = node->getCrds();
		const int nc = crds.Size() < 3 ? crds.Size() : 3;
		for (int j = 0; j < nc; ++j)
			hostCoords[idx * 3 + j] = crds(j);
	}

	// Device buffers: an immutable copy of the reference geometry plus a
	// scratch buffer that updateNodeCoordinatesOnGPU() overwrites each step.
	const size_t bytes = m_numNodes * 3 * sizeof(double);
	CHECK_CUDA(cudaMalloc(&d_initialNodeCoords, bytes));
	CHECK_CUDA(cudaMalloc(&d_currentNodeCoords, bytes));
	CHECK_CUDA(cudaMemcpy(d_initialNodeCoords, hostCoords.data(),
						  bytes, cudaMemcpyHostToDevice));

	m_initialCoordsCached = true;

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"Initial node coordinates cached on GPU successfully ("
					<< m_numNodes << " nodes)");

	return 0;
}

/**
 * @brief Update current node coordinates on the GPU (called every step).
 *
 * Computes currentCoord = initialCoord + trialDisplacement for every node.
 * The trial displacements are gathered on the CPU on every call — the GPU
 * must always see the latest Newton iterate (skipping updates previously
 * caused non-convergence) — and uploaded into a persistent device buffer,
 * after which a kernel performs the addition in parallel on computeStream.
 *
 * Fixes applied in this revision (behavior unchanged):
 * - removed the always-true condition `(!displacementsInitialized || true)`
 *   and the unused function-local statics it guarded;
 * - removed a dead block that computed an unused launch configuration.
 *
 * @return 0 on success, -1 on failure.
 */
int GPUSolver::updateNodeCoordinatesOnGPU()
{
	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
					"updateNodeCoordinatesOnGPU() - updating coordinates on GPU");

	// Step 1: lazily cache the (immutable) initial coordinates on first use.
	// This also establishes m_numNodes.
	if (!m_initialCoordsCached) {
		if (cacheInitialNodeCoordinates() != 0) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
							"Failed to cache initial node coordinates");
			return -1;
		}
	}

	// Step 2: persistent displacement buffer — allocate once, grow only when
	// the node count increases (avoids a per-step cudaMalloc/cudaFree pair).
	size_t dispSize = m_numNodes * 3 * sizeof(double);
	if (d_nodeDisplacementsCache == nullptr || d_nodeDisplacementsCache_size < dispSize) {
		if (d_nodeDisplacementsCache) {
			cudaFree(d_nodeDisplacementsCache);
		}
		CHECK_CUDA(cudaMalloc(&d_nodeDisplacementsCache, dispSize));
		d_nodeDisplacementsCache_size = dispSize;
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO,
						"Allocated persistent d_nodeDisplacementsCache: " << dispSize << " bytes");
	}

	if (theModel == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"theModel is NULL");
		return -1;
	}

	Domain* theDomain = theModel->getDomainPtr();
	if (theDomain == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"Domain is NULL");
		return -1;
	}

	// Step 3: gather the current trial displacements, padded to xyz
	// (the z component stays 0 for 2D nodes).
	std::vector<double> h_displacements(m_numNodes * 3, 0.0);

	NodeIter& theNodeIter = theDomain->getNodes();
	Node* nodePtr;
	int nodeIdx = 0;
	while ((nodePtr = theNodeIter()) != 0) {
		const Vector& disp = nodePtr->getTrialDisp();
		for (int j = 0; j < disp.Size() && j < 3; j++) {
			h_displacements[nodeIdx * 3 + j] = disp(j);
		}
		nodeIdx++;
	}

	// Upload into the persistent cache (no per-step allocation).
	CHECK_CUDA(cudaMemcpy(d_nodeDisplacementsCache, h_displacements.data(),
						  dispSize, cudaMemcpyHostToDevice));

	// Step 4: launch the GPU kernel computing current = initial + disp.
	// The launch is asynchronous; later work queued on computeStream is
	// ordered after it, so no explicit synchronization is required here.
	int ret = launchUpdateNodeCoordinatesKernel(
		m_numNodes,
		d_currentNodeCoords,
		d_initialNodeCoords,
		d_nodeDisplacementsCache,
		computeStream
	);

	if (ret != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"Failed to launch node coordinate update kernel");
		return -1;
	}

	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
					"Node coordinates updated on GPU successfully");

	return 0;
}

// ====== End of Phase 2 Optimization ======

// ====== End of GPU Matrix Assembly Memory Management ======

// ====== GPU Element Data Initialization ======

// Initialize the GPU element data structures.
//
// Called after setSize(): DOF numbering is complete by then, so element data
// can be safely pulled out of the Domain. Note that
// extractElementDataFromDomain() also performs the GPU memory allocation —
// calling allocateGPUMatrixMemory() again here would discard the data that
// was just transferred.
int GPUSolver::initializeElementData()
{
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"initializeElementData() called - extracting real element data from Domain");

	if (extractElementDataFromDomain() != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"Failed to extract element data from domain in initializeElementData()");
		return -1;
	}

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"Element data extracted successfully: numElements=" << numElements
					<< " supported=" << numSupportedElements
					<< " unsupported=" << numUnsupportedElements);

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"Element classification completed - Supported: " << numSupportedElements
					<< ", Unsupported: " << numUnsupportedElements);

	// Report which element types the GPU path supports.
	getSupportedElementInfo();

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, 
					"Element data initialized successfully");
	return 0;
}

// Refresh per-element data (current node coordinates) for nonlinear analysis.
int GPUSolver::updateElementData()
{
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG,
					"updateElementData() - updating node coordinates for nonlinear analysis");

	// extractElementDataFromDomain() both re-extracts the element data
	// (including current node coordinates) and uploads it to the GPU.
	if (extractElementDataFromDomain() != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"Failed to re-extract and transfer element data");
		return -1;
	}

	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG,
					"Element node coordinates updated successfully");
	return 0;
}

// ====== Staged initialization (fixes the DOF-mapping timing issue) ======

/**
 * @brief Stage 1: extract geometry and material data (DOF_Group independent).
 *
 * Invoked during setSize(), before any DOF_Group exists. Only data that does
 * not depend on DOF numbering is gathered here: element counts, node
 * coordinates and material parameters. The DOF mapping is deferred until the
 * first formTangent_GPU() call.
 *
 * @return 0 on success (including "no GPU-supported elements"), non-zero on error.
 */
int GPUSolver::initializeGeometryAndMaterial()
{
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"Extracting geometry and material data (DOF-independent)");

	if (theModel == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"theModel is NULL");
		return -1;
	}

	Domain* domain = theModel->getDomainPtr();
	if (domain == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"Domain is NULL");
		return -1;
	}

	// Tally total vs. GPU-supported element counts.
	numElements = 0;
	numSupportedElements = 0;
	numUnsupportedElements = 0;

	ElementIter& it = domain->getElements();
	for (Element* e = it(); e != 0; e = it()) {
		if (isElementSupported(e->getClassTag()))
			++numSupportedElements;
		else
			++numUnsupportedElements;
		++numElements;
	}

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"Found " << numSupportedElements << " GPU-supported elements (out of "
					<< numElements << " total)");

	if (numSupportedElements == 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
						"No GPU-supported elements found");
		// Not an error: there is simply nothing to accelerate on the GPU.
		return 0;
	}

	// Upper bound on DOFs per element (covers the supported element types).
	maxDOFPerElement = 24;

	int ret = allocateGPUMatrixMemory();
	if (ret != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"Failed to allocate GPU matrix memory");
		return ret;
	}

	// Pull geometry and material parameters (DOF mapping excluded).
	ret = extractGeometryAndMaterialOnly();
	if (ret != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"Failed to extract geometry and material data");
		return ret;
	}

	// Report which element types the GPU path supports.
	getSupportedElementInfo();

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"Geometry and material data initialized successfully");
	return 0;
}

/**
 * @brief Stage 2: extract the DOF mapping (first formTangent_GPU() call).
 *
 * By this point the DOF_Groups have been created in setLinks(), so equation
 * numbers can be read safely. The mapping for every GPU-supported element is
 * gathered, validated against the size of the device buffer, and uploaded to
 * d_elementDOF.
 *
 * Entries of -1 denote constrained (fixed) DOFs and are legitimate.
 *
 * Fix applied in this revision: the mapping size is now checked against the
 * d_elementDOF allocation (numElements * maxDOFPerElement ints) before the
 * cudaMemcpy — matching extractElementDataFromDomain() — so an oversized
 * mapping can no longer overflow the device buffer.
 *
 * @return 0 on success, -1 on error (missing model/domain/node/DOF_Group,
 *         unallocated device buffer, or a mapping larger than the buffer).
 */
int GPUSolver::extractDOFMapping()
{
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"Extracting DOF mapping (DOF_Group should be initialized now)");

	if (theModel == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"theModel is NULL");
		return -1;
	}

	Domain* theDomain = theModel->getDomainPtr();
	if (theDomain == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"Domain is NULL");
		return -1;
	}

	// Walk the GPU-supported elements and flatten their DOF numbers.
	ElementIter& elemIter = theDomain->getElements();
	Element* theElement;
	std::vector<int> h_dofMapping;
	int elementIdx = 0;          // counts supported elements only
	bool firstElement = true;

	while ((theElement = elemIter()) != 0) {
		if (!isElementSupported(theElement->getClassTag())) {
			continue;
		}

		const ID& nodeIDs = theElement->getExternalNodes();
		int numNodes = nodeIDs.Size();

		GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG,
						"Element[" << elementIdx << "] tag=" << theElement->getTag()
						<< " type=" << theElement->getClassTag()
						<< " numNodes=" << numNodes);

		// Collect the equation numbers of every node of this element.
		for (int i = 0; i < numNodes; i++) {
			Node* node = theDomain->getNode(nodeIDs(i));
			if (node == 0) {
				GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
								"Node not found: " << nodeIDs(i));
				return -1;
			}

			// DOF_Group must exist by now (created in setLinks()).
			DOF_Group* theDOF = node->getDOF_GroupPtr();
			if (theDOF == 0) {
				GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
								"DOF_Group is NULL for node " << nodeIDs(i)
								<< " - setLinks() may not have been called");
				return -1;
			}

			// First DOF_Group seen: sanity-print to confirm numbering exists.
			if (firstElement && i == 0) {
				const ID& testID = theDOF->getID();
				GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
								"✓ DOF_Group exists! First node DOF: " << testID(0));
				firstElement = false;
			}

			const ID& nodeEqns = theDOF->getID();
			int nodeDOF = node->getNumberDOF();

			// Append this node's equation numbers to the flat mapping.
			for (int j = 0; j < nodeDOF; j++) {
				h_dofMapping.push_back(nodeEqns(j));
			}
		}

		elementIdx++;
	}

	// DOF statistics (-1 marks a fixed/constrained DOF, which is legal).
	int numConstrainedDOF = 0;
	int numFreeDOF = 0;
	for (size_t i = 0; i < h_dofMapping.size(); i++) {
		if (h_dofMapping[i] < 0) {
			numConstrainedDOF++;
		} else {
			numFreeDOF++;
		}
	}

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"DOF statistics: " << numFreeDOF << " free DOFs, "
					<< numConstrainedDOF << " constrained DOFs (marked as -1)");

	// Print the first few DOF indices for verification.
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"First 10 DOF indices:");
	DEBUG_COUT << "[GPUSolver][MATRIX_ASSEMBLY][INFO] DOF: ";
	for (size_t i = 0; i < std::min((size_t)10, h_dofMapping.size()); i++) {
		DEBUG_COUT << h_dofMapping[i] << " ";
	}
	DEBUG_COUT << std::endl;

	// Upload the mapping to the GPU.
	if (d_elementDOF != 0) {
		size_t dofMapSize = h_dofMapping.size() * sizeof(int);
		size_t allocatedDOFSize = (size_t)numElements * maxDOFPerElement * sizeof(int);

		// Guard against overflowing the buffer allocated in
		// allocateGPUMatrixMemory() (same validation as in
		// extractElementDataFromDomain()).
		if (dofMapSize > allocatedDOFSize) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
							"DOF map size (" << dofMapSize << " bytes) exceeds allocated memory ("
							<< allocatedDOFSize << " bytes)");
			return -1;
		}

		GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG,
						"Transferring DOF map (" << h_dofMapping.size()
						<< " DOFs, " << dofMapSize << " bytes) to GPU");

		CHECK_CUDA(cudaMemcpy(d_elementDOF, h_dofMapping.data(), dofMapSize,
							  cudaMemcpyHostToDevice));
	} else {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"d_elementDOF is NULL - memory not allocated");
		return -1;
	}

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"DOF mapping extracted and transferred successfully");

	return 0;
}

/**
 * @brief Helper: extract geometry and material data only (no DOF mapping).
 *
 * Reads node coordinates and material parameters for every GPU-supported
 * element in the Domain and uploads them into d_elementData. The DOF mapping
 * is deliberately skipped here — it requires DOF_Groups, which do not exist
 * yet, and is handled later by extractDOFMapping().
 *
 * @return 0 on success, -1 on error.
 */
int GPUSolver::extractGeometryAndMaterialOnly()
{
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG,
					"Extracting geometry and material properties only");

	if (theModel == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"theModel is NULL");
		return -1;
	}

	Domain* domain = theModel->getDomainPtr();
	if (domain == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"Domain is NULL");
		return -1;
	}

	std::vector<GPUElementData> hostElements;
	ElementIter& it = domain->getElements();

	for (Element* elem = it(); elem != 0; elem = it()) {
		if (!isElementSupported(elem->getClassTag()))
			continue;

		GPUElementData data;
		memset(&data, 0, sizeof(GPUElementData));  // zero all fields and padding

		data.elementTag = elem->getTag();
		data.elementType = elem->getClassTag();

		const ID& nodeIDs = elem->getExternalNodes();
		data.numNodes = nodeIDs.Size();

		// Node coordinates — the buffer holds at most 8 nodes x 3 components.
		const int nodeCap = data.numNodes < 8 ? data.numNodes : 8;
		for (int i = 0; i < nodeCap; i++) {
			Node* node = domain->getNode(nodeIDs(i));
			if (node == 0) {
				GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
								"Node " << nodeIDs(i) << " not found");
				continue;
			}

			const Vector& coords = node->getCrds();
			for (int j = 0; j < coords.Size() && j < 3; j++) {
				data.nodeCoords[i * 3 + j] = coords(j);
			}
			// 2D meshes: pad the z component with zero.
			if (coords.Size() < 3) {
				data.nodeCoords[i * 3 + 2] = 0.0;
			}
		}

		// Material parameters (element-type specific).
		extractMaterialProperties(elem, data);

		// DOF mapping intentionally omitted: DOF_Groups are created later,
		// and extractDOFMapping() fills d_elementDOF at that point.

		hostElements.push_back(data);
	}

	// Upload to the GPU (buffer was allocated in allocateGPUMatrixMemory()).
	if (d_elementData != 0 && !hostElements.empty()) {
		size_t elementDataSize = hostElements.size() * sizeof(GPUElementData);
		GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG,
						"Transferring " << hostElements.size() << " elements ("
						<< elementDataSize << " bytes) to GPU");

		CHECK_CUDA(cudaMemcpy(d_elementData, hostElements.data(), elementDataSize,
							  cudaMemcpyHostToDevice));
	} else if (d_elementData == 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"d_elementData is NULL - memory not allocated");
		return -1;
	}

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"Geometry and material data transferred successfully");

	return 0;
}

// ====== End of staged initialization ======


// ====== GPU Element Data Extraction ======

/**
 * @brief Extract element data from the OpenSees Domain into GPU structures.
 *
 * Workflow:
 * - Step 1:   validate AnalysisModel/Domain.
 * - Step 1.5: count GPU-supported elements; allocate GPU buffers on the
 *             first call or when the supported count changes.
 * - Step 2:   for each supported element, gather current node coordinates
 *             (initial coordinates + trial displacements — required for
 *             nonlinear analysis), material parameters, and the DOF mapping.
 * - Step 3:   transfer the packed GPUElementData array to the GPU.
 * - Step 4:   build and transfer the flattened DOF mapping table.
 *
 * Fixes applied in this revision:
 * - GPUElementData is now zero-initialized before filling (previously
 *   uninitialized fields/padding were copied to the GPU), matching
 *   extractGeometryAndMaterialOnly().
 * - The node-coordinate loop is bounded to 8 nodes, the capacity used by
 *   extractGeometryAndMaterialOnly(), preventing an out-of-bounds write
 *   into nodeCoords for elements with more nodes.
 * - Removed an unused local vector (h_elementDOFMap).
 *
 * @return 0 on success, -1 on error.
 */
int GPUSolver::extractElementDataFromDomain()
{
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"extractElementDataFromDomain() started");

	// ====== Step 1: obtain the Domain and check validity ======
	if (theModel == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"AnalysisModel is null, cannot extract element data");
		return -1;
	}

	Domain* theDomain = theModel->getDomainPtr();
	if (theDomain == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"Domain is null, cannot extract element data");
		return -1;
	}

	// ====== Step 1.5: count GPU-supported elements and allocate memory ======
	ElementIter& theElementsCount = theDomain->getElements();
	Element* theElement;
	int actualNumElements = 0;
	int actualSupportedElements = 0;

	while ((theElement = theElementsCount()) != 0) {
		actualNumElements++;
		if (isElementSupported(theElement->getClassTag())) {
			actualSupportedElements++;
		}
	}

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"Domain contains " << actualNumElements << " elements, "
					<< actualSupportedElements << " GPU-supported");

	if (actualSupportedElements == 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"No GPU-supported elements found in domain!");
		return -1;
	}

	// Allocate (or re-allocate) GPU buffers on the first call or when the
	// supported element count changed.
	// NOTE(review): function-local static — shared across all GPUSolver
	// instances; assumes a single solver instance per process. Consider
	// promoting to a member flag if multiple instances are ever created.
	static bool memoryAllocated = false;
	if (!memoryAllocated || actualSupportedElements != numElements) {
		numElements = actualSupportedElements;
		maxDOFPerElement = 24;  // up to 24 DOFs (8-node brick or 3D beam)

		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO,
						"Allocating GPU memory for " << numElements << " supported elements");

		int ret = allocateGPUMatrixMemory();
		if (ret != 0) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
							"Failed to allocate GPU matrix memory");
			return -1;
		}
		memoryAllocated = true;
	}

	// ====== Step 2: iterate over all elements and extract data ======
	ElementIter& theElements = theDomain->getElements();

	std::vector<GPUElementData> h_elementData;

	numSupportedElements = 0;
	numUnsupportedElements = 0;

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG,
					"Iterating through domain elements...");

	while ((theElement = theElements()) != 0) {
		int classTag = theElement->getClassTag();

		// Debug aid: print the real classTag of the first 5 elements seen.
		static int debugCount = 0;
		if (debugCount < 5) {
			DEBUG_COUT << "[DEBUG] Element " << theElement->getTag()
			          << " classTag=" << classTag
			          << " isSupported=" << isElementSupported(classTag) << "\n";
			debugCount++;
		}

		// ====== Step 2.1: skip elements without a GPU kernel ======
		if (!isElementSupported(classTag)) {
			numUnsupportedElements++;
			GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::VERBOSE,
							"Element " << theElement->getTag() << " (type " << classTag
							<< ") not supported on GPU - will use CPU fallback");
			continue;
		}

		numSupportedElements++;

		// ====== Step 2.2: create the GPU element record (zero-initialized
		// so no uninitialized bytes are ever transferred to the GPU) ======
		GPUElementData elemData;
		memset(&elemData, 0, sizeof(GPUElementData));
		elemData.elementTag = theElement->getTag();
		elemData.elementType = classTag;

		// ====== Step 2.3: extract node information ======
		const ID& nodeIDs = theElement->getExternalNodes();
		elemData.numNodes = nodeIDs.Size();

		GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::VERBOSE,
						"Extracting element " << elemData.elementTag
						<< " with " << elemData.numNodes << " nodes");

		// Current node coordinates; nodeCoords holds at most 8 nodes x 3
		// components (same capacity assumption as
		// extractGeometryAndMaterialOnly()).
		for (int i = 0; i < elemData.numNodes && i < 8; i++) {
			Node* node = theDomain->getNode(nodeIDs(i));
			if (node == nullptr) {
				GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
								"Node " << nodeIDs(i) << " not found in domain");
				return -1;
			}

			// Current coordinate = initial coordinate + trial displacement
			// (mandatory for nonlinear analysis).
			const Vector& crds = node->getCrds();       // initial coordinates
			const Vector& disp = node->getTrialDisp();  // current displacement
			int coordSize = crds.Size();

			for (int j = 0; j < coordSize && j < 3; j++) {
				double currentCoord = crds(j);
				// Add the displacement when the vector has this component.
				if (j < disp.Size()) {
					currentCoord += disp(j);
				}
				elemData.nodeCoords[i * 3 + j] = currentCoord;
			}
			// 2D meshes: the z component stays zero.
			if (coordSize < 3) {
				elemData.nodeCoords[i * 3 + 2] = 0.0;
			}
		}

		// ====== Step 2.4: extract material parameters ======
		int matRet = extractMaterialProperties(theElement, elemData);
		if (matRet != 0) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
							"Failed to extract material properties for element "
							<< elemData.elementTag << ", using defaults");
		}

		// ====== Step 2.5: extract the DOF mapping ======
		// In OpenSees the element DOFs are the concatenation of its nodes'
		// equation numbers (nodeIDs from Step 2.3 is reused here).
		elemData.numDOF = 0;

		for (int i = 0; i < elemData.numNodes; i++) {
			Node* node = theDomain->getNode(nodeIDs(i));
			if (node == nullptr) continue;

			int nodeDOF = node->getNumberDOF();
			elemData.DOFPerNode = nodeDOF;  // assumes all nodes share the same DOF count

			// Equation numbers come from the node's DOF_Group.
			DOF_Group* theDOF = node->getDOF_GroupPtr();
			if (theDOF != nullptr) {
				const ID& nodeEqns = theDOF->getID();

				// Debug: verify the first element's DOF mapping.
				if (elemData.elementTag == 1 && i == 0) {
					GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
									"Element 1, Node[0] (ID=" << nodeIDs(i) << "): DOF_Group exists, ID size="
									<< nodeEqns.Size() << ", first DOF=" << nodeEqns(0));
				}

				for (int j = 0; j < nodeDOF && elemData.numDOF < GPUElementConstants::MAX_DOF_PER_ELEMENT; j++) {
					elemData.DOFIndices[elemData.numDOF] = nodeEqns(j);
					elemData.numDOF++;
				}
			} else {
				// A missing DOF_Group should not happen at this stage.
				if (elemData.elementTag <= 3) {  // only report the first few
					GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::WARNING,
									"Element " << elemData.elementTag << ", Node[" << i
									<< "] (ID=" << nodeIDs(i) << "): DOF_Group is NULL!");
				}
				for (int j = 0; j < nodeDOF && elemData.numDOF < GPUElementConstants::MAX_DOF_PER_ELEMENT; j++) {
					elemData.DOFIndices[elemData.numDOF] = -1;  // -1 = unknown or constrained DOF
					elemData.numDOF++;
				}
			}
		}

		GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::VERBOSE,
						"Element " << elemData.elementTag << " has " << elemData.numDOF
						<< " DOFs (" << elemData.DOFPerNode << " per node)");

		// ====== Step 2.6: append to the host-side staging vector ======
		h_elementData.push_back(elemData);
	}

	// Sanity check: extracted count should match the allocation count.
	if (h_elementData.size() != static_cast<size_t>(numElements)) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
						"Extracted element count mismatch: expected " << numElements
						<< ", got " << h_elementData.size());
	}

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"Element data extraction summary:\n"
					<< "  GPU-supported elements: " << numSupportedElements << "\n"
					<< "  CPU fallback elements: " << numUnsupportedElements);

	// Debug: inspect the host-side data before the transfer.
	DEBUG_COUT << "\n[DEBUG] CPU h_elementData (before GPU transfer):\n";
	for (size_t i = 0; i < std::min(size_t(3), h_elementData.size()); i++) {
		const auto& elem = h_elementData[i];
		DEBUG_COUT << "  Element[" << i << "] tag=" << elem.elementTag
		          << " type=" << elem.elementType
		          << " numNodes=" << elem.numNodes << "\n";
	}
	DEBUG_COUT << std::endl;

	// Debug verification: dump the first 3 element records in detail.
	if (GPUSolver::debugEnabled && GPUSolver::currentDebugLevel >= DebugLevel::DEBUG) {
		for (size_t i = 0; i < std::min(size_t(3), h_elementData.size()); i++) {
			const auto& elem = h_elementData[i];
			GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG,
							"Element[" << i << "] tag=" << elem.elementTag
							<< " type=" << elem.elementType
							<< " numNodes=" << elem.numNodes
							<< " numDOF=" << elem.numDOF);

			// Material parameters.
			GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG,
							"  Materials: E=" << elem.materialProps[0]
							<< " nu=" << elem.materialProps[1]
							<< " t=" << elem.materialProps[2]);

			// First two node coordinates.
			GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG,
							"  Node0: (" << elem.nodeCoords[0] << "," << elem.nodeCoords[1] << "," << elem.nodeCoords[2] << ")");
			GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG,
							"  Node1: (" << elem.nodeCoords[3] << "," << elem.nodeCoords[4] << "," << elem.nodeCoords[5] << ")");

			// First four DOF indices.
			std::string dofStr = "  DOF: ";
			for (int j = 0; j < std::min(4, elem.numDOF); j++) {
				dofStr += std::to_string(elem.DOFIndices[j]) + " ";
			}
			GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG, dofStr);
		}
	}

	// ====== Step 3: transfer the element records to the GPU ======
	size_t elementDataSize = h_elementData.size() * sizeof(GPUElementData);

	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG,
					"Transferring " << h_elementData.size() << " elements ("
					<< elementDataSize << " bytes) to GPU");

	// d_elementData was allocated in allocateGPUMatrixMemory(); plain copy.
	CHECK_CUDA(cudaMemcpy(d_elementData, h_elementData.data(), elementDataSize,
						   cudaMemcpyHostToDevice));

	// Debug: read the data back from the GPU to verify the transfer.
	std::vector<GPUElementData> h_verify(std::min(size_t(3), h_elementData.size()));
	CHECK_CUDA(cudaMemcpy(h_verify.data(), d_elementData,
	                      h_verify.size() * sizeof(GPUElementData),
	                      cudaMemcpyDeviceToHost));
	DEBUG_COUT << "\n[DEBUG] GPU d_elementData (after transfer, read back):\n";
	for (size_t i = 0; i < h_verify.size(); i++) {
		const auto& elem = h_verify[i];
		DEBUG_COUT << "  Element[" << i << "] tag=" << elem.elementTag
		          << " type=" << elem.elementType
		          << " numNodes=" << elem.numNodes << "\n";
	}
	DEBUG_COUT << std::endl;

	// ====== Step 4: build and transfer the flattened DOF mapping table ======
	std::vector<int> h_dofMap;
	for (const auto& elem : h_elementData) {
		for (int i = 0; i < elem.numDOF; i++) {
			h_dofMap.push_back(elem.DOFIndices[i]);
		}
	}

	size_t dofMapSize = h_dofMap.size() * sizeof(int);
	size_t allocatedDOFSize = numElements * maxDOFPerElement * sizeof(int);

	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG,
					"Transferring DOF map (" << h_dofMap.size() << " DOFs, "
					<< dofMapSize << " bytes) to GPU");

	// Guard against overflowing the d_elementDOF allocation.
	if (dofMapSize > allocatedDOFSize) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"DOF map size (" << dofMapSize << " bytes) exceeds allocated memory ("
						<< allocatedDOFSize << " bytes)");
		return -1;
	}

	// d_elementDOF was allocated in allocateGPUMatrixMemory(); plain copy.
	CHECK_CUDA(cudaMemcpy(d_elementDOF, h_dofMap.data(), dofMapSize,
						   cudaMemcpyHostToDevice));

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"Element data extraction and transfer completed successfully");

	return 0;
}

/**
 * @brief Extract material parameters from an OpenSees element.
 *
 * Parameters pulled per element family:
 * - Quad elements:  E (Young's modulus), nu (Poisson ratio), t (thickness), rho (density)
 * - Truss elements: E (Young's modulus), A (cross-section area), rho (density)
 * - Beam elements:  E, G (shear modulus), A, I (moment of inertia), rho
 *
 * @param theElement OpenSees element pointer
 * @param elemData   GPU element data structure (output)
 * @return 0 on success, -1 on failure (unrecognized element type)
 *
 * @note Simplified implementation: a failed Quad extraction falls back to
 *       typical concrete values; Truss/Beam failures only log a warning.
 * @note Phase 2 will implement the complete material-interface extraction.
 */
int GPUSolver::extractMaterialProperties(Element* theElement, GPUElementData& elemData)
{
	const int classTag = theElement->getClassTag();

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::VERBOSE,
					"Extracting material properties for element type " << classTag);

	// Family membership tests (class tags from OpenSees classTags.h).
	const bool isQuad  = (classTag == 31) || (classTag == 32) || (classTag == 134);
	const bool isTruss = (classTag == 12) || (classTag == 13) || (classTag == 138);
	const bool isBeam  = (classTag == 3) || (classTag == 4) || (classTag == 5) ||
	                     (classTag == 41234) || (classTag == 145) || (classTag == 146);

	if (isQuad) {
		// Dedicated extractor keeps this file free of element-class compile dependencies.
		if (GPUMaterialExtractor::extractQuadMaterial(theElement, elemData) != 0) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
							"Failed to extract Quad material, using defaults");
			// Fall back to typical concrete properties.
			elemData.materialType = static_cast<int>(GPUMaterialType::ELASTIC_ISOTROPIC);
			elemData.materialProps[0] = 3.0e10;   // E
			elemData.materialProps[1] = 0.2;      // nu
			elemData.materialProps[2] = 0.2;      // t
			elemData.materialProps[3] = 2400.0;   // rho
		}
	} else if (isTruss) {
		if (GPUMaterialExtractor::extractTrussMaterial(theElement, elemData) != 0) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
							"Failed to extract Truss material, using defaults");
		}
	} else if (isBeam) {
		if (GPUMaterialExtractor::extractBeamMaterial(theElement, elemData) != 0) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
							"Failed to extract Beam material, using defaults");
		}
	} else {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
						"Unknown element type " << classTag);
		return -1;
	}

	return 0;
}

// ====== End of GPU Element Data Extraction ======

// ====== GPU Matrix Assembly Core Implementation ======

/**
 * @brief Form the tangent stiffness matrix in parallel on the GPU
 *        (GPU counterpart of formTangent()).
 *
 * Core Phase 1 routine: element stiffness computation and global matrix
 * assembly both run on the GPU.
 *
 * Algorithm:
 * 1. On the first call, extract element data and nonlinear material state to the GPU
 * 2. Update node coordinates on the GPU (needed for nonlinear analysis)
 * 3. Compute all element stiffness matrices in parallel on the GPU
 * 4. Zero d_val and assemble the global CSR matrix in parallel (atomic kernel)
 *
 * Advantages over the CPU path:
 * - Eliminates the serial per-element loop
 * - Eliminates most CPU-GPU data transfers
 * - Expected 3-5x speedup (Phase 1)
 *
 * @return 0 on success, -1 on failure
 */
int GPUSolver::formTangent_GPU()
{
	// ====== GPU parallel assembly: batched FE_Element data + GPU kernels ======
	// Strategy: CPU batch-extracts FE_Element data -> GPU assembles in parallel.

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"formTangent_GPU() - GPU parallel assembly with FE_Element data");

	// Mark GPU mode immediately so the flag is set even on early error returns.
	m_matrixOnGPU = true;

	using namespace std::chrono;
	auto t_start = high_resolution_clock::now();

	// ====== Step 1: check the AnalysisModel ======
	if (theModel == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"theModel is NULL - setLinks() may not have been called");
		return -1;
	}

	// ====== Step 2: GPU-parallel element stiffness computation ======
	// Phase 2 optimization: compute element stiffness directly on the GPU,
	// avoiding the serial CPU element loop.
	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
					"GPU parallel element stiffness computation (Phase 2)");

	// One-time element data initialization.
	// NOTE(review): a function-local static runs once per PROCESS, not once per
	// GPUSolver instance - confirm a single solver instance is assumed.
	static bool elementDataInitialized = false;
	if (!elementDataInitialized) {
		GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
						"First call - initializing element data on GPU");
		int ret = initializeElementData();
		if (ret != 0) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
							"Failed to initialize element data");
			return -1;
		}

		// Allocate state variables for nonlinear materials.
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO,
						"Allocating nonlinear material state variables");
		ret = allocateNonlinearMaterialStates();
		if (ret != 0) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
							"Failed to allocate nonlinear material states");
			return -1;
		}

		elementDataInitialized = true;
	}

	// Refresh current node coordinates (initial coordinates + displacements)
	// before every iteration. Mandatory for nonlinear analysis: the stiffness
	// must be evaluated at the current geometry. Done on the GPU (Phase 2
	// optimization) to avoid a CPU-side loop over elements.
	int updateRet = updateNodeCoordinatesOnGPU();
	if (updateRet != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"Failed to update node coordinates on GPU");
		DEBUG_COUT << "[ERROR] updateNodeCoordinatesOnGPU() returned " << updateRet << "\n";
		return -1;
	}

	// Launch the GPU kernel that computes all element stiffness matrices.
	auto t_gpu_stiffness_start = high_resolution_clock::now();
	int ret = computeElementStiffness_GPU();
	if (ret != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"GPU element stiffness computation failed");
		DEBUG_COUT << "[ERROR] computeElementStiffness_GPU() returned " << ret << "\n";
		return -1;
	}
	CHECK_CUDA(cudaDeviceSynchronize());
	auto t_gpu_stiffness_end = high_resolution_clock::now();
	double t_gpu_stiffness_ms = duration<double, std::milli>(t_gpu_stiffness_end - t_gpu_stiffness_start).count();
	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
					"GPU stiffness computation: " << t_gpu_stiffness_ms << " ms");

	// ====== Step 3: Phase 1 optimization - cached DOF mapping (extracted once) ======
	// On the first call, extract the DOF mapping and cache it on the GPU.
	if (!m_dofMappingCached) {
		GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
						"First call - extracting and caching DOF mapping on GPU");
		int cacheRet = extractAndCacheDOFMapping();
		if (cacheRet != 0) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
							"Failed to extract and cache DOF mapping");
			return -1;
		}
	} else {
		GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG,
						"Using cached DOF mapping (eliminated 2.8ms CPU overhead)");
	}

	// ====== Step 4: zero the global stiffness matrix on the GPU ======
	CHECK_CUDA(cudaMemset(d_val, 0, nnz * sizeof(double)));

	// ====== Step 5: launch the parallel assembly kernel (cached DOF mapping) ======
	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
					"Launching GPU parallel assembly kernel with cached DOF mapping");

	int blockSize = 256;
	int gridSize = (m_numFE + blockSize - 1) / blockSize;  // one thread per FE_Element

	// Uses the cached DOF mapping and FE->Element index mapping.
	ret = launchAssemblyKernel_Atomic(
		d_val,              // global stiffness matrix (CSR value array)
		d_rowPtr,           // CSR row pointers
		d_colInd,           // CSR column indices
		d_elementStiffness, // element stiffness matrices (layout: maxDOFPerElement)
		d_feDOFMapping,     // cached FE DOF mapping (layout: m_maxFEDOF)
		d_feDOFCount,       // cached FE DOF counts
		d_feToElementMap,   // cached FE -> element index mapping
		m_numFE,            // cached FE count
		m_maxFEDOF,         // layout parameter of the cached FE DOF mapping
		maxDOFPerElement,   // layout parameter of the element stiffness buffer
		gridSize,
		blockSize
	);

	CHECK_CUDA(cudaDeviceSynchronize());

#if GPU_ENABLE_DEBUG_OUTPUT
	// ====== Debug: inspect intermediate GPU assembly data ======
	// FIX: this section used to run unconditionally, issuing three
	// device-to-host cudaMemcpy calls on EVERY tangent formation even when
	// DEBUG_COUT discards its output. It is now compiled out together with
	// the debug stream so release runs pay no transfer cost.
	DEBUG_COUT << "\n[DEBUG] GPU Assembly Kernel Debug:\n";
	DEBUG_COUT << "  m_numFE = " << m_numFE << "\n";
	DEBUG_COUT << "  m_maxFEDOF = " << m_maxFEDOF << "\n";
	DEBUG_COUT << "  maxDOFPerElement = " << maxDOFPerElement << "\n";
	DEBUG_COUT << "  gridSize = " << gridSize << ", blockSize = " << blockSize << "\n";

	// Inspect d_elementStiffness (element stiffness matrices).
	if (d_elementStiffness != nullptr && maxDOFPerElement > 0) {
		std::vector<double> elemK_sample(std::min(100, maxDOFPerElement * maxDOFPerElement));
		CHECK_CUDA(cudaMemcpy(elemK_sample.data(), d_elementStiffness,
		                      elemK_sample.size() * sizeof(double), cudaMemcpyDeviceToHost));
		int nonzero = 0;
		double sum = 0.0;
		for (auto v : elemK_sample) {
			if (std::abs(v) > 1e-15) { nonzero++; sum += std::abs(v); }
		}
		DEBUG_COUT << "  d_elementStiffness[0-" << elemK_sample.size() << "]: nonzero="
		          << nonzero << " sum=" << sum << "\n";
	} else {
		DEBUG_COUT << "  d_elementStiffness: NULL or maxDOFPerElement=0\n";
	}

	// Inspect d_feDOFCount (per-FE DOF counts).
	if (d_feDOFCount != nullptr && m_numFE > 0) {
		std::vector<int> dofCounts(std::min(10, m_numFE));
		CHECK_CUDA(cudaMemcpy(dofCounts.data(), d_feDOFCount,
		                      dofCounts.size() * sizeof(int), cudaMemcpyDeviceToHost));
		DEBUG_COUT << "  d_feDOFCount[0-" << dofCounts.size() << "]: ";
		for (auto c : dofCounts) DEBUG_COUT << c << " ";
		DEBUG_COUT << "\n";
	} else {
		DEBUG_COUT << "  d_feDOFCount: NULL or no FEs\n";
	}

	// Inspect d_feDOFMapping (FE DOF mapping).
	if (d_feDOFMapping != nullptr && m_numFE > 0 && m_maxFEDOF > 0) {
		std::vector<int> dofMapping(std::min(20, m_maxFEDOF));
		CHECK_CUDA(cudaMemcpy(dofMapping.data(), d_feDOFMapping,
		                      dofMapping.size() * sizeof(int), cudaMemcpyDeviceToHost));
		DEBUG_COUT << "  d_feDOFMapping[0-" << dofMapping.size() << "]: ";
		for (auto m : dofMapping) DEBUG_COUT << m << " ";
		DEBUG_COUT << "\n";
	} else {
		DEBUG_COUT << "  d_feDOFMapping: NULL or no FEs\n";
	}
	DEBUG_COUT << "\n";
	fflush(stdout);
#endif  // GPU_ENABLE_DEBUG_OUTPUT
	// ====== End of debug code ======

	auto t_kernel = high_resolution_clock::now();
	double t_kernel_ms = duration<double, std::milli>(t_kernel - t_gpu_stiffness_end).count();
	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
					"GPU assembly kernel: " << t_kernel_ms << " ms");

	// ====== Step 6: done (DOF mapping stays cached on the GPU; nothing to free) ======
	// Phase 1 optimization: d_feDOFMapping, d_feDOFCount and d_feToElementMap
	// remain resident on the GPU and are reused on the next call.

	auto t_end = high_resolution_clock::now();
	double t_total_ms = duration<double, std::milli>(t_end - t_start).count();
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"formTangent_GPU() completed: " << t_total_ms << " ms "
					<< "(GPU stiffness:" << t_gpu_stiffness_ms << " ms, "
					<< "GPU assembly:" << t_kernel_ms << " ms)");

	// m_matrixOnGPU was already set at the top of the function.
	return ret;
}

/**
 * @brief Compute all element stiffness matrices in parallel on the GPU.
 *
 * First syncs the current trial displacements from the Domain's nodes to the
 * GPU (required for nonlinear materials), then launches the per-type
 * stiffness kernels:
 * - Quad elements: computeQuadStiffness (the only kernel launched today)
 * - Truss elements: computeTrussStiffness (TODO Phase 2)
 * - Beam elements: computeBeamStiffness (TODO Phase 2)
 *
 * @return 0 on success, non-zero on failure
 */
int GPUSolver::computeElementStiffness_GPU()
{
	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
					"computeElementStiffness_GPU() started - syncing displacements");

	// ====== Sync current displacements to the GPU (nonlinear materials) ======
	// Important: read TrialDisp from the Domain's nodes, NOT the X vector
	// (X holds the increment, not the total displacement).
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO,
					"Checking displacement sync: d_nodeDisplacements=" <<
					(void*)d_nodeDisplacements << " theModel=" << (void*)theModel);

	// Lazily allocate the node displacement buffer.
	if (d_nodeDisplacements == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO,
						"Allocating node displacement buffer for nonlinear analysis");
		int allocResult = allocateNodeDataBuffers();
		if (allocResult != 0) {
			// Best-effort: on failure d_nodeDisplacements stays NULL and the
			// sync below is skipped (the kernel treats displacements as optional).
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
							"Failed to allocate node data buffers");
		}
	}

	if (d_nodeDisplacements != nullptr && theModel != nullptr) {
		Domain* theDomain = theModel->getDomainPtr();
		if (theDomain == nullptr) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
							"Domain is NULL, cannot sync displacements");
		} else {
			// Newton-Raphson needs the stiffness evaluated at the current
			// trial displacements on every iteration.
			// Temporary vector holding the full set of equation DOFs.
			Vector fullDisp(size);
			fullDisp.Zero();

			// Gather each node's current trial displacement.
			NodeIter& nodeIter = theDomain->getNodes();
			Node* theNode;
			int nodeCount = 0;
			while ((theNode = nodeIter()) != 0) {
				const Vector& trialDisp = theNode->getTrialDisp();
				DOF_Group* theDOF = theNode->getDOF_GroupPtr();

				if (theDOF != nullptr) {
					const ID& dofID = theDOF->getID();
					int numNodeDOF = trialDisp.Size();

					// Debug: print displacements for the first 5 nodes only.
					if (nodeCount < 5) {
						GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO,
										"Node " << theNode->getTag() << " trialDisp=[" <<
										(numNodeDOF > 0 ? trialDisp(0) : 0.0) << ", " <<
										(numNodeDOF > 1 ? trialDisp(1) : 0.0) << "] dofID=[" <<
										(dofID.Size() > 0 ? dofID(0) : -999) << ", " <<
										(dofID.Size() > 1 ? dofID(1) : -999) << "]");
					}

					// Scatter the node displacements into the global vector
					// (out-of-range/negative equation numbers are skipped).
					for (int i = 0; i < numNodeDOF; i++) {
						int globalDOF = dofID(i);
						if (globalDOF >= 0 && globalDOF < size) {
							fullDisp(globalDOF) = trialDisp(i);
						}
					}
				}
				nodeCount++;
			}

			// Debug: show the head of the displacement vector.
			GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO,
							"Full displacement vector: size=" << fullDisp.Size() <<
							" values=[" << (fullDisp.Size() > 0 ? fullDisp(0) : 0.0) <<
							", " << (fullDisp.Size() > 1 ? fullDisp(1) : 0.0) << "]");

			// Upload to the GPU.
			int result = copyVectorToGPUBuffer(fullDisp, d_nodeDisplacements);
			if (result != 0) {
				GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
								"Failed to sync displacements before stiffness computation");
				return -1;
			}

			GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG,
							"Current displacements synced to GPU: " << fullDisp.Size() << " DOF");
		}
	}

	// ====== Kernel launch configuration ======
	int blockSize = 256;
	int gridSize = (numElements + blockSize - 1) / blockSize;

	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
					"Kernel config: gridSize=" << gridSize << ", blockSize=" << blockSize);

	// ====== Launch the Quad stiffness kernel ======
	// NOTE: the current simplified implementation assumes every element is a
	// Quad; Phase 2 will dispatch per element type.
	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
					"Launching Quad stiffness computation kernel");

	// ====== Debug: re-verify d_elementData just before the kernel launch ======
	// FIX: this device-to-host copy used to run unconditionally on every
	// stiffness formation; it is now gated on the debug level, matching the
	// verification block at the end of this function.
	if (GPUSolver::debugEnabled && GPUSolver::currentDebugLevel >= DebugLevel::DEBUG) {
		std::vector<GPUElementData> h_kernel_check(std::min(3, numElements));
		CHECK_CUDA(cudaMemcpy(h_kernel_check.data(), d_elementData,
		                      h_kernel_check.size() * sizeof(GPUElementData),
		                      cudaMemcpyDeviceToHost));
		DEBUG_COUT << "\n[DEBUG] d_elementData before kernel launch:\n";
		for (size_t i = 0; i < h_kernel_check.size(); i++) {
			DEBUG_COUT << "  Element[" << i << "] type=" << h_kernel_check[i].elementType
			          << " tag=" << h_kernel_check[i].elementTag << "\n";
		}
		DEBUG_COUT << "  d_elementData pointer: " << (void*)d_elementData << "\n\n";
		fflush(stdout);
	}

	int ret = launchQuadStiffnessKernel(
		numElements,
		reinterpret_cast<const GPUElementData*>(d_elementData),
		d_elementStiffness,
		d_nodeDisplacements,  // for geometric nonlinearity (optional, may be NULL)
		reinterpret_cast<ConcreteState*>(d_materialStates_committed),  // committed state (read-only)
		reinterpret_cast<ConcreteState*>(d_materialStates_trial),      // trial state (written)
		d_strainTotal,        // committed strain (read-only)
		d_strainCurrent_trial,// trial strain (written)
		maxDOFPerElement,     // memory layout parameter
		0  // default stream
	);

	if (ret != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"Quad stiffness kernel launch failed");
		return ret;
	}

	// TODO Phase 2: launch separate kernels per actual element type:
	// if (hasTrussElements) launchTrussStiffnessKernel(...);
	// if (hasBeamElements)  launchBeamStiffnessKernel(...);

	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
					"Element stiffness kernels launched successfully");

	// ====== Debug verification: confirm the stiffness matrices are non-trivial ======
	if (GPUSolver::debugEnabled && GPUSolver::currentDebugLevel >= DebugLevel::DEBUG) {
		// Inspect the first 3 element stiffness matrices.
		std::vector<double> h_checkStiff(3 * maxDOFPerElement * maxDOFPerElement, 0.0);
		CHECK_CUDA(cudaMemcpy(h_checkStiff.data(), d_elementStiffness,
							  h_checkStiff.size() * sizeof(double),
							  cudaMemcpyDeviceToHost));

		for (int elem = 0; elem < 3; elem++) {
			double maxVal = 0.0;
			double sumVal = 0.0;
			int nonZeroCount = 0;
			int offset = elem * maxDOFPerElement * maxDOFPerElement;

			// Scan the 8x8 upper-left sub-block of each element matrix.
			for (int i = 0; i < 8; i++) {
				for (int j = 0; j < 8; j++) {
					double val = h_checkStiff[offset + i * maxDOFPerElement + j];
					if (val != 0.0) {
						nonZeroCount++;
						sumVal += std::abs(val);
						maxVal = std::max(maxVal, std::abs(val));
					}
				}
			}

			GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
							"Element " << elem << " stiffness: nonzero=" << nonZeroCount
							<< ", max=" << maxVal << ", sum=" << sumVal);
		}
	}

	return 0;
}

/**
 * @brief GPU-parallel assembly of the global stiffness matrix.
 *
 * ====== DEPRECATED ======
 * The GPU assembly now lives in formTangent_GPU(), which consumes FE_Element
 * data directly instead of staging element matrices here. This stub only
 * emits a deprecation warning and reports success.
 *
 * FIX: the previous version returned 0 unconditionally at the top, leaving
 * the old commented-out atomic-assembly call and a matrix-verification debug
 * block as unreachable dead code after the return; that dead code has been
 * removed. (The equivalent verification still exists in the active
 * formTangent_GPU() path.)
 *
 * @return always 0 (deprecated no-op)
 */
int GPUSolver::assembleGlobalMatrix_GPU()
{
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::WARNING,
					"assembleGlobalMatrix_GPU() is deprecated - use formTangent_GPU() instead");
	return 0;
}

// ====== End of GPU Matrix Assembly Core Implementation ======



// ====== End of GPU Element Data Initialization ======

// ====== GPU Element Support Check ======

// 检查单元类型是否支持GPU加速
bool GPUSolver::isElementSupported(int elementClassTag)
{
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::VERBOSE, 
					"isElementSupported() called with elementClassTag=" << elementClassTag);

	// 根据CLAUDE.md规划的初期支持单元类型（基于OpenSees classTags.h）
	switch (elementClassTag) {
		// Truss单元类型
		case 12:  // ELE_TAG_Truss
		case 13:  // ELE_TAG_TrussSection
		case 138: // ELE_TAG_Truss2
			GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG, 
							"Element type: Truss family (TAG=" << elementClassTag << ") - SUPPORTED");
			return true;
			
		// 弹性梁单元 2D
		case 3:   // ELE_TAG_ElasticBeam2d
		case 4:   // ELE_TAG_ModElasticBeam2d  
		case 145: // ELE_TAG_ElasticTimoshenkoBeam2d
			GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG, 
							"Element type: Elastic Beam 2D family (TAG=" << elementClassTag << ") - SUPPORTED");
			return true;
			
		// 弹性梁单元 3D
		case 5:     // ELE_TAG_ElasticBeam3d
		case 41234: // ELE_TAG_ModElasticBeam3d
		case 146:   // ELE_TAG_ElasticTimoshenkoBeam3d
			GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG, 
							"Element type: Elastic Beam 3D family (TAG=" << elementClassTag << ") - SUPPORTED");
			return true;
			
		// 四节点四边形单元
		case 31:  // ELE_TAG_FourNodeQuad
		case 32:  // ELE_TAG_FourNodeQuad3d
		case 134: // ELE_TAG_FourNodeQuad02
			GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG, 
							"Element type: Four Node Quad family (TAG=" << elementClassTag << ") - SUPPORTED");
			return true;
			
		default:
			GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG, 
							"Element type: " << elementClassTag << " - NOT SUPPORTED (CPU fallback)");
			return false;
	}
}

// 获取支持信息
int GPUSolver::getSupportedElementInfo()
{
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, 
					"getSupportedElementInfo() called");

	// 输出当前支持的单元类型信息
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, 
					"=== GPU Supported Element Types ===");
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, 
					"Truss Family: 12(Truss), 13(TrussSection), 138(Truss2)");
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, 
					"Elastic Beam 2D: 3(ElasticBeam2d), 4(ModElasticBeam2d), 145(ElasticTimoshenkoBeam2d)");
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, 
					"Elastic Beam 3D: 5(ElasticBeam3d), 41234(ModElasticBeam3d), 146(ElasticTimoshenkoBeam3d)");
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, 
					"Quad Elements: 31(FourNodeQuad), 32(FourNodeQuad3d), 134(FourNodeQuad02)");
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, 
					"=== End of Support Info ===");

	// 输出统计信息
	int totalElements = numSupportedElements + numUnsupportedElements;
	if (totalElements > 0) {
		double supportRatio = (double)numSupportedElements / totalElements * 100.0;
		GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, 
						"Current model statistics:");
		GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, 
						"  GPU supported elements: " << numSupportedElements);
		GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, 
						"  CPU fallback elements: " << numUnsupportedElements);
		GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, 
						"  Total elements: " << totalElements);
		GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, 
						"  GPU acceleration ratio: " << supportRatio << "%");
	} else {
		GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, 
						"No elements found for statistics");
	}

	return 0;
}

// ====== End of GPU Element Support Check ======

// ====== PCG (Preconditioned Conjugate Gradient) Solver ======
// Same iteration as cgSolveGPU, plus optional Jacobi (diagonal) preconditioning.
// Solves A*x = b on the GPU for a CSR matrix A (n x n, nnz nonzeros); CG/PCG
// assumes A is symmetric positive definite. All d_* pointers are DEVICE
// pointers. Convergence is tested on the ABSOLUTE residual norm ||r|| < tol
// (not relative to ||b||). Returns 0 unconditionally, even when maxit is
// reached without convergence.
// NOTE(review): CUDA/cuBLAS/cuSPARSE return codes are not checked anywhere in
// this function - an allocation or SpMV failure propagates silently.
int pcgSolveGPU(
	int n, int nnz,
	const double* d_val, const int* d_rowPtr, const int* d_colInd,
	const double* d_b,
	double* d_x,
	double tol,
	int maxit,
	const double* d_M_inv,  // Jacobi preconditioner (diagonal inverse), can be nullptr
	bool usePreconditioner,
	bool useWarmStart = false,         // whether to warm-start from d_x_prev
	const double* d_x_prev = nullptr,  // warm-start initial guess
	// ===== persistent GPU resources (optional; avoid per-call create/alloc) =====
	cublasHandle_t cublasHandle_persistent = nullptr,     // persistent CUBLAS handle
	cusparseHandle_t cusparseHandle_persistent = nullptr, // persistent CUSPARSE handle
	double* d_r_persistent = nullptr,                     // persistent work vector r
	double* d_p_persistent = nullptr,                     // persistent work vector p
	double* d_Ap_persistent = nullptr                     // persistent work vector Ap
) {
	using namespace std::chrono;

	SOLVER_PRINTF("[PCG] ======= PCG求解器开始 (预条件: %s) =======\n",
		   usePreconditioner ? "启用" : "禁用");
	SOLVER_PRINTF("[PCG] 问题规模: n=%d, nnz=%d\n", n, nnz);

	auto t_start = high_resolution_clock::now();

	// 1. Reuse the caller-provided persistent CUBLAS/CUSPARSE handles if any.
	cublasHandle_t cublasHandle = cublasHandle_persistent;
	cusparseHandle_t cusparseHandle = cusparseHandle_persistent;
	bool handles_allocated_locally = false;

	// No persistent handles supplied: create temporary ones (backward compatible).
	if (cublasHandle == nullptr || cusparseHandle == nullptr) {
		cublasCreate(&cublasHandle);
		cusparseCreate(&cusparseHandle);
		handles_allocated_locally = true;
		SOLVER_PRINTF("[PCG] Using locally allocated CUDA handles (not persistent)\n");
	} else {
		SOLVER_PRINTF("[PCG] Using persistent CUDA handles (optimization enabled)\n");
	}

	// 2. Reuse the caller-provided persistent work vectors if any.
	double* d_r = d_r_persistent;
	double* d_p = d_p_persistent;
	double* d_Ap = d_Ap_persistent;
	double* d_z = nullptr;
	bool vectors_allocated_locally = false;

	// No persistent vectors supplied: allocate temporaries (backward compatible).
	// NOTE(review): treats the trio all-or-nothing - if any of r/p/Ap is NULL,
	// all three are (re)allocated locally and the non-NULL ones are ignored.
	if (d_r == nullptr || d_p == nullptr || d_Ap == nullptr) {
		cudaMalloc(&d_r, n * sizeof(double));
		cudaMalloc(&d_p, n * sizeof(double));
		cudaMalloc(&d_Ap, n * sizeof(double));
		vectors_allocated_locally = true;
		SOLVER_PRINTF("[PCG] Using locally allocated work vectors (not persistent)\n");
	} else {
		SOLVER_PRINTF("[PCG] Using persistent work vectors (optimization enabled)\n");
	}

	// d_z is always allocated locally (only needed by the preconditioned path).
	if (usePreconditioner) {
		cudaMalloc(&d_z, n * sizeof(double));
	}

	// ===== Warm-start initialization =====
	if (useWarmStart && d_x_prev != nullptr) {
		// x0 = x_prev (warm start)
		cudaMemcpy(d_x, d_x_prev, n * sizeof(double), cudaMemcpyDeviceToDevice);
		SOLVER_PRINTF("[PCG] Using warm start (x0 from previous solution)\n");
	} else {
		// x0 = 0 (cold start)
		cudaMemset(d_x, 0, n * sizeof(double));
		SOLVER_PRINTF("[PCG] Using cold start (x0 = 0)\n");
	}

	// 3. Create SpMV descriptors for A, p and Ap (generic cuSPARSE API).
	cusparseSpMatDescr_t matA = nullptr;
	cusparseDnVecDescr_t vecP = nullptr, vecAp = nullptr;
	void* dBuffer = nullptr;
	size_t bufferSize = 0;
	double one = 1.0, zero = 0.0;

	cusparseCreateCsr(&matA, n, n, nnz,
		(void*)d_rowPtr, (void*)d_colInd, (void*)d_val,
		CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
		CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F);
	cusparseCreateDnVec(&vecP, n, d_p, CUDA_R_64F);
	cusparseCreateDnVec(&vecAp, n, d_Ap, CUDA_R_64F);
	cusparseSpMV_bufferSize(
		cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
		&one, matA, vecP, &zero, vecAp, CUDA_R_64F,
		CUSPARSE_SPMV_ALG_DEFAULT, &bufferSize);
	cudaMalloc(&dBuffer, bufferSize);

	// 4. Initialize the residual: r = b (cold start) or r = b - A*x0 (warm start).
	if (useWarmStart && d_x_prev != nullptr) {
		// Warm start: compute r = b - A*x0.
		// First r = b.
		cublasDcopy(cublasHandle, n, d_b, 1, d_r, 1);

		// A*x -> d_Ap (d_Ap used as a temporary here).
		cusparseDnVecDescr_t vecX;
		cusparseCreateDnVec(&vecX, n, d_x, CUDA_R_64F);
		cusparseSpMV(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
			&one, matA, vecX, &zero, vecAp, CUDA_R_64F,
			CUSPARSE_SPMV_ALG_DEFAULT, dBuffer);
		cusparseDestroyDnVec(vecX);

		// r = b - A*x (i.e. r = r - A*x)
		double minusOne = -1.0;
		cublasDaxpy(cublasHandle, n, &minusOne, d_Ap, 1, d_r, 1);

		// Report the initial residual norm (debugging aid).
		double r0_norm;
		cublasDnrm2(cublasHandle, n, d_r, 1, &r0_norm);
		SOLVER_PRINTF("[PCG] Warm start initial residual: ||r0|| = %.6e\n", r0_norm);
	} else {
		// Cold start: r = b (since x0 = 0).
		cublasDcopy(cublasHandle, n, d_b, 1, d_r, 1);
	}

	double rho, rho_old, alpha, beta;

	if (usePreconditioner && d_M_inv != nullptr) {
		// PCG: z = M^(-1) * r
		jacobiPrecondition(n, d_M_inv, d_r, d_z);
		// p = z
		cublasDcopy(cublasHandle, n, d_z, 1, d_p, 1);
		// rho = r^T * z
		cublasDdot(cublasHandle, n, d_r, 1, d_z, 1, &rho);
	} else {
		// Plain CG: p = r, rho = ||r||^2
		cublasDcopy(cublasHandle, n, d_r, 1, d_p, 1);
		double r_norm;
		cublasDnrm2(cublasHandle, n, d_r, 1, &r_norm);
		rho = r_norm * r_norm;
	}

	// 5. PCG iteration loop.
	// NOTE(review): each iteration performs several blocking host-side
	// cublasDdot/cublasDnrm2 reductions - expected for CG, but they serialize
	// the stream.
	int iter = 0;
	auto t_iter_start = high_resolution_clock::now();

	for (; iter < maxit; ++iter) {
		// Ap = A*p
		cusparseSpMV(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
			&one, matA, vecP, &zero, vecAp, CUDA_R_64F,
			CUSPARSE_SPMV_ALG_DEFAULT, dBuffer);

		// tmp = p^T * Ap (curvature; near zero means breakdown / indefinite A)
		double tmp = 0.0;
		cublasDdot(cublasHandle, n, d_p, 1, d_Ap, 1, &tmp);

		if (fabs(tmp) < 1e-30) {
			SOLVER_PRINTF("[PCG] p^T*Ap too small, stopping at iteration %d\n", iter);
			break;
		}

		alpha = rho / tmp;

		// x = x + alpha * p
		cublasDaxpy(cublasHandle, n, &alpha, d_p, 1, d_x, 1);

		// r = r - alpha * Ap
		double tmp2 = -alpha;
		cublasDaxpy(cublasHandle, n, &tmp2, d_Ap, 1, d_r, 1);

		// Convergence check on the absolute residual norm.
		double r_norm;
		cublasDnrm2(cublasHandle, n, d_r, 1, &r_norm);
		if (r_norm < tol) {
			SOLVER_PRINTF("[PCG] 收敛: 迭代%d次, 残差=%.2e\n", iter + 1, r_norm);
			break;
		}

		rho_old = rho;

		if (usePreconditioner && d_M_inv != nullptr) {
			// PCG: z = M^(-1) * r
			jacobiPrecondition(n, d_M_inv, d_r, d_z);

			// rho = r^T * z
			cublasDdot(cublasHandle, n, d_r, 1, d_z, 1, &rho);

			// beta = rho / rho_old
			beta = rho / rho_old;

			// p = z + beta * p
			cublasDscal(cublasHandle, n, &beta, d_p, 1);
			cublasDaxpy(cublasHandle, n, &one, d_z, 1, d_p, 1);
		} else {
			// Plain CG: reuse the residual norm computed above.
			rho = r_norm * r_norm;
			beta = rho / rho_old;

			// p = r + beta * p
			cublasDscal(cublasHandle, n, &beta, d_p, 1);
			cublasDaxpy(cublasHandle, n, &one, d_r, 1, d_p, 1);
		}
	}

	auto t_iter_end = high_resolution_clock::now();
	double t_iter = duration<double>(t_iter_end - t_iter_start).count();

	// NOTE(review): on a convergence break, 'iter' here is one less than the
	// number of completed iterations (the convergence message above already
	// prints iter + 1).
	SOLVER_PRINTF("[PCG] 迭代完成: %d次迭代, 耗时%.6f s\n", iter, t_iter);
	if (iter >= maxit) {
		SOLVER_PRINTF("[PCG] WARNING: 达到最大迭代次数\n");
	}

	// 6. Release resources (only locally allocated ones; persistent resources
	//    are owned and freed by the caller).
	// Work vectors: free only if allocated in this call.
	if (vectors_allocated_locally) {
		if (d_r) cudaFree(d_r);
		if (d_p) cudaFree(d_p);
		if (d_Ap) cudaFree(d_Ap);
	}
	// d_z is always allocated locally, so always free it.
	if (d_z) cudaFree(d_z);

	// cuSPARSE resources.
	if (dBuffer) cudaFree(dBuffer);
	if (matA) cusparseDestroySpMat(matA);
	if (vecP) cusparseDestroyDnVec(vecP);
	if (vecAp) cusparseDestroyDnVec(vecAp);

	// CUDA handles: destroy only if created in this call.
	if (handles_allocated_locally) {
		if (cublasHandle) cublasDestroy(cublasHandle);
		if (cusparseHandle) cusparseDestroy(cusparseHandle);
	};  // NOTE(review): stray ';' - harmless empty statement

	auto t_end = high_resolution_clock::now();
	double t_total = duration<double>(t_end - t_start).count();
	SOLVER_PRINTF("[PCG] 总耗时: %.6f s\n", t_total);
	SOLVER_PRINTF("[PCG] ==========================================\n");

	return 0;
}


#if GPU_USE_MIXED_PRECISION
// ====== PCG FP32 (Mixed Precision) Solver ======
// FP32 variant of the PCG solver: every vector/matrix operation runs in
// single precision on the device.
//
// Solves A*x = b for an n x n CSR sparse matrix (nnz non-zeros) using the
// conjugate gradient method, optionally with a Jacobi (inverse-diagonal)
// preconditioner. Convergence test: absolute residual norm ||r|| < tol.
//
// The *_persistent parameters let the caller supply long-lived CUDA handles
// and work vectors so they are not re-created on every call; when any of
// them is nullptr, local equivalents are allocated and released before
// returning (backward-compatible fallback).
//
// Returns 0 unconditionally; reaching maxit only logs a warning.
int pcgSolveGPU_fp32(
	int n, int nnz,
	const float* d_val, const int* d_rowPtr, const int* d_colInd,
	const float* d_b,
	float* d_x,
	float tol,
	int maxit,
	const float* d_M_inv,  // Jacobi preconditioner (diagonal inverse), can be nullptr
	bool usePreconditioner,
	bool useWarmStart = false,         // whether to use warm start
	const float* d_x_prev = nullptr,   // warm-start initial guess (device, length n)
	// ===== Persistent GPU resource parameters =====
	cublasHandle_t cublasHandle_persistent = nullptr,     // persistent CUBLAS handle
	cusparseHandle_t cusparseHandle_persistent = nullptr, // persistent CUSPARSE handle
	float* d_r_persistent = nullptr,                      // persistent work vector r
	float* d_p_persistent = nullptr,                      // persistent work vector p
	float* d_Ap_persistent = nullptr                      // persistent work vector Ap
) {
	using namespace std::chrono;

	SOLVER_PRINTF("[PCG_FP32] ======= PCG FP32求解器开始 (预条件: %s) =======\n",
		   usePreconditioner ? "启用" : "禁用");
	SOLVER_PRINTF("[PCG_FP32] 问题规模: n=%d, nnz=%d\n", n, nnz);

	auto t_start = high_resolution_clock::now();

	// 1. Reuse the persistent CUBLAS/CUSPARSE handles when provided.
	cublasHandle_t cublasHandle = cublasHandle_persistent;
	cusparseHandle_t cusparseHandle = cusparseHandle_persistent;
	bool handles_allocated_locally = false;

	// Fall back to temporary handles when none were supplied (backward compatible).
	if (cublasHandle == nullptr || cusparseHandle == nullptr) {
		cublasCreate(&cublasHandle);
		cusparseCreate(&cusparseHandle);
		handles_allocated_locally = true;
		SOLVER_PRINTF("[PCG_FP32] Using locally allocated CUDA handles (not persistent)\n");
	} else {
		SOLVER_PRINTF("[PCG_FP32] Using persistent CUDA handles (optimization enabled)\n");
	}

	// 2. Reuse the persistent work vectors when provided.
	float* d_r = d_r_persistent;
	float* d_p = d_p_persistent;
	float* d_Ap = d_Ap_persistent;
	float* d_z = nullptr;
	bool vectors_allocated_locally = false;

	// Fall back to temporary vectors when none were supplied (backward compatible).
	if (d_r == nullptr || d_p == nullptr || d_Ap == nullptr) {
		cudaMalloc(&d_r, n * sizeof(float));
		cudaMalloc(&d_p, n * sizeof(float));
		cudaMalloc(&d_Ap, n * sizeof(float));
		vectors_allocated_locally = true;
		SOLVER_PRINTF("[PCG_FP32] Using locally allocated work vectors (not persistent)\n");
	} else {
		SOLVER_PRINTF("[PCG_FP32] Using persistent work vectors (optimization enabled)\n");
	}

	// d_z is always allocated locally (needed only by the preconditioned path).
	if (usePreconditioner) {
		cudaMalloc(&d_z, n * sizeof(float));
	}

	// ===== Warm-start initialization =====
	if (useWarmStart && d_x_prev != nullptr) {
		// x0 = x_prev (warm start)
		cudaMemcpy(d_x, d_x_prev, n * sizeof(float), cudaMemcpyDeviceToDevice);
		SOLVER_PRINTF("[PCG_FP32] Using warm start (x0 from previous solution)\n");
	} else {
		// x0 = 0 (cold start)
		cudaMemset(d_x, 0, n * sizeof(float));
		SOLVER_PRINTF("[PCG_FP32] Using cold start (x0 = 0)\n");
	}

	// 3. Create the SpMV descriptors (FP32).
	cusparseSpMatDescr_t matA = nullptr;
	cusparseDnVecDescr_t vecP = nullptr, vecAp = nullptr;
	void* dBuffer = nullptr;
	size_t bufferSize = 0;
	float one = 1.0f, zero = 0.0f;

	cusparseCreateCsr(&matA, n, n, nnz,
		(void*)d_rowPtr, (void*)d_colInd, (void*)d_val,
		CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
		CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F);  // FP32 values
	cusparseCreateDnVec(&vecP, n, d_p, CUDA_R_32F);
	cusparseCreateDnVec(&vecAp, n, d_Ap, CUDA_R_32F);
	cusparseSpMV_bufferSize(
		cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
		&one, matA, vecP, &zero, vecAp, CUDA_R_32F,
		CUSPARSE_SPMV_ALG_DEFAULT, &bufferSize);
	cudaMalloc(&dBuffer, bufferSize);

	// 4. Initialize the residual: r = b (cold start) or r = b - A*x (warm start).
	if (useWarmStart && d_x_prev != nullptr) {
		// Warm start: compute r = b - A*x0.
		// First r = b.
		cublasScopy(cublasHandle, n, d_b, 1, d_r, 1);

		// Compute A*x -> d_Ap (d_Ap used as scratch here).
		cusparseDnVecDescr_t vecX;
		cusparseCreateDnVec(&vecX, n, d_x, CUDA_R_32F);
		cusparseSpMV(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
			&one, matA, vecX, &zero, vecAp, CUDA_R_32F,
			CUSPARSE_SPMV_ALG_DEFAULT, dBuffer);
		cusparseDestroyDnVec(vecX);

		// r = b - A*x (i.e. r = r - A*x)
		float minusOne = -1.0f;
		cublasSaxpy(cublasHandle, n, &minusOne, d_Ap, 1, d_r, 1);

		// Report the initial residual norm (debug aid).
		float r0_norm;
		cublasSnrm2(cublasHandle, n, d_r, 1, &r0_norm);
		SOLVER_PRINTF("[PCG_FP32] Warm start initial residual: ||r0|| = %.6e\n", r0_norm);
	} else {
		// Cold start: r = b
		cublasScopy(cublasHandle, n, d_b, 1, d_r, 1);
	}

	float rho, rho_old, alpha, beta;

	if (usePreconditioner && d_M_inv != nullptr) {
		// PCG: z = M^(-1) * r
		jacobiPrecondition_fp32(n, d_M_inv, d_r, d_z);
		// p = z
		cublasScopy(cublasHandle, n, d_z, 1, d_p, 1);
		// rho = r^T * z
		cublasSdot(cublasHandle, n, d_r, 1, d_z, 1, &rho);
	} else {
		// Standard CG: p = r, rho = ||r||^2
		cublasScopy(cublasHandle, n, d_r, 1, d_p, 1);
		float r_norm;
		cublasSnrm2(cublasHandle, n, d_r, 1, &r_norm);
		rho = r_norm * r_norm;
	}

	// 5. PCG iteration loop.
	int iter = 0;
	auto t_iter_start = high_resolution_clock::now();

	for (; iter < maxit; ++iter) {
		// Ap = A*p
		cusparseSpMV(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
			&one, matA, vecP, &zero, vecAp, CUDA_R_32F,
			CUSPARSE_SPMV_ALG_DEFAULT, dBuffer);

		// tmp = p^T * Ap
		float tmp = 0.0f;
		cublasSdot(cublasHandle, n, d_p, 1, d_Ap, 1, &tmp);

		// Guard against breakdown (near-zero curvature along p).
		if (fabsf(tmp) < 1e-20f) {
			SOLVER_PRINTF("[PCG_FP32] p^T*Ap too small, stopping at iteration %d\n", iter);
			break;
		}

		alpha = rho / tmp;

		// x = x + alpha * p
		cublasSaxpy(cublasHandle, n, &alpha, d_p, 1, d_x, 1);

		// r = r - alpha * Ap
		float tmp2 = -alpha;
		cublasSaxpy(cublasHandle, n, &tmp2, d_Ap, 1, d_r, 1);

		// Convergence check on the absolute residual norm.
		float r_norm;
		cublasSnrm2(cublasHandle, n, d_r, 1, &r_norm);
		if (r_norm < tol) {
			SOLVER_PRINTF("[PCG_FP32] 收敛: 迭代%d次, 残差=%.2e\n", iter + 1, r_norm);
			break;
		}

		rho_old = rho;

		if (usePreconditioner && d_M_inv != nullptr) {
			// PCG: z = M^(-1) * r
			jacobiPrecondition_fp32(n, d_M_inv, d_r, d_z);

			// rho = r^T * z
			cublasSdot(cublasHandle, n, d_r, 1, d_z, 1, &rho);

			// beta = rho / rho_old
			beta = rho / rho_old;

			// p = z + beta * p
			cublasSscal(cublasHandle, n, &beta, d_p, 1);
			cublasSaxpy(cublasHandle, n, &one, d_z, 1, d_p, 1);
		} else {
			// Standard CG
			rho = r_norm * r_norm;
			beta = rho / rho_old;

			// p = r + beta * p
			cublasSscal(cublasHandle, n, &beta, d_p, 1);
			cublasSaxpy(cublasHandle, n, &one, d_r, 1, d_p, 1);
		}
	}

	auto t_iter_end = high_resolution_clock::now();
	double t_iter = duration<double>(t_iter_end - t_iter_start).count();

	SOLVER_PRINTF("[PCG_FP32] 迭代完成: %d次迭代, 耗时%.6f s\n", iter, t_iter);
	if (iter >= maxit) {
		SOLVER_PRINTF("[PCG_FP32] WARNING: 达到最大迭代次数\n");
	}

	// 6. Release resources (only locally allocated ones; persistent resources
	//    are owned and released by the caller).
	// Work vectors (local only).
	if (vectors_allocated_locally) {
		if (d_r) cudaFree(d_r);
		if (d_p) cudaFree(d_p);
		if (d_Ap) cudaFree(d_Ap);
	}
	// d_z is always allocated locally and must be freed here.
	if (d_z) cudaFree(d_z);

	// cuSPARSE resources.
	if (dBuffer) cudaFree(dBuffer);
	if (matA) cusparseDestroySpMat(matA);
	if (vecP) cusparseDestroyDnVec(vecP);
	if (vecAp) cusparseDestroyDnVec(vecAp);

	// CUDA handles (local only).
	if (handles_allocated_locally) {
		if (cublasHandle) cublasDestroy(cublasHandle);
		if (cusparseHandle) cusparseDestroy(cusparseHandle);
	};

	auto t_end = high_resolution_clock::now();
	double t_total = duration<double>(t_end - t_start).count();
	SOLVER_PRINTF("[PCG_FP32] 总耗时: %.6f s\n", t_total);
	SOLVER_PRINTF("[PCG_FP32] ==========================================\n");

	return 0;
}
#endif // GPU_USE_MIXED_PRECISION

#if GPU_USE_MIXED_PRECISION

// ====== Persistent GPU Resources Management ======
/**
 * 初始化持久化GPU资源
 * - 首次调用时创建全局CUBLAS/CUSPARSE handles（所有实例共享）
 * - 尺寸变化时重新分配工作向量（每个实例独立）
 */
int GPUSolver::initializePersistentResources() {
	// 初始化CUDA handles（全局共享，只初始化一次）
	if (!s_handlesInitialized) {
		GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
			"Initializing global CUDA handles");

		cublasStatus_t cublas_stat = cublasCreate(&s_cublasHandle);
		if (cublas_stat != CUBLAS_STATUS_SUCCESS) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
				"Failed to create cublas handle: " << cublas_stat);
			return -1;
		}

		cusparseStatus_t cusparse_stat = cusparseCreate(&s_cusparseHandle);
		if (cusparse_stat != CUSPARSE_STATUS_SUCCESS) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
				"Failed to create cusparse handle: " << cusparse_stat);
			cublasDestroy(s_cublasHandle);
			return -1;
		}

		s_handlesInitialized = true;

		GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
			"Global CUDA handles initialized successfully");
	}

	// 分配或重新分配工作向量（如果尺寸变化）
	if (persistentSize < size) {
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO,
			"Allocating persistent work vectors for size=" << size);

		// 释放旧的（如果有）
		if (d_r_persistent) cudaFree(d_r_persistent);
		if (d_p_persistent) cudaFree(d_p_persistent);
		if (d_Ap_persistent) cudaFree(d_Ap_persistent);

		// 分配新的
		cudaError_t err;
		err = cudaMalloc(&d_r_persistent, size * sizeof(double));
		if (err != cudaSuccess) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
				"Failed to allocate d_r_persistent: " << cudaGetErrorString(err));
			return -1;
		}

		err = cudaMalloc(&d_p_persistent, size * sizeof(double));
		if (err != cudaSuccess) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
				"Failed to allocate d_p_persistent: " << cudaGetErrorString(err));
			cudaFree(d_r_persistent);
			return -1;
		}

		err = cudaMalloc(&d_Ap_persistent, size * sizeof(double));
		if (err != cudaSuccess) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
				"Failed to allocate d_Ap_persistent: " << cudaGetErrorString(err));
			cudaFree(d_r_persistent);
			cudaFree(d_p_persistent);
			return -1;
		}

#if GPU_USE_MIXED_PRECISION
		// 分配FP32工作向量
		if (d_r_persistent_fp32) cudaFree(d_r_persistent_fp32);
		if (d_p_persistent_fp32) cudaFree(d_p_persistent_fp32);
		if (d_Ap_persistent_fp32) cudaFree(d_Ap_persistent_fp32);

		err = cudaMalloc(&d_r_persistent_fp32, size * sizeof(float));
		if (err != cudaSuccess) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
				"Failed to allocate d_r_persistent_fp32: " << cudaGetErrorString(err));
			return -1;
		}

		err = cudaMalloc(&d_p_persistent_fp32, size * sizeof(float));
		if (err != cudaSuccess) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
				"Failed to allocate d_p_persistent_fp32: " << cudaGetErrorString(err));
			cudaFree(d_r_persistent_fp32);
			return -1;
		}

		err = cudaMalloc(&d_Ap_persistent_fp32, size * sizeof(float));
		if (err != cudaSuccess) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
				"Failed to allocate d_Ap_persistent_fp32: " << cudaGetErrorString(err));
			cudaFree(d_r_persistent_fp32);
			cudaFree(d_p_persistent_fp32);
			return -1;
		}

		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO,
			"FP32 persistent work vectors allocated successfully");
#endif

		persistentSize = size;

		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO,
			"Persistent work vectors allocated successfully");
	}

	return 0;
}
#endif // GPU_USE_MIXED_PRECISION
// ====== End of Persistent GPU Resources Management ======

// Solve the linear system A * X = B on the GPU.
int GPUSolver::solve(void)
{
	// ====== Important note ======
	// GPU matrix assembly currently uses FE_Element transformation results
	// plus CPU-side assembly (Phase 1: correctness first, groundwork for a
	// fully GPU-resident Phase 2).

	// ====== Initialize persistent GPU resources ======
	// On the first call, or when the problem size changes, allocate the
	// persistent work vectors and the shared global CUDA handles.
	int init_ret = initializePersistentResources();
	if (init_ret != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
			"Failed to initialize persistent GPU resources");
		return init_ret;
	}
	// ====== End of Persistent Resources Initialization ======

	// ====== GPU matrix assembly (Phase 1: via FE_Element transforms) ======
	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"Calling formTangent_GPU() to assemble stiffness matrix");

	startTimer(TimerType::MATRIX_ASSEMBLY);
	int ret = formTangent_GPU();
	stopTimer(TimerType::MATRIX_ASSEMBLY);

	if (ret != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
						"formTangent_GPU() failed with error code: " << ret);
		return ret;
	}

	GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO,
					"GPU matrix assembly completed successfully");

	// ====== Step 1: GPU solve ======
	// Start the total-solve timer.
	startTimer(TimerType::TOTAL_SOLVE);

	using namespace std::chrono;
	auto t_start = high_resolution_clock::now();

	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, "solve() called, size=" << size << " nnz=" << nnz);
	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG, "Starting GPU solver computation");
	
	//printf("[DEBUG] enter solve\n");
	fflush(stdout);
	// --- Debug output ---
	// printf("[DEBUG] solve: size=%d nnz=%d\n", size, nnz);
	// for (int i = 0; i < size+1; ++i) printf("rowPtr[%d]=%d ", i, rowPtr[i]);
	// printf("\n");
	// for (int k = 0; k < nnz; ++k) printf("colInd[%d]=%d val[%d]=%g ", k, colInd[k], k, val[k]);
	// printf("\n");
	// Performance optimization: the full CSR validation loop was moved to
	// setSize() so it is not executed on every solve().
	assert(size > 0 && nnz > 0);
	assert(rowPtr.size() == size + 1);
	assert(colInd.size() == nnz);
	assert(val.size() == nnz);
	#if 0  // validated once in setSize(); not repeated in solve()
	for (int i = 0; i < size; ++i) {
		assert(rowPtr[i + 1] >= rowPtr[i]);
		for (int k = rowPtr[i]; k < rowPtr[i + 1]; ++k) {
			assert(colInd[k] >= 0 && colInd[k] < size);
		}
	}
	#endif

	// Start the host<->device memory-transfer timer.
	startTimer(TimerType::MEMORY_TRANSFER);
	auto t0 = high_resolution_clock::now();

	// ====== Debug: compare the CPU-side 'val' with the GPU-side 'd_val' ======
	// Performance warning: this debug code is very expensive (copies ~2.8MB
	// and runs a ~700k-iteration loop per solve). Enable only when debugging
	// matrix-assembly problems.
	#if 0  // disabled by default; change to #if 1 when needed
	std::vector<double> gpu_val(nnz);
	CHECK_CUDA(cudaMemcpy(gpu_val.data(), d_val, nnz * sizeof(double), cudaMemcpyDeviceToHost));

	// Count the non-zero entries on both sides.
	int cpu_nonzero = 0, gpu_nonzero = 0;
	double cpu_sum = 0.0, gpu_sum = 0.0;
	for (int i = 0; i < nnz; i++) {
		if (std::abs(val[i]) > 1e-15) {
			cpu_nonzero++;
			cpu_sum += std::abs(val[i]);
		}
		if (std::abs(gpu_val[i]) > 1e-15) {
			gpu_nonzero++;
			gpu_sum += std::abs(gpu_val[i]);
		}
	}

	DEBUG_COUT << "\n[DEBUG] Matrix Comparison (nnz=" << nnz << "):\n";
	DEBUG_COUT << "CPU val: nonzero=" << cpu_nonzero << " sum(abs)=" << cpu_sum << "\n";
	DEBUG_COUT << "GPU d_val: nonzero=" << gpu_nonzero << " sum(abs)=" << gpu_sum << "\n";
	DEBUG_COUT << "CPU val[0-10]: ";
	for (int i = 0; i < std::min(10, nnz); i++) DEBUG_COUT << val[i] << " ";
	DEBUG_COUT << "\nGPU d_val[0-10]: ";
	for (int i = 0; i < std::min(10, nnz); i++) DEBUG_COUT << gpu_val[i] << " ";
	DEBUG_COUT << "\n\n";
	fflush(stdout);
	#endif
	// ====== End of debug code ======

	// If the matrix was already assembled on the GPU, skip the CPU->GPU
	// matrix transfer and use the GPU-resident matrix directly.
	if (!m_matrixOnGPU) {
		DEBUG_COUT << "[DEBUG] Copying CPU matrix to GPU (m_matrixOnGPU=false)\n";
		CHECK_CUDA(cudaMemcpy(d_val, val.data(), nnz * sizeof(double), cudaMemcpyHostToDevice));
		GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG,
						"CPU matrix transferred to GPU");
	} else {
		DEBUG_COUT << "[DEBUG] Using GPU-assembled matrix (m_matrixOnGPU=true)\n";
	}

	// The RHS vector must always be transferred (it is updated every iteration).
	CHECK_CUDA(cudaMemcpy(d_B, &B(0), size * sizeof(double), cudaMemcpyHostToDevice));
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG,
					"RHS vector transferred to GPU");

	auto t1 = high_resolution_clock::now();
	double t_memcpy = duration<double>(t1 - t0).count();
	stopTimer(TimerType::MEMORY_TRANSFER);
	//printf("[TIMER] cudaMemcpy H2D: %.6f s\n", t_memcpy);
	fflush(stdout);

	// Reuse the persistent cuSolver handles (created in the constructor).
	// cusparseMatDescr_t descrA_local = nullptr;  // removed
	// CHECK_CUSPARSE(cusparseCreateMatDescr(&descrA_local));  // removed
	// CHECK_CUSPARSE(cusparseSetMatType(descrA_local, CUSPARSE_MATRIX_TYPE_GENERAL));  // removed
	// CHECK_CUSPARSE(cusparseSetMatIndexBase(descrA_local, CUSPARSE_INDEX_BASE_ZERO));  // removed
	// cusolverSpHandle_t solverHandle = nullptr;  // removed
	// CHECK_CUSOLVER(cusolverSpCreate(&solverHandle));  // removed

	t0 = high_resolution_clock::now();
	double tol = 1e-3;        // convergence tolerance (iteration accuracy for cusolverSpDcsrlsvqr, cgSolveGPU, etc.)
	int reorder = 0;           // matrix reordering flag for cusolver solvers (0 = no reordering, 1 = reorder for performance)
	int singularity = -1;      // singularity flag output by cusolver (-1 = non-singular, >=0 = singular at that row)
	int maxit = 200;          // maximum iteration count for the CG-type solvers (guards against infinite loops)
	double one = 1.0, zero = 0.0; // common scalar constants (e.g. alpha/beta coefficients for cuSPARSE SpMV)


	auto t2 = high_resolution_clock::now();
	// ================== Solver selection notes ==================
	// The linear system can be solved with several methods; switch here:
	// 1. CPU LU factorization (debugging / small problems only):
	//     cusolverSpDcsrlsvluHost
	// 2. GPU QR factorization (recommended for general sparse matrices):
	//     cusolverSpDcsrlsvqr
	// 3. GPU Cholesky factorization (symmetric positive-definite matrices):
	//     cusolverSpDcsrlsvchol
	// 4. GPU conjugate gradients (CG, symmetric positive-definite matrices):
	//     cgSolveGPU
	//
	// Note: each method has requirements on the matrix type; pick the solver
	// that matches the actual problem.


	//////////////////////////////////// Host computation ////////////////////////////////
	//CHECK_CUSOLVER(cusolverSpDcsrlsvluHost(
	//	solverHandle,
	//	size,
	//	nnz,
	//	descrA_local,
	//	val.data(),      // host
	//	rowPtr.data(),   // host
	//	colInd.data(),   // host
	//	&B(0),           // host
	//	tol,
	//	reorder,
	//	&X(0),           // host
	//	&singularity
	//));
	//auto t3 = high_resolution_clock::now();
	//double t_solve = duration<double>(t3 - t2).count();
	//printf("[TIMER] cusolverSpDcsrlsvluHost: %.6f s\n", t_solve);

	//////////////////////////////////// GPU device QR computation ////////////////////////////
	//CHECK_CUSOLVER(cusolverSpDcsrlsvqr(
	//	solverHandle,
	//	size,
	//	nnz,
	//	descrA_local,
	//	d_val,      // device
	//	d_rowPtr,   // device
	//	d_colInd,   // device
	//	d_B,        // device
	//	tol,
	//	reorder,
	//	d_X,        // device
	//	&singularity
	//));
	//auto t3 = high_resolution_clock::now();
	//double t_solve = duration<double>(t3 - t2).count();
	//printf("[TIMER] cusolverSpDcsrlsvqr: %.6f s\n", t_solve);

	////////////////////////////////////// GPU device Cholesky computation ///////////////////////
	// Cholesky factorization: poor performance here (strong serial dependency,
	// heavy fill-in). Measured 297ms/solve, slower than the CPU's 211ms/solve.
	/*
	startTimer(TimerType::GPU_COMPUTATION);
	CHECK_CUSOLVER(cusolverSpDcsrlsvchol(
		m_cusolverHandle,  // persistent handle
		size,
		nnz,
		descrA,  // persistent descriptor
		d_val,      // device
		d_rowPtr,   // device
		d_colInd,   // device
		d_B,        // device
		tol,
		reorder,
		d_X,        // device
		&singularity
	));
	stopTimer(TimerType::GPU_COMPUTATION);
	*/


	////////////////////////////////// GPU device CG iteration ///////////////////////
	// Conjugate gradients (CG): well-suited to GPU parallelism (expected 5-10x speedup).
	// Preconditioned CG (PCG): Jacobi preconditioning, expected 2-3x fewer iterations.

	// Build the Jacobi preconditioner if enabled and not yet built.
	if (usePreconditioner && !preconditionerBuilt) {
		int ret_prec = buildJacobiPreconditioner();
		if (ret_prec != 0) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
				"Failed to build Jacobi preconditioner, falling back to standard CG");
			usePreconditioner = false;  // fall back to standard CG
		}
	}

	startTimer(TimerType::GPU_COMPUTATION);
	int cg_ret;

	if (usePreconditioner && d_M_inv != nullptr) {
		// Use the PCG solver (with Jacobi preconditioning).
		SOLVER_PRINTF("[SOLVER] Using PCG solver with Jacobi preconditioner\n");

		// ===== Warm start: reuse the previous solution as the initial guess when available =====
		bool canUseWarmStart = warmStartEnabled && hasWarmStartData && d_X_prev != nullptr;
		if (canUseWarmStart) {
			warmStartCounter++;
			SOLVER_PRINTF("[SOLVER] Warm start enabled (solve #%d with warm start)\n", warmStartCounter);
		} else {
			coldStartCounter++;
			SOLVER_PRINTF("[SOLVER] Cold start (solve #%d without warm start)\n", coldStartCounter);
		}

#if GPU_USE_MIXED_PRECISION
		// ===== FP32 mixed-precision solver =====
		SOLVER_PRINTF("[SOLVER] Using FP32 mixed-precision PCG solver\n");

		// Allocate FP32 device buffers.
		float *d_val_fp32 = nullptr, *d_B_fp32 = nullptr, *d_X_fp32 = nullptr;
		float *d_M_inv_fp32 = nullptr, *d_X_prev_fp32 = nullptr;
		float *d_r_fp32 = nullptr, *d_p_fp32 = nullptr, *d_Ap_fp32 = nullptr;

		CHECK_CUDA(cudaMalloc(&d_val_fp32, nnz * sizeof(float)));
		CHECK_CUDA(cudaMalloc(&d_B_fp32, size * sizeof(float)));
		CHECK_CUDA(cudaMalloc(&d_X_fp32, size * sizeof(float)));
		CHECK_CUDA(cudaMalloc(&d_r_fp32, size * sizeof(float)));
		CHECK_CUDA(cudaMalloc(&d_p_fp32, size * sizeof(float)));
		CHECK_CUDA(cudaMalloc(&d_Ap_fp32, size * sizeof(float)));
		if (d_M_inv) {
			CHECK_CUDA(cudaMalloc(&d_M_inv_fp32, size * sizeof(float)));
		}
		if (canUseWarmStart && d_X_prev) {
			CHECK_CUDA(cudaMalloc(&d_X_prev_fp32, size * sizeof(float)));
		}

		// FP64 -> FP32 conversion.
		convertDoubleToFloat(nnz, d_val, d_val_fp32);
		convertDoubleToFloat(size, d_B, d_B_fp32);
		if (d_M_inv && d_M_inv_fp32) {
			convertDoubleToFloat(size, d_M_inv, d_M_inv_fp32);
		}
		if (canUseWarmStart && d_X_prev && d_X_prev_fp32) {
			convertDoubleToFloat(size, d_X_prev, d_X_prev_fp32);
		}

		// Invoke the FP32 PCG solver.
		cg_ret = pcgSolveGPU_fp32(
			size, nnz,
			d_val_fp32, d_rowPtr, d_colInd,
			d_B_fp32, d_X_fp32,
			static_cast<float>(tol), maxit,
			d_M_inv_fp32, true,
			canUseWarmStart, d_X_prev_fp32,
			s_cublasHandle, s_cusparseHandle,
			d_r_fp32, d_p_fp32, d_Ap_fp32
		);

		// FP32 -> FP64 conversion of the result.
		convertFloatToDouble(size, d_X_fp32, d_X);

		// Release the temporary FP32 buffers.
		cudaFree(d_val_fp32);
		cudaFree(d_B_fp32);
		cudaFree(d_X_fp32);
		cudaFree(d_r_fp32);
		cudaFree(d_p_fp32);
		cudaFree(d_Ap_fp32);
		if (d_M_inv_fp32) cudaFree(d_M_inv_fp32);
		if (d_X_prev_fp32) cudaFree(d_X_prev_fp32);
#else
		// ===== FP64 standard solver =====
		cg_ret = pcgSolveGPU(
			size,       // matrix dimension
			nnz,        // number of non-zeros
			d_val,      // sparse matrix A values (device)
			d_rowPtr,   // CSR row pointers (device)
			d_colInd,   // CSR column indices (device)
			d_B,        // right-hand side B (device)
			d_X,        // solution X (device; result written here)
			tol,        // convergence tolerance
			maxit,      // maximum iteration count
			d_M_inv,    // Jacobi preconditioner
			true,       // enable preconditioning
			canUseWarmStart,  // whether to use warm start
			d_X_prev,   // warm-start initial guess
			// ===== Persistent GPU resources =====
			s_cublasHandle,      // persistent CUBLAS handle
			s_cusparseHandle,    // persistent CUSPARSE handle
			d_r_persistent,      // persistent work vector r
			d_p_persistent,      // persistent work vector p
			d_Ap_persistent      // persistent work vector Ap
		);
#endif
	} else {
		// Use the standard CG solver (warm start not supported).
		SOLVER_PRINTF("[SOLVER] Using standard CG solver (no preconditioner)\n");

		// Standard CG does not support warm start; record a cold start.
		coldStartCounter++;
		SOLVER_PRINTF("[SOLVER] Cold start (standard CG, solve #%d)\n", coldStartCounter);

		cg_ret = cgSolveGPU(
			size,       // matrix dimension
			nnz,        // number of non-zeros
			d_val,      // sparse matrix A values (device)
			d_rowPtr,   // CSR row pointers (device)
			d_colInd,   // CSR column indices (device)
			d_B,        // right-hand side B (device)
			d_X,        // solution X (device; result written here)
			tol,        // convergence tolerance
			maxit       // maximum iteration count
		);
	}

	stopTimer(TimerType::GPU_COMPUTATION);
	auto t3 = high_resolution_clock::now();
	double t_solve = duration<double>(t3 - t2).count();

	// Check the CG solver's return status.
	if (cg_ret != 0) {
		printf("[ERROR] cgSolveGPU failed with error code: %d\n", cg_ret);
		return -1;
	}

	// ===== Warm start: save the current solution for the next solve =====
	if (warmStartEnabled) {
		// Allocate d_X_prev if not yet done.
		if (d_X_prev == nullptr) {
			cudaError_t err = cudaMalloc(&d_X_prev, size * sizeof(double));
			if (err != cudaSuccess) {
				GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
					"Failed to allocate d_X_prev, warm start disabled");
				warmStartEnabled = false;
			}
		}

		if (d_X_prev != nullptr) {
			// Save the current solution: x_prev = x
			cudaMemcpy(d_X_prev, d_X, size * sizeof(double), cudaMemcpyDeviceToDevice);
			hasWarmStartData = true;
			GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
				"Warm start data saved for next solve");
		}
	}

	fflush(stdout);

	// Note: the CG-type iterative solvers do not report 'singularity' (a
	// Cholesky-specific output); CG convergence is judged internally and a
	// non-zero error code is returned on failure.

	// Handles are no longer destroyed here (done once in the destructor).
	// cusparseDestroyMatDescr(descrA_local);  // removed
	// cusolverSpDestroy(solverHandle);  // removed

	auto t4 = high_resolution_clock::now();
	// Copy X back to the host only when the result is needed.
	// Start the memory-transfer timer (GPU -> CPU).
	startTimer(TimerType::MEMORY_TRANSFER);
	CHECK_CUDA(cudaMemcpy(&X(0), d_X, size * sizeof(double), cudaMemcpyDeviceToHost));
	stopTimer(TimerType::MEMORY_TRANSFER);
	auto t5 = high_resolution_clock::now();
	double t_free = duration<double>(t5 - t4).count();
	//printf("[TIMER] destroy/freeGPU: %.6f s\n", t_free);
	fflush(stdout);

	auto t_end = high_resolution_clock::now();
	double t_total = duration<double>(t_end - t_start).count();
	
	// Stop the total-solve timer.
	stopTimer(TimerType::TOTAL_SOLVE);

	// Print performance statistics periodically (for performance analysis).
	static int solveCount = 0;
	solveCount++;
	if (solveCount % 5 == 0) {  // print every 5 solves (a 10-step analysis prints twice)
		printf("\n[GPU_PERF] === Performance Statistics (after %d solve calls) ===\n", solveCount);
		printTimingStatistics();

		// Print warm-start statistics.
		SOLVER_PRINTF("\n[WARM_START] === Warm Start Statistics ===\n");
		SOLVER_PRINTF("  Cold starts: %d\n", coldStartCounter);
		SOLVER_PRINTF("  Warm starts: %d\n", warmStartCounter);
		if (warmStartCounter + coldStartCounter > 0) {
			double warmRatio = 100.0 * warmStartCounter / (warmStartCounter + coldStartCounter);
			SOLVER_PRINTF("  Warm start ratio: %.1f%%\n", warmRatio);
		}
		SOLVER_PRINTF("[WARM_START] ================================\n\n");
	}

	// Detailed per-solve performance statistics (debug mode).
	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
		"solve() completed - total time: " << t_total << "s");
	
	//printf("[TIMER] Total solve time: %.6f s\n", t_total);
	fflush(stdout);

	// printf("[DEBUG] X after solve: ");
	// for (int i = 0; i < size; ++i) printf("%g ", X[i]);
	// printf("\n");

	//printf("[DEBUG] leave solve\n");
	fflush(stdout);

	return 0;
}

/**
 * @brief Commit material states (called after convergence).
 *
 * After a Newton-Raphson iteration converges, copies the trial material
 * state over the committed state, then the trial strain over the committed
 * strain history (6 components per Gauss point).
 *
 * Robustness fix: the strain buffers are checked for null before the second
 * device-to-device copy, mirroring the existing guard on the material-state
 * buffers, instead of passing potentially-null pointers to cudaMemcpy.
 *
 * @return 0 on success or when there is nothing to commit, -1 on a CUDA
 *         copy failure.
 */
int GPUSolver::commitState(void)
{
	GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
		"Committing material states (trial -> committed)");

	if (numGaussPoints <= 0) {
		// No nonlinear material: nothing to commit.
		return 0;
	}

	if (d_materialStates_committed == nullptr || d_materialStates_trial == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
			"Material state buffers not allocated, skipping commit");
		return 0;
	}

	// Copy trial material states onto the committed states.
	size_t stateBufferSize = numGaussPoints * sizeof(ConcreteState);
	cudaError_t err = cudaMemcpy(d_materialStates_committed, d_materialStates_trial,
	                             stateBufferSize, cudaMemcpyDeviceToDevice);

	if (err != cudaSuccess) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
			"Failed to commit material states: " << cudaGetErrorString(err));
		return -1;
	}

	// Guard the strain buffers the same way as the state buffers: they are
	// allocated separately and may legitimately be absent.
	if (d_strainTotal == nullptr || d_strainCurrent_trial == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
			"Strain buffers not allocated, skipping strain commit");
		return 0;
	}

	// Copy trial strains onto the committed strain history.
	size_t strainBufferSize = numGaussPoints * 6 * sizeof(double);
	err = cudaMemcpy(d_strainTotal, d_strainCurrent_trial,
	                 strainBufferSize, cudaMemcpyDeviceToDevice);

	if (err != cudaSuccess) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
			"Failed to commit strain history: " << cudaGetErrorString(err));
		return -1;
	}

	GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
		"Material states and strain history committed successfully");

	return 0;
}

// No-op stub: this solver sends nothing over the channel; always reports success.
int GPUSolver::sendSelf(int cTag, Channel& theChannel) {
	return 0;
}

// No-op stub: this solver receives nothing from the channel; always reports success.
int GPUSolver::recvSelf(int cTag, Channel& theChannel, FEM_ObjectBroker& theBroker) {
	return 0;
}













// ===================== CG Solver Implementation =====================
// Solve the sparse symmetric positive-definite system A*x = b on the GPU
// with the (unpreconditioned) conjugate gradient method, printing a detailed
// timing breakdown of every sub-operation.
//
// Parameters:
//   n, nnz         : dimension of A and number of non-zeros
//   d_val, d_rowPtr, d_colInd : A in CSR format (device pointers)
//   d_b            : right-hand side b (device)
//   d_x            : solution x (device; zeroed here, result written in place)
//   tol            : convergence tolerance on the residual norm ||r||
//   maxit          : maximum number of iterations
// Returns 0 (reaching maxit is not treated as an error).
//
// Fixes vs. the previous revision:
//   * the convergence message printed sqrt(rho) while 'rho' already held the
//     residual norm ||r|| (not ||r||^2) at that point, so the logged residual
//     was wrong; the norm is now printed directly;
//   * the iteration-count summary no longer over-counts by one when the loop
//     exits because maxit was reached.
int cgSolveGPU(
	int n, int nnz,
	const double* d_val, const int* d_rowPtr, const int* d_colInd,
	const double* d_b,
	double* d_x,
	double tol,
	int maxit
) {
	using namespace std::chrono;
	
	printf("[CG_DETAILED] ======= CG求解器详细耗时分析 =======\n");
	printf("[CG_DETAILED] 问题规模: n=%d, nnz=%d\n", n, nnz);
	
	// 1. GPU initialization and memory allocation.
	auto t_init_start = high_resolution_clock::now();
	
	cublasHandle_t cublasHandle = nullptr;
	cusparseHandle_t cusparseHandle = nullptr;
	cublasCreate(&cublasHandle);
	cusparseCreate(&cusparseHandle);

	// Allocate the device work vectors r, p, Ap.
	double* d_r = nullptr, * d_p = nullptr, * d_Ap = nullptr;
	cudaMalloc(&d_r, n * sizeof(double));
	cudaMalloc(&d_p, n * sizeof(double));
	cudaMalloc(&d_Ap, n * sizeof(double));
	cudaMemset(d_x, 0, n * sizeof(double)); // x = 0

	// Create the SpMV descriptors.
	cusparseSpMatDescr_t matA = nullptr;
	cusparseDnVecDescr_t vecP = nullptr, vecAp = nullptr;
	void* dBuffer = nullptr;
	size_t bufferSize = 0;
	double one = 1.0, zero = 0.0;

	// Sparse matrix A descriptor (CSR format).
	cusparseCreateCsr(&matA, n, n, nnz,
		(void*)d_rowPtr, (void*)d_colInd, (void*)d_val,
		CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
		CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F);
	// Dense vector descriptors.
	cusparseCreateDnVec(&vecP, n, d_p, CUDA_R_64F);
	cusparseCreateDnVec(&vecAp, n, d_Ap, CUDA_R_64F);
	// Query and allocate the SpMV workspace.
	cusparseSpMV_bufferSize(
		cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
		&one, matA, vecP, &zero, vecAp, CUDA_R_64F,
		CUSPARSE_SPMV_ALG_DEFAULT, &bufferSize);
	cudaMalloc(&dBuffer, bufferSize);
	
	auto t_init_end = high_resolution_clock::now();
	double t_init = duration<double>(t_init_end - t_init_start).count();
	printf("[CG_DETAILED] 1. GPU初始化+内存分配: %.6f s\n", t_init);

	// 2. Vector initialization: r = p = b (valid because x0 = 0).
	auto t_vec_init_start = high_resolution_clock::now();
	cublasDcopy(cublasHandle, n, d_b, 1, d_r, 1);
	cublasDcopy(cublasHandle, n, d_b, 1, d_p, 1);
	auto t_vec_init_end = high_resolution_clock::now();
	double t_vec_init = duration<double>(t_vec_init_end - t_vec_init_start).count();
	printf("[CG_DETAILED] 2. 向量初始化: %.6f s\n", t_vec_init);

	// 3. CG iteration loop with fine-grained per-operation timing.
	auto t_cg_start = high_resolution_clock::now();
	double t_spmv_total = 0.0, t_dot_total = 0.0, t_axpy_total = 0.0, t_norm_total = 0.0;
	int spmv_count = 0, dot_count = 0, axpy_count = 0, norm_count = 0;
	
	double rho, rhop, alpha, beta;
	
	// Initial residual norm.
	auto t_norm_start = high_resolution_clock::now();
	cublasDnrm2(cublasHandle, n, d_r, 1, &rho); // rho = ||r||
	auto t_norm_end = high_resolution_clock::now();
	t_norm_total += duration<double>(t_norm_end - t_norm_start).count();
	norm_count++;
	
	rho = rho * rho; // rho = ||r||^2
	int iter = 0;
	
	for (; iter < maxit; ++iter) {
		// Ap = A*p (SpMV - usually the dominant cost).
		auto t_spmv_start = high_resolution_clock::now();
		cusparseSpMV(
			cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
			&one, matA, vecP, &zero, vecAp, CUDA_R_64F,
			CUSPARSE_SPMV_ALG_DEFAULT, dBuffer);
		auto t_spmv_end = high_resolution_clock::now();
		t_spmv_total += duration<double>(t_spmv_end - t_spmv_start).count();
		spmv_count++;

		// tmp = p^T * Ap (dot product).
		auto t_dot_start = high_resolution_clock::now();
		double tmp = 0.0;
		cublasDdot(cublasHandle, n, d_p, 1, d_Ap, 1, &tmp);
		alpha = rho / tmp;
		auto t_dot_end = high_resolution_clock::now();
		t_dot_total += duration<double>(t_dot_end - t_dot_start).count();
		dot_count++;

		// x = x + alpha * p, r = r - alpha * Ap (axpy updates).
		auto t_axpy_start = high_resolution_clock::now();
		cublasDaxpy(cublasHandle, n, &alpha, d_p, 1, d_x, 1);
		double tmp2 = -alpha;
		cublasDaxpy(cublasHandle, n, &tmp2, d_Ap, 1, d_r, 1);
		auto t_axpy_end = high_resolution_clock::now();
		t_axpy_total += duration<double>(t_axpy_end - t_axpy_start).count();
		axpy_count += 2;

		rhop = rho; // previous ||r||^2

		// New residual norm.
		t_norm_start = high_resolution_clock::now();
		cublasDnrm2(cublasHandle, n, d_r, 1, &rho); // rho = ||r|| (NOT squared yet)
		t_norm_end = high_resolution_clock::now();
		t_norm_total += duration<double>(t_norm_end - t_norm_start).count();
		norm_count++;
		
		if (rho < tol) {
			// Bug fix: 'rho' holds ||r|| here, so print it directly
			// (previously sqrt(rho) was printed, which is sqrt(||r||)).
			printf("[CG_DETAILED] CG收敛: 迭代%d次, 残差=%.2e\n", iter+1, rho);
			break;
		}
		rho = rho * rho; // square for the next alpha/beta computation
		beta = rho / rhop;

		// p = r + beta * p (scal + axpy).
		auto t_axpy2_start = high_resolution_clock::now();
		cublasDscal(cublasHandle, n, &beta, d_p, 1);
		cublasDaxpy(cublasHandle, n, &one, d_r, 1, d_p, 1);
		auto t_axpy2_end = high_resolution_clock::now();
		t_axpy_total += duration<double>(t_axpy2_end - t_axpy2_start).count();
		axpy_count += 1; // dscal + daxpy counted as one axpy-type operation
	}
	
	auto t_cg_end = high_resolution_clock::now();
	double t_cg_total = duration<double>(t_cg_end - t_cg_start).count();
	// Bug fix: when the loop exits via the condition (iter == maxit), exactly
	// maxit iterations were executed; only a break implies iter+1.
	int itersDone = (iter < maxit) ? iter + 1 : maxit;
	printf("[CG_DETAILED] 3. CG迭代计算: %.6f s (%d 迭代)\n", t_cg_total, itersDone);

	// 4. Resource cleanup.
	auto t_cleanup_start = high_resolution_clock::now();
	if (d_r) cudaFree(d_r);
	if (d_p) cudaFree(d_p);
	if (d_Ap) cudaFree(d_Ap);
	if (dBuffer) cudaFree(dBuffer);
	if (matA) cusparseDestroySpMat(matA);
	if (vecP) cusparseDestroyDnVec(vecP);
	if (vecAp) cusparseDestroyDnVec(vecAp);
	if (cublasHandle) cublasDestroy(cublasHandle);
	if (cusparseHandle) cusparseDestroy(cusparseHandle);
	auto t_cleanup_end = high_resolution_clock::now();
	double t_cleanup = duration<double>(t_cleanup_end - t_cleanup_start).count();
	printf("[CG_DETAILED] 4. 资源清理: %.6f s\n", t_cleanup);

	// Detailed per-operation timing summary.
	printf("[CG_DETAILED] ===== CG迭代内部详细耗时 =====\n");
	printf("[CG_DETAILED] SpMV (A*p): %.6f s (%d次, 平均%.6f s) - %.1f%%\n", 
		   t_spmv_total, spmv_count, 
		   spmv_count > 0 ? t_spmv_total/spmv_count : 0,
		   t_cg_total > 0 ? t_spmv_total/t_cg_total*100 : 0);
	printf("[CG_DETAILED] 向量内积: %.6f s (%d次, 平均%.6f s) - %.1f%%\n", 
		   t_dot_total, dot_count,
		   dot_count > 0 ? t_dot_total/dot_count : 0,
		   t_cg_total > 0 ? t_dot_total/t_cg_total*100 : 0);
	printf("[CG_DETAILED] 向量加法: %.6f s (%d次, 平均%.6f s) - %.1f%%\n", 
		   t_axpy_total, axpy_count,
		   axpy_count > 0 ? t_axpy_total/axpy_count : 0,
		   t_cg_total > 0 ? t_axpy_total/t_cg_total*100 : 0);
	printf("[CG_DETAILED] 向量范数: %.6f s (%d次, 平均%.6f s) - %.1f%%\n", 
		   t_norm_total, norm_count,
		   norm_count > 0 ? t_norm_total/norm_count : 0,
		   t_cg_total > 0 ? t_norm_total/t_cg_total*100 : 0);

	double total_time = t_init + t_vec_init + t_cg_total + t_cleanup;
	printf("[CG_DETAILED] ===== CG求解器总耗时分解 =====\n");
	printf("[CG_DETAILED] 总时间: %.6f s\n", total_time);
	printf("[CG_DETAILED]   初始化: %.6f s (%.1f%%)\n", t_init, t_init/total_time*100);
	printf("[CG_DETAILED]   向量初始化: %.6f s (%.1f%%)\n", t_vec_init, t_vec_init/total_time*100);
	printf("[CG_DETAILED]   CG迭代: %.6f s (%.1f%%) ← 主要计算\n", t_cg_total, t_cg_total/total_time*100);
	printf("[CG_DETAILED]   资源清理: %.6f s (%.1f%%)\n", t_cleanup, t_cleanup/total_time*100);
	printf("[CG_DETAILED] ==========================================\n");

	return 0;
}

//==============================================================================
// GPU matrix-assembly residual kernel declarations
//==============================================================================

// External declarations for the GPU residual-computation launch wrappers.
// These are defined in a separate .cu translation unit and exposed with C
// linkage. Note: the default argument `stream = 0` is a C++ feature and only
// applies to callers that see these declarations; it selects the legacy
// default stream when no explicit stream is passed.
extern "C" {
    // Launch the truss-element residual kernel over the element array.
    // Returns 0 on success, non-zero on failure (per usage below).
    int launchTrussResidualKernel(
        int numElements,
        const GPUElementData* d_elementData,   // device array of element descriptors
        const double* d_displacement,          // device copy of the global displacement vector
        double* d_residualVector,              // device per-element residual output
        double* d_tempVector,                  // device scratch space
        cudaStream_t stream = 0
    );
    
    // Launch the beam-element residual kernel (same contract as above).
    int launchBeamResidualKernel(
        int numElements,
        const GPUElementData* d_elementData,
        const double* d_displacement,
        double* d_residualVector,
        double* d_tempVector,
        cudaStream_t stream = 0
    );
    
    // Launch the quad-element residual kernel (same contract as above).
    int launchQuadResidualKernel(
        int numElements,
        const GPUElementData* d_elementData,
        const double* d_displacement,
        double* d_residualVector,
        double* d_tempVector,
        cudaStream_t stream = 0
    );
    
    // Scatter/accumulate per-element residuals into the global residual
    // vector of length `globalSize` (used with d_B in assembleGlobalVector_GPU).
    int launchResidualAssemblyKernel(
        int numElements,
        const GPUElementData* d_elementData,
        const double* d_elementResidual,
        double* d_globalResidual,
        int globalSize,
        cudaStream_t stream = 0
    );
}

//==============================================================================
// GPU matrix-assembly residual computation implementation
//==============================================================================

/**
 * @brief Compute per-element residual vectors on the GPU.
 *
 * Steps:
 *  1. Zero the element residual buffer (numElements x maxDOFPerElement doubles).
 *  2. Stage the current solution vector getX() into a contiguous host buffer
 *     and upload it to a temporary device array.
 *  3. Launch the Truss, Beam and Quad residual kernels on computeStream.
 *     Each launcher receives the full element array; presumably each kernel
 *     skips elements of other types internally — TODO confirm in kernel code.
 *  4. Synchronize computeStream and release the temporary device buffer.
 *
 * @return 0 on success, -1 on any allocation/copy/kernel failure.
 */
int GPUSolver::computeElementResidual_GPU()
{
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, "Starting GPU element residual computation");
    
    if (numElements <= 0 || !d_elementData || !d_elementResidual) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, "Invalid parameters for GPU residual computation");
        return -1;
    }
    
    // Zero the per-element residual storage before accumulation.
    cudaError_t cudaError = cudaMemset(d_elementResidual, 0, 
        numElements * maxDOFPerElement * sizeof(double));
    if (cudaError != cudaSuccess) {
        std::string errorMsg = "Failed to initialize residual vector: ";
        errorMsg += cudaGetErrorString(cudaError);
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, errorMsg.c_str());
        return -1;
    }
    
    // Current nodal displacement (solution) vector.
    const Vector& currentU = getX();
    
    // Upload the displacement vector to a temporary device buffer.
    int systemSize = currentU.Size();
    double* d_displacement = nullptr;
    CHECK_CUDA_MALLOC((void**)&d_displacement, systemSize * sizeof(double));
    
    // Stage the Vector into a contiguous std::vector. Unlike a raw new[]
    // buffer this is exception-safe and needs no manual delete[] on the
    // error paths below.
    std::vector<double> hostU(systemSize);
    for (int i = 0; i < systemSize; i++) {
        hostU[i] = currentU(i);  // Vector exposes element access via operator()
    }
    
    cudaError = cudaMemcpy(d_displacement, hostU.data(), 
                          systemSize * sizeof(double), cudaMemcpyHostToDevice);
    
    if (cudaError != cudaSuccess) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, "Failed to copy displacement to GPU");
        CHECK_CUDA_FREE(d_displacement);
        return -1;
    }
    
    int result = 0;
    
    // Truss element residuals.
    result = launchTrussResidualKernel(
        numElements, reinterpret_cast<const GPUElementData*>(d_elementData), d_displacement, 
        d_elementResidual, d_tempVector, computeStream
    );
    
    if (result != 0) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, "Truss residual kernel failed");
        CHECK_CUDA_FREE(d_displacement);
        return -1;
    }
    
    // Beam element residuals.
    result = launchBeamResidualKernel(
        numElements, reinterpret_cast<const GPUElementData*>(d_elementData), d_displacement, 
        d_elementResidual, d_tempVector, computeStream
    );
    
    if (result != 0) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, "Beam residual kernel failed");
        CHECK_CUDA_FREE(d_displacement);
        return -1;
    }
    
    // Quad element residuals.
    result = launchQuadResidualKernel(
        numElements, reinterpret_cast<const GPUElementData*>(d_elementData), d_displacement, 
        d_elementResidual, d_tempVector, computeStream
    );
    
    if (result != 0) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, "Quad residual kernel failed");
        CHECK_CUDA_FREE(d_displacement);
        return -1;
    }
    
    // Wait for all three kernels; this also surfaces async kernel errors.
    cudaError = cudaStreamSynchronize(computeStream);
    if (cudaError != cudaSuccess) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, "Failed to synchronize compute stream");
        CHECK_CUDA_FREE(d_displacement);
        return -1;
    }
    
    CHECK_CUDA_FREE(d_displacement);
    
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, "GPU element residual computation completed successfully");
    return 0;
}

/**
 * @brief Assemble the global residual (RHS) vector entirely on the GPU.
 *
 * Zeros d_B, scatters the per-element residuals straight into it on
 * assemblyStream, and marks the RHS as device-resident so that solve()
 * can skip the host-to-device upload.
 *
 * @return 0 on success, -1 on failure.
 */
int GPUSolver::assembleGlobalVector_GPU()
{
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, "Starting GPU global residual vector assembly");

    if (d_elementResidual == nullptr || d_B == nullptr) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, "Invalid parameters for global assembly");
        return -1;
    }

    const int systemSize = getNumEqn();

    // Clear the global RHS before the scatter-accumulate pass.
    cudaError_t err = cudaMemset(d_B, 0, systemSize * sizeof(double));
    if (err != cudaSuccess) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, "Failed to initialize d_B");
        return -1;
    }

    // Assemble element residuals directly into d_B — no temporary device
    // buffer and no host round-trip.
    const int assemblyStatus = launchResidualAssemblyKernel(
        numElements, reinterpret_cast<const GPUElementData*>(d_elementData), d_elementResidual,
        d_B,
        systemSize, assemblyStream
    );
    if (assemblyStatus != 0) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, "Global residual assembly kernel failed");
        return -1;
    }

    // Wait for the assembly kernel; also surfaces async launch errors.
    err = cudaStreamSynchronize(assemblyStream);
    if (err != cudaSuccess) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, "Failed to synchronize assembly stream");
        return -1;
    }

    // Record that the RHS already lives on the GPU so solve() does not
    // re-transfer it from the host.
    m_rhsOnGPU = true;
    GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO,
                    "RHS vector assembled on GPU, m_rhsOnGPU = true (skipped "
                    << systemSize * sizeof(double) / 1024.0 << " KB transfer)");

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO, "GPU global residual vector assembly completed successfully");
    return 0;
}

/**
 * @brief Form the unbalanced (residual) force vector using the GPU pipeline.
 *
 * Pipeline: element residual kernels -> global assembly into d_B, with a
 * placeholder for a CPU fallback covering unsupported element types.
 *
 * @return 0 on success, -1 if either GPU stage fails.
 */
int GPUSolver::formUnbalance_GPU()
{
    GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, "Starting GPU unbalance force computation");
    
    // Element data must have been uploaded before this is called.
    if (numElements <= 0 || d_elementData == nullptr) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, "GPU element data not initialized for unbalance computation");
        return -1;
    }
    
    // Stage 1: per-element residual vectors.
    GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG, "Computing element residual vectors on GPU");
    if (computeElementResidual_GPU() != 0) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, "Failed to compute element residuals on GPU");
        
        // A CPU fallback could be attempted here; during development we fail hard.
        GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::WARNING, "GPU residual computation failed, considering CPU fallback");
        return -1;
    }
    
    // Stage 2: assemble the global unbalance vector into d_B.
    GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG, "Assembling global unbalance vector on GPU");
    if (assembleGlobalVector_GPU() != 0) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, "Failed to assemble global unbalance vector on GPU");
        return -1;
    }
    
    // Stage 3: elements without GPU kernels would need a CPU path.
    if (numUnsupportedElements > 0) {
        GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, "Processing " << numUnsupportedElements << 
                 " unsupported elements using CPU computation");
        
        // TODO: invoke the original CPU element-residual computation here.
        GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::WARNING, "CPU fallback for unsupported elements not yet implemented");
    }
    
    // Stage 4: constraints/boundary conditions are handled on the CPU side
    // because of their irregular logic; nothing to do here yet.
    GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::DEBUG, "Applying constraints and boundary conditions");
    
    // Summary statistics (guard keeps the percentage denominator positive).
    if (numSupportedElements > 0) {
        const double gpuShare = 100.0 * (double)numSupportedElements / (numSupportedElements + numUnsupportedElements);
        GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, "GPU unbalance computation completed: " << 
                 numSupportedElements << " GPU elements (" << gpuShare << "%), " <<
                 numUnsupportedElements << " CPU elements");
    }
    
    GPU_DEBUG_PRINT(DebugModule::MATRIX_ASSEMBLY, DebugLevel::INFO, "GPU unbalance force computation completed successfully");
    return 0;
}

//==============================================================================
// CPU-GPU数据同步系统实现
//==============================================================================

/**
 * @brief Allocate the device buffers and host staging buffer used for
 *        CPU<->GPU node-state synchronization.
 *
 * Allocates three device arrays (displacements, velocities, accelerations),
 * each sized to the number of system equations (DOFs), plus one host staging
 * buffer, and zero-initializes the device arrays.
 *
 * @return 0 on success, -1 on failure (partial allocations are released).
 */
int GPUSolver::allocateNodeDataBuffers()
{
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Allocating CPU-GPU node data synchronization buffers");
	
	// Drop any previously allocated buffers first.
	freeNodeDataBuffers();
	
	// Buffer length = system DOF count.
	nodeDataSize = getNumEqn();
	if (nodeDataSize <= 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Invalid node data size: " << nodeDataSize);
		return -1;
	}
	
	try {
		size_t bufferSizeBytes = nodeDataSize * sizeof(double);
		
		CHECK_CUDA_MALLOC((void**)&d_nodeDisplacements, bufferSizeBytes);
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG, 
			"Allocated GPU displacement buffer: " << bufferSizeBytes << " bytes");
		
		CHECK_CUDA_MALLOC((void**)&d_nodeVelocities, bufferSizeBytes);
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG, 
			"Allocated GPU velocity buffer: " << bufferSizeBytes << " bytes");
		
		CHECK_CUDA_MALLOC((void**)&d_nodeAccelerations, bufferSizeBytes);
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG, 
			"Allocated GPU acceleration buffer: " << bufferSizeBytes << " bytes");
		
		// Host staging buffer. operator new[] throws std::bad_alloc on
		// failure (it never returns nullptr), so allocation errors are
		// handled by the catch(...) below — no null check is needed here.
		h_tempNodeBuffer = new double[nodeDataSize];
		
		// Zero-initialize the device buffers.
		CHECK_CUDA(cudaMemset(d_nodeDisplacements, 0, bufferSizeBytes));
		CHECK_CUDA(cudaMemset(d_nodeVelocities, 0, bufferSizeBytes));
		CHECK_CUDA(cudaMemset(d_nodeAccelerations, 0, bufferSizeBytes));
		
		GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
			"Node data buffers allocated successfully - size: " << nodeDataSize << " DOF");
		
		return 0;
	}
	catch (...) {
		// Covers std::bad_alloc from new[] and any throwing CHECK_* macro.
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Exception during node data buffer allocation");
		freeNodeDataBuffers();
		return -1;
	}
}

/**
 * @brief Release all node-sync buffers (device and host) and reset the
 *        synchronization state. Safe to call when nothing is allocated.
 */
void GPUSolver::freeNodeDataBuffers()
{
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG, 
		"Freeing CPU-GPU node data synchronization buffers");
	
	// Device-side buffers.
	CHECK_CUDA_FREE(d_nodeDisplacements);
	CHECK_CUDA_FREE(d_nodeVelocities);
	CHECK_CUDA_FREE(d_nodeAccelerations);
	
	// Host staging buffer (delete[] on nullptr is a no-op).
	delete[] h_tempNodeBuffer;
	h_tempNodeBuffer = nullptr;
	
	// Back to the "nothing synchronized" state.
	nodeDataSynchronized = false;
	nodeDataSize = 0;
	lastSyncTime = 0.0;
	
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Node data buffers freed successfully");
}

/**
 * @brief Copy a Vector into a device buffer via the host staging buffer.
 *
 * The Vector is first flattened into h_tempNodeBuffer, then pushed to the
 * device in a single cudaMemcpy. The Vector's size must equal nodeDataSize.
 *
 * @param source   host-side Vector to upload
 * @param d_target device buffer of at least nodeDataSize doubles
 * @return 0 on success, -1 on invalid buffers, size mismatch, or copy error.
 */
int GPUSolver::copyVectorToGPUBuffer(const Vector& source, double* d_target)
{
	// Both the device target and the host staging buffer must exist.
	if (d_target == nullptr || h_tempNodeBuffer == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Invalid GPU buffer or CPU temp buffer for vector copy");
		return -1;
	}
	
	if (!validateNodeDataSize(source))
		return -1;
	
	// Flatten the Vector into the contiguous staging buffer...
	for (int dof = 0; dof < nodeDataSize; ++dof)
		h_tempNodeBuffer[dof] = source(dof);
	
	// ...then transfer it to the device in one shot.
	cudaError_t err = cudaMemcpy(d_target, h_tempNodeBuffer, 
	                             nodeDataSize * sizeof(double), cudaMemcpyHostToDevice);
	if (err != cudaSuccess) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to copy vector to GPU: " << cudaGetErrorString(err));
		return -1;
	}
	
	return 0;
}

/**
 * @brief Copy a device buffer back into a Vector via the host staging buffer.
 *
 * @param d_source device buffer of at least nodeDataSize doubles
 * @param target   host-side Vector to fill (size must equal nodeDataSize)
 * @return 0 on success, -1 on invalid buffers, size mismatch, or copy error.
 */
int GPUSolver::copyGPUBufferToVector(double* d_source, Vector& target)
{
	// Both the device source and the host staging buffer must exist.
	if (d_source == nullptr || h_tempNodeBuffer == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Invalid GPU buffer or CPU temp buffer for vector copy");
		return -1;
	}
	
	if (!validateNodeDataSize(target))
		return -1;
	
	// Pull the device data into the contiguous staging buffer...
	cudaError_t err = cudaMemcpy(h_tempNodeBuffer, d_source, 
	                             nodeDataSize * sizeof(double), cudaMemcpyDeviceToHost);
	if (err != cudaSuccess) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to copy GPU buffer to CPU: " << cudaGetErrorString(err));
		return -1;
	}
	
	// ...then scatter it into the Vector.
	for (int dof = 0; dof < nodeDataSize; ++dof)
		target(dof) = h_tempNodeBuffer[dof];
	
	return 0;
}

/**
 * @brief Check that a Vector's size matches the node-sync buffer size.
 * @return true when data.Size() == nodeDataSize; false (with error log) otherwise.
 */
bool GPUSolver::validateNodeDataSize(const Vector& data)
{
	if (data.Size() == nodeDataSize)
		return true;
	
	GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
		"Vector size mismatch - expected: " << nodeDataSize << 
		", actual: " << data.Size());
	return false;
}

/**
 * @brief Record the current time (seconds since the clock epoch) as the
 *        last node-data synchronization timestamp.
 */
void GPUSolver::updateSyncTimestamp()
{
	using Clock = std::chrono::high_resolution_clock;
	lastSyncTime =
		std::chrono::duration<double>(Clock::now().time_since_epoch()).count();
}

// ====== CPU-GPU数据同步公共接口实现 ======

/**
 * @brief Upload node displacements to the device sync buffer.
 *
 * Lazily allocates the node-sync buffers on first use, copies the Vector
 * to d_nodeDisplacements and stamps the sync time.
 *
 * @return 0 on success, -1 on allocation or transfer failure.
 */
int GPUSolver::syncNodeDisplacementsToGPU(const Vector& displacements)
{
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Synchronizing node displacements to GPU");
	
	// Allocate the sync buffers on demand.
	if (d_nodeDisplacements == nullptr && allocateNodeDataBuffers() != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to allocate node data buffers");
		return -1;
	}
	
	// Perform the host-to-device transfer.
	if (copyVectorToGPUBuffer(displacements, d_nodeDisplacements) != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to sync displacements to GPU");
		return -1;
	}
	
	updateSyncTimestamp();
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG, 
		"Node displacements synchronized successfully - " << displacements.Size() << " DOF");
	
	return 0;
}

/**
 * @brief Upload node velocities to the device sync buffer.
 *
 * Lazily allocates the node-sync buffers on first use, copies the Vector
 * to d_nodeVelocities and stamps the sync time.
 *
 * @return 0 on success, -1 on allocation or transfer failure.
 */
int GPUSolver::syncNodeVelocitiesToGPU(const Vector& velocities)
{
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Synchronizing node velocities to GPU");
	
	// Allocate the sync buffers on demand.
	if (d_nodeVelocities == nullptr && allocateNodeDataBuffers() != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to allocate node data buffers");
		return -1;
	}
	
	// Perform the host-to-device transfer.
	if (copyVectorToGPUBuffer(velocities, d_nodeVelocities) != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to sync velocities to GPU");
		return -1;
	}
	
	updateSyncTimestamp();
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG, 
		"Node velocities synchronized successfully - " << velocities.Size() << " DOF");
	
	return 0;
}

/**
 * @brief Upload node accelerations to the device sync buffer.
 *
 * Lazily allocates the node-sync buffers on first use, copies the Vector
 * to d_nodeAccelerations and stamps the sync time.
 *
 * @return 0 on success, -1 on allocation or transfer failure.
 */
int GPUSolver::syncNodeAccelerationsToGPU(const Vector& accelerations)
{
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Synchronizing node accelerations to GPU");
	
	// Allocate the sync buffers on demand.
	if (d_nodeAccelerations == nullptr && allocateNodeDataBuffers() != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to allocate node data buffers");
		return -1;
	}
	
	// Perform the host-to-device transfer.
	if (copyVectorToGPUBuffer(accelerations, d_nodeAccelerations) != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to sync accelerations to GPU");
		return -1;
	}
	
	updateSyncTimestamp();
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG, 
		"Node accelerations synchronized successfully - " << accelerations.Size() << " DOF");
	
	return 0;
}

/**
 * @brief Upload displacements, velocities and accelerations to the GPU
 *        in that order, under the DATA_SYNCHRONIZATION timer.
 *
 * Stops at the first failing transfer. On full success, marks the node
 * data as synchronized.
 *
 * @return 0 on success, -1 if any individual sync fails.
 */
int GPUSolver::syncAllNodeDataToGPU(const Vector& displacements, 
                                   const Vector& velocities, 
                                   const Vector& accelerations)
{
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Synchronizing all node data to GPU");
	
	startTimer(TimerType::DATA_SYNCHRONIZATION);
	
	// Short-circuit evaluation preserves the original transfer order and
	// stops after the first failure.
	const bool allSynced =
		syncNodeDisplacementsToGPU(displacements) == 0 &&
		syncNodeVelocitiesToGPU(velocities) == 0 &&
		syncNodeAccelerationsToGPU(accelerations) == 0;
	
	if (!allSynced) {
		stopTimer(TimerType::DATA_SYNCHRONIZATION);
		return -1;
	}
	
	nodeDataSynchronized = true;
	stopTimer(TimerType::DATA_SYNCHRONIZATION);
	
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"All node data synchronized to GPU successfully");
	
	return 0;
}

/**
 * @brief Download node displacements from the device sync buffer into a Vector.
 *
 * Unlike the upload path, this does not allocate buffers on demand — the
 * device buffer must already exist.
 *
 * @return 0 on success, -1 if the buffer is missing or the transfer fails.
 */
int GPUSolver::syncNodeDisplacementsFromGPU(Vector& displacements)
{
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Synchronizing node displacements from GPU to CPU");
	
	if (d_nodeDisplacements == nullptr) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"GPU displacement buffer not allocated");
		return -1;
	}
	
	if (copyGPUBufferToVector(d_nodeDisplacements, displacements) != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to sync displacements from GPU");
		return -1;
	}
	
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG, 
		"Node displacements synchronized from GPU successfully");
	
	return 0;
}

/**
 * @brief Download displacements, velocities and accelerations from the GPU
 *        in that order, under the DATA_SYNCHRONIZATION timer.
 *
 * NOTE(review): velocities/accelerations go through copyGPUBufferToVector
 * directly (which null-checks the device pointer itself), while
 * displacements use the wrapper with its own explicit null check — the
 * asymmetry mirrors the original implementation.
 *
 * @return 0 on success, -1 if any individual transfer fails.
 */
int GPUSolver::syncAllNodeDataFromGPU(Vector& displacements, 
                                     Vector& velocities, 
                                     Vector& accelerations)
{
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Synchronizing all node data from GPU to CPU");
	
	startTimer(TimerType::DATA_SYNCHRONIZATION);
	
	// Displacements go through the dedicated wrapper.
	if (syncNodeDisplacementsFromGPU(displacements) != 0) {
		stopTimer(TimerType::DATA_SYNCHRONIZATION);
		return -1;
	}
	
	// Velocities: direct device-to-Vector copy.
	if (copyGPUBufferToVector(d_nodeVelocities, velocities) != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to sync velocities from GPU");
		stopTimer(TimerType::DATA_SYNCHRONIZATION);
		return -1;
	}
	
	// Accelerations: direct device-to-Vector copy.
	if (copyGPUBufferToVector(d_nodeAccelerations, accelerations) != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to sync accelerations from GPU");
		stopTimer(TimerType::DATA_SYNCHRONIZATION);
		return -1;
	}
	
	stopTimer(TimerType::DATA_SYNCHRONIZATION);
	
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"All node data synchronized from GPU to CPU successfully");
	
	return 0;
}

// ====== 数据同步状态查询和控制 ======

/** @brief Report whether the node data is currently marked as synchronized. */
bool GPUSolver::isNodeDataSynchronized()
{
	return this->nodeDataSynchronized;
}

/** @brief Flag the node data as stale so the next access re-synchronizes it. */
void GPUSolver::markNodeDataDirty()
{
	this->nodeDataSynchronized = false;
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG, 
		"Node data marked as dirty - synchronization required");
}

/** @brief Return the timestamp (seconds) of the last node-data synchronization. */
double GPUSolver::getLastSyncTime()
{
	return this->lastSyncTime;
}

/** @brief Turn automatic CPU-GPU synchronization on or off. */
void GPUSolver::enableAutomaticSync(bool enabled)
{
	this->automaticSyncEnabled = enabled;
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Automatic synchronization " << (enabled ? "enabled" : "disabled"));
}

/** @brief Convenience wrapper: disable automatic synchronization. */
void GPUSolver::disableAutomaticSync()
{
	this->enableAutomaticSync(false);
}

/** @brief Report whether automatic CPU-GPU synchronization is enabled. */
bool GPUSolver::isAutomaticSyncEnabled()
{
	return this->automaticSyncEnabled;
}

//==============================================================================
// 材料状态变量同步系统实现
//==============================================================================

/**
 * @brief Allocate the device buffers and host staging buffer for material
 *        state synchronization (stress, strain, properties, slot map).
 *
 * Sizes: maxMaterialStates slots (one per element, or 100 by default),
 * 6 stress/strain components per slot (3D: sxx,syy,szz,txy,tyz,tzx) and
 * 16 property values per slot.
 *
 * @return 0 on success, -1 on failure (partial allocations are released).
 */
int GPUSolver::allocateMaterialStateBuffers()
{
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Allocating material state synchronization buffers");
	
	// Drop any previously allocated buffers first.
	freeMaterialStateBuffers();
	
	// Capacity is driven by element count, with a fallback default of 100.
	maxMaterialStates = numElements > 0 ? numElements : 100;
	materialStateSize = 6;      // stress/strain components (3D tensor, Voigt order)
	materialPropertySize = 16;  // material property slots (E, nu, rho, ...)
	
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG, 
		"Material buffer parameters - maxStates: " << maxMaterialStates << 
		", stateSize: " << materialStateSize << 
		", propertySize: " << materialPropertySize);
	
	try {
		// Stress and strain buffers share the same layout and size.
		size_t stressBufferSize = maxMaterialStates * materialStateSize * sizeof(double);
		CHECK_CUDA_MALLOC((void**)&d_materialStress, stressBufferSize);
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG, 
			"Allocated GPU stress buffer: " << stressBufferSize << " bytes");
		
		size_t strainBufferSize = maxMaterialStates * materialStateSize * sizeof(double);
		CHECK_CUDA_MALLOC((void**)&d_materialStrain, strainBufferSize);
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG, 
			"Allocated GPU strain buffer: " << strainBufferSize << " bytes");
		
		size_t propertyBufferSize = maxMaterialStates * materialPropertySize * sizeof(double);
		CHECK_CUDA_MALLOC((void**)&d_materialProperties, propertyBufferSize);
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG, 
			"Allocated GPU properties buffer: " << propertyBufferSize << " bytes");
		
		// Element-tag -> slot map.
		size_t mapBufferSize = maxMaterialStates * sizeof(int);
		CHECK_CUDA_MALLOC((void**)&d_materialStateMap, mapBufferSize);
		
		// Host staging buffer, sized for the largest device buffer.
		// operator new[] throws std::bad_alloc on failure (it never returns
		// nullptr), so allocation errors land in the catch(...) below —
		// no null check is needed here.
		size_t tempBufferSize = std::max({stressBufferSize, strainBufferSize, propertyBufferSize});
		h_tempMaterialBuffer = new double[tempBufferSize / sizeof(double)];
		
		// Zero the data buffers; fill the map with 0xFF bytes so every int
		// slot reads as -1 ("unused").
		CHECK_CUDA(cudaMemset(d_materialStress, 0, stressBufferSize));
		CHECK_CUDA(cudaMemset(d_materialStrain, 0, strainBufferSize));
		CHECK_CUDA(cudaMemset(d_materialProperties, 0, propertyBufferSize));
		CHECK_CUDA(cudaMemset(d_materialStateMap, -1, mapBufferSize));
		
		// Start with an empty active-material registry.
		activeMaterials.clear();
		activeMaterials.reserve(maxMaterialStates);
		
		GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
			"Material state buffers allocated successfully");
		
		return 0;
	}
	catch (...) {
		// Covers std::bad_alloc from new[] and any throwing CHECK_* macro.
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Exception during material state buffer allocation");
		freeMaterialStateBuffers();
		return -1;
	}
}

/**
 * @brief Release all material-state buffers (device and host) and reset the
 *        material synchronization state. Safe to call when nothing is allocated.
 */
void GPUSolver::freeMaterialStateBuffers()
{
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG,
		"Freeing material state synchronization buffers");

	// Device-side buffers.
	CHECK_CUDA_FREE(d_materialStress);
	CHECK_CUDA_FREE(d_materialStrain);
	CHECK_CUDA_FREE(d_materialProperties);
	CHECK_CUDA_FREE(d_materialStateMap);

	// Host staging buffer (delete[] on nullptr is a no-op).
	delete[] h_tempMaterialBuffer;
	h_tempMaterialBuffer = nullptr;

	// Drop the active-material registry.
	activeMaterials.clear();

	// Back to the "nothing allocated" state.
	materialDataSynchronized = false;
	materialSyncTime = 0.0;
	maxMaterialStates = 0;
	materialStateSize = 0;
	materialPropertySize = 0;

	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO,
		"Material state buffers freed successfully");
}

// ====== Nonlinear Material State Management Implementation ======
// 非线性材料状态变量内存管理（用于GPU本构计算）

/**
 * @brief Allocate GPU memory for nonlinear material state variables.
 *
 * For every Gauss integration point this allocates a material state record
 * (ConcreteState) and a total-strain history entry, used by the GPU
 * constitutive computation.
 *
 * Memory layout:
 * - d_materialStates_committed / d_materialStates_trial: ConcreteState arrays
 *   (one record per Gauss point; dual-buffered for commit/trial semantics)
 * - d_strainTotal / d_strainCurrent_trial: strain arrays, 6 components per
 *   Gauss point (exx, eyy, ezz, gxy, gyz, gzx)
 *
 * After allocation the committed states are initialized by a kernel (damage
 * thresholds rp = ft/E, rn = fc/E) and copied device-to-device into the
 * trial buffer.
 *
 * @return 0 on success (including the no-op case of zero Gauss points),
 *         -1 on failure (partial allocations are released).
 */
int GPUSolver::allocateNonlinearMaterialStates()
{
	GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO,
		"Allocating nonlinear material state variables on GPU");

	// Release any existing buffers first.
	freeNonlinearMaterialStates();

	// Total Gauss point count: assumes every supported element is a Quad
	// with 4 Gauss points (2x2 integration).
	// TODO: compute per element type once other element types are supported.
	const int gaussPointsPerQuad = 4;
	numGaussPoints = numSupportedElements * gaussPointsPerQuad;

	if (numGaussPoints <= 0) {
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::WARNING,
			"No Gauss points to allocate (numSupportedElements=" << numSupportedElements << ")");
		return 0; // not an error — there is simply nothing to allocate
	}

	GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
		"Allocating material states - numElements: " << numSupportedElements <<
		", numGaussPoints: " << numGaussPoints);

	try {
		// Dual state buffers (committed + trial).
		size_t stateBufferSize = numGaussPoints * sizeof(ConcreteState);
		CHECK_CUDA_MALLOC(&d_materialStates_committed, stateBufferSize);
		CHECK_CUDA_MALLOC(&d_materialStates_trial, stateBufferSize);
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
			"Allocated dual state buffers: " << stateBufferSize << " bytes each (" <<
			numGaussPoints << " x " << sizeof(ConcreteState) << " bytes)");

		// Total-strain history arrays (6 components per Gauss point).
		size_t strainBufferSize = numGaussPoints * 6 * sizeof(double);
		CHECK_CUDA_MALLOC((void**)&d_strainTotal, strainBufferSize);
		CHECK_CUDA_MALLOC((void**)&d_strainCurrent_trial, strainBufferSize);
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
			"Allocated strain buffers: " << strainBufferSize << " bytes each (" <<
			numGaussPoints << " x 6 components)");

		// Zero-initialize all device buffers.
		CHECK_CUDA(cudaMemset(d_materialStates_committed, 0, stateBufferSize));
		CHECK_CUDA(cudaMemset(d_materialStates_trial, 0, stateBufferSize));
		CHECK_CUDA(cudaMemset(d_strainTotal, 0, strainBufferSize));
		CHECK_CUDA(cudaMemset(d_strainCurrent_trial, 0, strainBufferSize));

		// Run the initialization kernel to set damage thresholds
		// (rp = ft/E, rn = fc/E) from the per-element material parameters.
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
			"Initializing material damage thresholds from element material parameters");

		DEBUG_COUT << "[DEBUG] Calling initializeMaterialStates with:\n";
		DEBUG_COUT << "  numGaussPoints: " << numGaussPoints << "\n";
		DEBUG_COUT << "  d_elementData: " << d_elementData << "\n";
		DEBUG_COUT << "  d_materialStates_committed: " << d_materialStates_committed << "\n";
		DEBUG_COUT << "  d_materialStates_trial: " << d_materialStates_trial << "\n";
		DEBUG_COUT << "  numSupportedElements: " << numSupportedElements << "\n";
		DEBUG_COUT << "  gaussPointsPerQuad: " << gaussPointsPerQuad << "\n";
		std::cout.flush();

		// Initialize the committed states.
		int initRet = initializeMaterialStates(
			numGaussPoints,
			d_elementData,  // GPUElementData array
			static_cast<ConcreteState*>(d_materialStates_committed),
			numSupportedElements,
			gaussPointsPerQuad,
			0  // default stream
		);

		DEBUG_COUT << "[DEBUG] initializeMaterialStates returned: " << initRet << "\n";
		std::cout.flush();

		if (initRet != 0) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
				"Failed to initialize material states, error code: " << initRet);
			freeNonlinearMaterialStates();
			return -1;
		}

		// Seed the trial buffer from the committed states (device-to-device).
		CHECK_CUDA(cudaMemcpy(d_materialStates_trial, d_materialStates_committed,
		                      stateBufferSize, cudaMemcpyDeviceToDevice));

		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO,
			"Nonlinear material states allocated and initialized successfully");

		return 0;
	}
	catch (...) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
			"Exception during nonlinear material state allocation");
		freeNonlinearMaterialStates();
		return -1;
	}
}

/**
 * @brief Release the GPU memory holding nonlinear material state variables
 *        (committed/trial states and strain buffers) and reset the Gauss
 *        point counter. Safe to call when nothing is allocated.
 */
void GPUSolver::freeNonlinearMaterialStates()
{
	GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
		"Freeing nonlinear material state variables");

	// Dual state buffers plus both strain arrays.
	CHECK_CUDA_FREE(d_materialStates_committed);
	CHECK_CUDA_FREE(d_materialStates_trial);
	CHECK_CUDA_FREE(d_strainTotal);
	CHECK_CUDA_FREE(d_strainCurrent_trial);

	// No Gauss points remain allocated.
	numGaussPoints = 0;

	GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG,
		"Nonlinear material states freed successfully");
}

/**
 * @brief Copy `dataSize` doubles from a host array to a device array.
 *
 * @param source   host source array
 * @param d_target device destination array
 * @param dataSize number of doubles to transfer (must be positive)
 * @return 0 on success, -1 on invalid arguments or transfer failure.
 */
int GPUSolver::copyMaterialDataToGPU(const double* source, double* d_target, int dataSize)
{
	if (source == nullptr || d_target == nullptr || dataSize <= 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Invalid parameters for material data GPU copy");
		return -1;
	}
	
	const cudaError_t err = cudaMemcpy(d_target, source, 
	                                   dataSize * sizeof(double), cudaMemcpyHostToDevice);
	if (err != cudaSuccess) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to copy material data to GPU: " << cudaGetErrorString(err));
		return -1;
	}
	
	return 0;
}

/**
 * @brief Copy `dataSize` doubles from a device array to a host array.
 *
 * @param d_source device source array
 * @param target   host destination array
 * @param dataSize number of doubles to transfer (must be positive)
 * @return 0 on success, -1 on invalid arguments or transfer failure.
 */
int GPUSolver::copyMaterialDataFromGPU(double* d_source, double* target, int dataSize)
{
	if (d_source == nullptr || target == nullptr || dataSize <= 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Invalid parameters for material data CPU copy");
		return -1;
	}
	
	const cudaError_t err = cudaMemcpy(target, d_source, 
	                                   dataSize * sizeof(double), cudaMemcpyDeviceToHost);
	if (err != cudaSuccess) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to copy material data from GPU: " << cudaGetErrorString(err));
		return -1;
	}
	
	return 0;
}

/**
 * @brief Check that a caller-supplied state size matches materialStateSize.
 * @return true on match; false (with error log) otherwise.
 */
bool GPUSolver::validateMaterialStateSize(int stateSize)
{
	if (stateSize == materialStateSize)
		return true;
	
	GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
		"Material state size mismatch - expected: " << materialStateSize << 
		", actual: " << stateSize);
	return false;
}

/**
 * @brief Record the current time (seconds since the clock epoch) as the
 *        last material-state synchronization timestamp.
 */
void GPUSolver::updateMaterialSyncTimestamp()
{
	using Clock = std::chrono::high_resolution_clock;
	materialSyncTime =
		std::chrono::duration<double>(Clock::now().time_since_epoch()).count();
}

/**
 * @brief Locate the slot index registered for an element tag.
 * @return zero-based slot index, or -1 when the tag is not registered.
 */
int GPUSolver::findMaterialSlot(int elementTag)
{
	auto it = std::find(activeMaterials.begin(), activeMaterials.end(), elementTag);
	if (it == activeMaterials.end())
		return -1; // not registered
	return static_cast<int>(std::distance(activeMaterials.begin(), it));
}

int GPUSolver::addActiveMaterial(int elementTag)
{
	// Return the existing slot when this element already has a material entry.
	int slot = findMaterialSlot(elementTag);
	if (slot >= 0) {
		return slot;
	}
	
	// Refuse to grow beyond the configured GPU buffer capacity.
	if (activeMaterials.size() >= static_cast<size_t>(maxMaterialStates)) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Maximum material states exceeded: " << maxMaterialStates);
		return -1;
	}
	
	// Register the element; its slot is the index it was appended at.
	activeMaterials.push_back(elementTag);
	slot = static_cast<int>(activeMaterials.size()) - 1;
	
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG, 
		"Added material for element " << elementTag << " at slot " << slot);
	
	return slot;
}

void GPUSolver::removeActiveMaterial(int elementTag)
{
	// Drop the element's material entry (if any) and flag GPU data stale.
	auto pos = std::find(activeMaterials.begin(), activeMaterials.end(), elementTag);
	if (pos == activeMaterials.end()) {
		return;
	}
	activeMaterials.erase(pos);
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG, 
		"Removed material for element " << elementTag);
	materialDataSynchronized = false; // slots shifted; a re-sync is required
}

// ====== Material state synchronization: public interface implementation ======

int GPUSolver::initializeMaterialStateBuffers()
{
	// Thin public wrapper over the internal material buffer allocator.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Initializing material state buffers");
	const int status = allocateMaterialStateBuffers();
	return status;
}

int GPUSolver::syncMaterialStatesToGPU()
{
	// Simplified public interface: forwards to the batch synchronization path.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Synchronizing material states to GPU (simplified interface)");
	const int status = syncAllMaterialStatesToGPU();
	return status;
}

int GPUSolver::syncMaterialStressToGPU(int elementTag, const double* stress, int stressSize)
{
	// Push one element's stress vector into its slot of the device stress buffer.
	// Returns 0 on success, -1 on any failure.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::VERBOSE, 
		"Synchronizing stress for element " << elementTag);
	
	// Lazily allocate the material buffers on first use.
	if (!d_materialStress && allocateMaterialStateBuffers() != 0) {
		return -1;
	}
	
	if (!validateMaterialStateSize(stressSize)) {
		return -1;
	}
	
	// Look up (or create) this element's slot in the active-material list.
	const int slot = addActiveMaterial(elementTag);
	if (slot < 0) {
		return -1;
	}
	
	// Each slot occupies materialStateSize doubles in the flat device buffer.
	double* d_slotStress = d_materialStress + slot * materialStateSize;
	if (copyMaterialDataToGPU(stress, d_slotStress, stressSize) != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to sync stress for element " << elementTag);
		return -1;
	}
	
	updateMaterialSyncTimestamp();
	return 0;
}

int GPUSolver::syncMaterialStrainToGPU(int elementTag, const double* strain, int strainSize)
{
	// Push one element's strain vector into its slot of the device strain buffer.
	// Returns 0 on success, -1 on any failure.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::VERBOSE, 
		"Synchronizing strain for element " << elementTag);
	
	// Lazily allocate the material buffers on first use.
	if (!d_materialStrain && allocateMaterialStateBuffers() != 0) {
		return -1;
	}
	
	if (!validateMaterialStateSize(strainSize)) {
		return -1;
	}
	
	// Look up (or create) this element's slot in the active-material list.
	const int slot = addActiveMaterial(elementTag);
	if (slot < 0) {
		return -1;
	}
	
	// Each slot occupies materialStateSize doubles in the flat device buffer.
	double* d_slotStrain = d_materialStrain + slot * materialStateSize;
	if (copyMaterialDataToGPU(strain, d_slotStrain, strainSize) != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to sync strain for element " << elementTag);
		return -1;
	}
	
	updateMaterialSyncTimestamp();
	return 0;
}

int GPUSolver::syncMaterialPropertiesToGPU(int elementTag, const double* properties, int propSize)
{
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::VERBOSE, 
		"Synchronizing properties for element " << elementTag);
	
	// 确保缓冲区已分配
	if (!d_materialProperties) {
		int result = allocateMaterialStateBuffers();
		if (result != 0) {
			return -1;
		}
	}
	
	if (propSize > materialPropertySize) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Property size exceeds buffer capacity - max: " << materialPropertySize << 
			", actual: " << propSize);
		return -1;
	}
	
	// 获取或分配材料槽位
	int slot = addActiveMaterial(elementTag);
	if (slot < 0) {
		return -1;
	}
	
	// 计算目标GPU地址
	double* d_targetProperties = d_materialProperties + slot * materialPropertySize;
	
	// 传输材料属性数据
	int result = copyMaterialDataToGPU(properties, d_targetProperties, propSize);
	if (result != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to sync properties for element " << elementTag);
		return -1;
	}
	
	updateMaterialSyncTimestamp();
	return 0;
}

int GPUSolver::syncAllMaterialStatesToGPU()
{
	// Batch synchronization of all tracked material states to the device.
	// NOTE(review): the Domain traversal is still a stub; only bookkeeping
	// (flags, timestamp, timers) happens here for now.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Synchronizing all material states to GPU");
	
	startTimer(TimerType::DATA_SYNCHRONIZATION);
	
	// Lazily allocate any missing device buffer.
	const bool buffersReady = d_materialStress && d_materialStrain && d_materialProperties;
	if (!buffersReady && allocateMaterialStateBuffers() != 0) {
		stopTimer(TimerType::DATA_SYNCHRONIZATION);
		return -1;
	}
	
	// TODO: walk every Element in the Domain and extract its Material state.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::WARNING, 
		"Domain-based material state extraction not yet implemented");
	
	materialDataSynchronized = true;
	updateMaterialSyncTimestamp();
	stopTimer(TimerType::DATA_SYNCHRONIZATION);
	
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"All material states synchronized to GPU - " << activeMaterials.size() << " materials");
	
	return 0;
}

int GPUSolver::syncMaterialStatesFromGPU()
{
	// Pull material state back from the device into the CPU-side Domain.
	// NOTE(review): the actual device-to-Domain update is still a stub.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Synchronizing material states from GPU to CPU");
	
	// Fail fast when the device buffers were never allocated.
	if (!d_materialStress || !d_materialStrain) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"GPU material buffers not allocated");
		return -1;
	}
	
	startTimer(TimerType::DATA_SYNCHRONIZATION);
	
	// TODO: write the device-resident states back into the Domain's materials.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::WARNING, 
		"GPU-to-CPU material state update not yet implemented");
	
	stopTimer(TimerType::DATA_SYNCHRONIZATION);
	
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Material states synchronized from GPU to CPU");
	
	return 0;
}

// ====== Material state query and control ======

bool GPUSolver::isMaterialDataSynchronized()
{
	// True when the device-side material buffers mirror the CPU state.
	return materialDataSynchronized;
}

void GPUSolver::markMaterialDataDirty()
{
	// Invalidate the device copy so the next sync re-uploads everything.
	materialDataSynchronized = false;
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG, 
		"Material data marked as dirty - synchronization required");
}

int GPUSolver::getMaterialDataCount()
{
	// Number of elements with a registered material slot.
	return static_cast<int>(activeMaterials.size());
}

double GPUSolver::getMaterialSyncTime()
{
	// Timestamp (seconds since clock epoch) of the last material sync.
	return materialSyncTime;
}

//==============================================================================
// Element geometry/topology real-time update system implementation
//==============================================================================

int GPUSolver::allocateGeometryBuffers()
{
	// Allocate the device and host buffers that back element geometry updates:
	// reference/current/deformed coordinates, per-element deformation gradients
	// (3x3), and connectivity. Any previous buffers are released first, the new
	// device buffers are zero-initialized, and the updated-element list is reset.
	// Returns 0 on success, -1 on failure (buffers are released again on failure).
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Allocating element geometry update buffers");
	
	// Release any buffers from a previous allocation.
	freeGeometryBuffers();
	
	// Size the buffers; fall back to a default capacity when numElements is not
	// yet known. Assumes at most 8 nodes per element and 3 coordinates per node.
	maxGeometryNodes = numElements > 0 ? numElements * 8 : 800;  // up to 8 nodes per element
	int maxCoordinates = maxGeometryNodes * 3;  // 3D coordinates
	int maxConnectivity = numElements > 0 ? numElements * 8 : 800;  // connectivity entries
	
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG, 
		"Geometry buffer parameters - maxNodes: " << maxGeometryNodes << 
		", maxCoords: " << maxCoordinates << 
		", maxConnectivity: " << maxConnectivity);
	
	try {
		// Device coordinate buffers (same size for current / deformed / reference).
		size_t coordBufferSize = maxCoordinates * sizeof(double);
		CHECK_CUDA_MALLOC((void**)&d_elementCoordinates, coordBufferSize);
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG, 
			"Allocated GPU element coordinates buffer: " << coordBufferSize << " bytes");
		
		CHECK_CUDA_MALLOC((void**)&d_deformedCoordinates, coordBufferSize);
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG, 
			"Allocated GPU deformed coordinates buffer: " << coordBufferSize << " bytes");
		
		CHECK_CUDA_MALLOC((void**)&d_referenceGeometry, coordBufferSize);
		GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG, 
			"Allocated GPU reference geometry buffer: " << coordBufferSize << " bytes");
		
		// Deformation gradient buffer: one 3x3 matrix (9 doubles) per element.
		// Only allocated when the element count is known.
		size_t gradientBufferSize = numElements * 9 * sizeof(double);
		if (numElements > 0) {
			CHECK_CUDA_MALLOC((void**)&d_deformationGradient, gradientBufferSize);
			GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::DEBUG, 
				"Allocated GPU deformation gradient buffer: " << gradientBufferSize << " bytes");
		}
		
		// Device connectivity buffer (int node indices).
		size_t connectivityBufferSize = maxConnectivity * sizeof(int);
		CHECK_CUDA_MALLOC((void**)&d_elementConnectivity, connectivityBufferSize);
		
		// Host staging buffer, large enough for either coordinates or connectivity.
		size_t tempBufferSize = std::max(coordBufferSize, connectivityBufferSize);
		h_tempGeometryBuffer = new double[tempBufferSize / sizeof(double)];
		// NOTE(review): plain new[] throws std::bad_alloc instead of returning
		// nullptr, so this check is effectively dead; allocation failure is
		// actually handled by the catch(...) below.
		if (!h_tempGeometryBuffer) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
				"Failed to allocate CPU temporary geometry buffer");
			freeGeometryBuffers();
			return -1;
		}
		
		// Zero-initialize all device buffers.
		CHECK_CUDA(cudaMemset(d_elementCoordinates, 0, coordBufferSize));
		CHECK_CUDA(cudaMemset(d_deformedCoordinates, 0, coordBufferSize));
		CHECK_CUDA(cudaMemset(d_referenceGeometry, 0, coordBufferSize));
		CHECK_CUDA(cudaMemset(d_elementConnectivity, 0, connectivityBufferSize));
		
		if (d_deformationGradient && numElements > 0) {
			CHECK_CUDA(cudaMemset(d_deformationGradient, 0, gradientBufferSize));
		}
		
		// Reset the tracking list of elements with pending geometry updates.
		updatedElements.clear();
		updatedElements.reserve(numElements > 0 ? numElements : 100);
		
		GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
			"Geometry update buffers allocated successfully");
		
		return 0;
	}
	catch (...) {
		// Covers std::bad_alloc from new[] and anything thrown by the CHECK_* macros.
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Exception during geometry buffer allocation");
		freeGeometryBuffers();
		return -1;
	}
}

void GPUSolver::freeGeometryBuffers()
{
	// Release every geometry-related buffer (device and host) and reset all
	// associated bookkeeping so a later allocateGeometryBuffers() starts clean.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG, 
		"Freeing element geometry update buffers");
	
	// Device buffers.
	CHECK_CUDA_FREE(d_elementCoordinates);
	CHECK_CUDA_FREE(d_deformedCoordinates);
	CHECK_CUDA_FREE(d_deformationGradient);
	CHECK_CUDA_FREE(d_elementConnectivity);
	CHECK_CUDA_FREE(d_referenceGeometry);
	
	// Host staging buffer.
	delete[] h_tempGeometryBuffer;
	h_tempGeometryBuffer = nullptr;
	
	// Tracking list and status flags.
	updatedElements.clear();
	elementGeometryUpdated = false;
	geometryUpdateTime = 0.0;
	updatedElementCount = 0;
	maxGeometryNodes = 0;
	
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Geometry update buffers freed successfully");
}

int GPUSolver::copyGeometryDataToGPU(const double* source, double* d_target, int dataSize)
{
	if (!source || !d_target || dataSize <= 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Invalid parameters for geometry data GPU copy");
		return -1;
	}
	
	cudaError_t result = cudaMemcpy(d_target, source, 
	                               dataSize * sizeof(double), cudaMemcpyHostToDevice);
	if (result != cudaSuccess) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to copy geometry data to GPU: " << cudaGetErrorString(result));
		return -1;
	}
	
	return 0;
}

int GPUSolver::copyGeometryDataFromGPU(double* d_source, double* target, int dataSize)
{
	if (!d_source || !target || dataSize <= 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Invalid parameters for geometry data CPU copy");
		return -1;
	}
	
	cudaError_t result = cudaMemcpy(target, d_source, 
	                               dataSize * sizeof(double), cudaMemcpyDeviceToHost);
	if (result != cudaSuccess) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to copy geometry data from GPU: " << cudaGetErrorString(result));
		return -1;
	}
	
	return 0;
}

bool GPUSolver::validateGeometrySize(int geometrySize)
{
	// Coordinate counts must be positive and fit inside the allocated
	// coordinate buffer (maxGeometryNodes nodes x 3 components each).
	const int capacity = maxGeometryNodes * 3;
	if (geometrySize > 0 && geometrySize <= capacity) {
		return true;
	}
	GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
		"Invalid geometry size - size: " << geometrySize << 
		", max allowed: " << capacity);
	return false;
}

void GPUSolver::updateGeometryTimestamp()
{
	// Record the current wall-clock time (seconds since the clock epoch)
	// as the moment of the last geometry update.
	using Clock = std::chrono::high_resolution_clock;
	geometryUpdateTime =
		std::chrono::duration<double>(Clock::now().time_since_epoch()).count();
}

int GPUSolver::findElementGeometrySlot(int elementTag)
{
	// Locate the geometry slot index of an element in the updated-element list.
	// Returns the zero-based slot, or -1 when the element is not tracked.
	// Uses std::find for consistency with removeUpdatedElement() instead of
	// the previous hand-rolled index loop.
	auto it = std::find(updatedElements.begin(), updatedElements.end(), elementTag);
	if (it == updatedElements.end()) {
		return -1; // not found
	}
	return static_cast<int>(std::distance(updatedElements.begin(), it));
}

int GPUSolver::addUpdatedElement(int elementTag)
{
	// Return the existing slot when the element is already tracked.
	int slot = findElementGeometrySlot(elementTag);
	if (slot >= 0) {
		return slot;
	}
	
	// Capacity guard: once the element count is known, never track more.
	if (numElements > 0 && updatedElements.size() >= static_cast<size_t>(numElements)) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Maximum updated elements exceeded: " << numElements);
		return -1;
	}
	
	// Register the element; its slot is the index it was appended at.
	updatedElements.push_back(elementTag);
	slot = static_cast<int>(updatedElements.size()) - 1;
	updatedElementCount++;
	
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG, 
		"Added geometry update for element " << elementTag << " at slot " << slot);
	
	return slot;
}

void GPUSolver::removeUpdatedElement(int elementTag)
{
	// Untrack the element (if present) and flag the geometry as needing an update.
	auto pos = std::find(updatedElements.begin(), updatedElements.end(), elementTag);
	if (pos == updatedElements.end()) {
		return;
	}
	updatedElements.erase(pos);
	updatedElementCount--;
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG, 
		"Removed geometry update for element " << elementTag);
	elementGeometryUpdated = false; // slots shifted; a refresh is required
}

int GPUSolver::computeDeformedCoordinates(const Vector& displacements)
{
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Computing deformed coordinates from displacements");
	
	if (!d_elementCoordinates || !d_deformedCoordinates) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Geometry buffers not allocated");
		return -1;
	}
	
	// TODO: 实现GPU核函数来计算变形后坐标
	// deformed_coords = reference_coords + displacements
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::WARNING, 
		"GPU deformed coordinates computation not yet implemented");
	
	// 临时实现：简单复制参考坐标到变形坐标
	size_t coordBufferSize = maxGeometryNodes * 3 * sizeof(double);
	cudaError_t result = cudaMemcpy(d_deformedCoordinates, d_elementCoordinates, 
	                               coordBufferSize, cudaMemcpyDeviceToDevice);
	if (result != cudaSuccess) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to copy coordinates: " << cudaGetErrorString(result));
		return -1;
	}
	
	return 0;
}

int GPUSolver::updateElementJacobian(int elementTag)
{
	// Placeholder: the per-element Jacobian refresh is not implemented yet.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::VERBOSE, 
		"Updating Jacobian for element " << elementTag);
	
	// TODO: recompute this element's Jacobian matrix from current geometry.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::WARNING, 
		"Element Jacobian update not yet implemented");
	
	return 0;
}

// ====== Geometry update: public interface implementation ======

int GPUSolver::initializeGeometryBuffers()
{
	// Thin public wrapper over the internal geometry buffer allocator.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Initializing geometry update buffers");
	const int status = allocateGeometryBuffers();
	return status;
}

int GPUSolver::updateElementGeometry()
{
	// Refresh element geometry data on the device.
	// NOTE(review): the Domain extraction step is still a stub; only the
	// flags, timestamp and timers are maintained here.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Updating element geometry data");
	
	startTimer(TimerType::DATA_SYNCHRONIZATION);
	
	// Lazily allocate the geometry buffers on first use.
	if (!d_elementCoordinates && allocateGeometryBuffers() != 0) {
		stopTimer(TimerType::DATA_SYNCHRONIZATION);
		return -1;
	}
	
	// TODO: pull coordinates and connectivity out of the Domain.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::WARNING, 
		"Domain-based geometry data extraction not yet implemented");
	
	elementGeometryUpdated = true;
	updateGeometryTimestamp();
	stopTimer(TimerType::DATA_SYNCHRONIZATION);
	
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Element geometry updated - " << updatedElementCount << " elements");
	
	return 0;
}

int GPUSolver::updateElementCoordinates(int elementTag, const double* coords, int coordSize)
{
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::VERBOSE, 
		"Updating coordinates for element " << elementTag);
	
	// 确保缓冲区已分配
	if (!d_elementCoordinates) {
		int result = allocateGeometryBuffers();
		if (result != 0) {
			return -1;
		}
	}
	
	if (!validateGeometrySize(coordSize)) {
		return -1;
	}
	
	// 获取或分配几何槽位
	int slot = addUpdatedElement(elementTag);
	if (slot < 0) {
		return -1;
	}
	
	// 计算目标GPU地址 (假设每个单元24个坐标：8节点×3坐标)
	double* d_targetCoords = d_elementCoordinates + slot * 24;
	
	// 传输坐标数据
	int result = copyGeometryDataToGPU(coords, d_targetCoords, coordSize);
	if (result != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to update coordinates for element " << elementTag);
		return -1;
	}
	
	updateGeometryTimestamp();
	return 0;
}

int GPUSolver::updateElementConnectivity(int elementTag, const int* connectivity, int nodeCount)
{
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::VERBOSE, 
		"Updating connectivity for element " << elementTag);
	
	// 确保缓冲区已分配
	if (!d_elementConnectivity) {
		int result = allocateGeometryBuffers();
		if (result != 0) {
			return -1;
		}
	}
	
	if (nodeCount <= 0 || nodeCount > 8) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Invalid node count: " << nodeCount);
		return -1;
	}
	
	// 获取或分配几何槽位
	int slot = addUpdatedElement(elementTag);
	if (slot < 0) {
		return -1;
	}
	
	// 计算目标GPU地址
	int* d_targetConnectivity = d_elementConnectivity + slot * 8;  // 最多8个节点
	
	// 传输连接关系数据
	cudaError_t result = cudaMemcpy(d_targetConnectivity, connectivity, 
	                               nodeCount * sizeof(int), cudaMemcpyHostToDevice);
	if (result != cudaSuccess) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to update connectivity for element " << elementTag << 
			": " << cudaGetErrorString(result));
		return -1;
	}
	
	updateGeometryTimestamp();
	return 0;
}

int GPUSolver::updateDeformedGeometry(const Vector& displacements)
{
	// Push the displacement field to the device, then derive the deformed
	// coordinates from it. Returns 0 on success, -1 on any failure.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Updating deformed geometry from displacement field");
	
	startTimer(TimerType::DATA_SYNCHRONIZATION);
	
	// Step 1: displacements to the device.
	if (syncNodeDisplacementsToGPU(displacements) != 0) {
		stopTimer(TimerType::DATA_SYNCHRONIZATION);
		return -1;
	}
	
	// Step 2: deformed coordinates from the displacements.
	if (computeDeformedCoordinates(displacements) != 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Failed to compute deformed coordinates");
		stopTimer(TimerType::DATA_SYNCHRONIZATION);
		return -1;
	}
	
	elementGeometryUpdated = true;
	updateGeometryTimestamp();
	stopTimer(TimerType::DATA_SYNCHRONIZATION);
	
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Deformed geometry updated successfully");
	
	return 0;
}

// ====== Large deformation support ======

int GPUSolver::enableLargeDeformation(bool enabled)
{
	// Toggle large-deformation mode. Enabling also ensures the deformation
	// gradient buffer exists (it is only allocated when numElements > 0).
	largeDeformationEnabled = enabled;
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Large deformation mode " << (enabled ? "enabled" : "disabled"));
	
	if (enabled && !d_deformationGradient && numElements > 0) {
		if (allocateGeometryBuffers() != 0) {
			largeDeformationEnabled = false; // roll back on allocation failure
			return -1;
		}
	}
	
	return 0;
}

bool GPUSolver::isLargeDeformationEnabled()
{
	// Query whether large-deformation mode is currently active.
	return largeDeformationEnabled;
}

int GPUSolver::updateDeformationGradient()
{
	// Placeholder: refresh the per-element deformation gradients on the device.
	// Requires large-deformation mode and an allocated gradient buffer.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Updating deformation gradient");
	
	if (!largeDeformationEnabled) {
		GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::WARNING, 
			"Large deformation mode not enabled");
		return -1;
	}
	
	if (!d_deformationGradient) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR, 
			"Deformation gradient buffer not allocated");
		return -1;
	}
	
	// TODO: compute the deformation gradients from current geometry.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::WARNING, 
		"Deformation gradient computation not yet implemented");
	
	return 0;
}

int GPUSolver::computeUpdatedLagrangianGeometry()
{
	// Placeholder: Updated Lagrangian geometry computation.
	// Only valid when large-deformation mode is active.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Computing Updated Lagrangian geometry");
	
	if (!largeDeformationEnabled) {
		GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::WARNING, 
			"Large deformation mode not enabled");
		return -1;
	}
	
	// TODO: implement the Updated Lagrangian geometry update.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::WARNING, 
		"Updated Lagrangian geometry computation not yet implemented");
	
	return 0;
}

// ====== Geometry state query and control ======

bool GPUSolver::isElementGeometryUpdated()
{
	// True when device-side geometry reflects the latest update pass.
	return elementGeometryUpdated;
}

void GPUSolver::markElementGeometryDirty()
{
	// Invalidate the device geometry so the next pass refreshes it.
	elementGeometryUpdated = false;
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG, 
		"Element geometry marked as dirty - update required");
}

double GPUSolver::getGeometryUpdateTime()
{
	// Timestamp (seconds since clock epoch) of the last geometry update.
	return geometryUpdateTime;
}

int GPUSolver::getUpdatedElementCount()
{
	// Number of elements registered for geometry updates.
	return updatedElementCount;
}

//==============================================================================
// Enhanced debug information output system implementation
//==============================================================================

void GPUSolver::initializeDebugLogging()
{
	// Reset the debug counters and pre-size the in-memory event logs.
	totalDataSyncEvents = 0;
	totalMemoryOperations = 0;
	totalKernelLaunches = 0;
	totalDataTransferTime = 0.0;
	totalKernelExecutionTime = 0.0;
	
	const size_t kLogCapacity = 1000;
	syncEventLog.reserve(kLogCapacity);
	memoryOperationLog.reserve(kLogCapacity);
	performanceEventLog.reserve(kLogCapacity);
	
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::DEBUG, 
		"Enhanced debug logging system initialized");
}

void GPUSolver::finalizeDebugLogging()
{
	// Emit the final statistics, close the optional log file and clear the logs.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO, 
		"Finalizing enhanced debug logging system");
	
	// Console summary (only when the data-transfer module is being traced).
	const bool reportToConsole =
		debugEnabled && moduleEnabled[static_cast<int>(DebugModule::DATA_TRANSFER)];
	if (reportToConsole) {
		DEBUG_COUT << "\n====== GPUSolver Final Debug Statistics ======" << std::endl;
		DEBUG_COUT << "Total Data Sync Events: " << totalDataSyncEvents << std::endl;
		DEBUG_COUT << "Total Memory Operations: " << totalMemoryOperations << std::endl;
		DEBUG_COUT << "Total Kernel Launches: " << totalKernelLaunches << std::endl;
		DEBUG_COUT << "Total Data Transfer Time: " << formatExecutionTime(totalDataTransferTime) << std::endl;
		DEBUG_COUT << "Total Kernel Execution Time: " << formatExecutionTime(totalKernelExecutionTime) << std::endl;
		DEBUG_COUT << "===============================================" << std::endl;
	}
	
	// Close the file stream, writing the footer first.
	if (debugFileStream && debugFileStream->is_open()) {
		writeDebugFooter();
		debugFileStream->close();
		delete debugFileStream;
		debugFileStream = nullptr;
	}
	
	// Drop all buffered log entries.
	syncEventLog.clear();
	memoryOperationLog.clear();
	performanceEventLog.clear();
}

std::string GPUSolver::getCurrentTimestamp()
{
	// Wall-clock timestamp "YYYY-MM-DD HH:MM:SS.mmm" for log entries.
	// NOTE(review): std::localtime returns a pointer to shared static storage
	// and is not thread-safe; fine if logging is single-threaded — confirm.
	const auto now = std::chrono::system_clock::now();
	const std::time_t seconds = std::chrono::system_clock::to_time_t(now);
	const auto millis = std::chrono::duration_cast<std::chrono::milliseconds>(
		now.time_since_epoch()) % 1000;
	
	std::ostringstream out;
	out << std::put_time(std::localtime(&seconds), "%Y-%m-%d %H:%M:%S")
	    << '.' << std::setfill('0') << std::setw(3) << millis.count();
	return out.str();
}

std::string GPUSolver::formatMemorySize(size_t bytes)
{
	// Human-readable byte count: raw bytes below 1 KB, otherwise KB/MB/GB
	// with two decimal places.
	std::ostringstream out;
	const double kUnit = 1024.0;
	if (bytes < 1024) {
		out << bytes << " B";
	} else if (bytes < 1024 * 1024) {
		out << std::fixed << std::setprecision(2) << bytes / kUnit << " KB";
	} else if (bytes < 1024 * 1024 * 1024) {
		out << std::fixed << std::setprecision(2) << bytes / (kUnit * kUnit) << " MB";
	} else {
		out << std::fixed << std::setprecision(2) << bytes / (kUnit * kUnit * kUnit) << " GB";
	}
	return out.str();
}

std::string GPUSolver::formatExecutionTime(double timeMs)
{
	// Human-readable duration from a millisecond value: sub-ms values are
	// shown in microseconds, >= 1 s values in seconds, otherwise milliseconds.
	std::ostringstream out;
	out << std::fixed << std::setprecision(3);
	if (timeMs < 1.0) {
		out << timeMs * 1000.0 << " μs";
	} else if (timeMs < 1000.0) {
		out << timeMs << " ms";
	} else {
		out << timeMs / 1000.0 << " s";
	}
	return out.str();
}

void GPUSolver::addToEventLog(std::vector<std::string>& log, const std::string& entry, int maxSize)
{
	// Append an entry, then trim the oldest entries so the log never
	// holds more than maxSize items.
	log.push_back(entry);
	if (static_cast<int>(log.size()) > maxSize) {
		const auto excess = log.size() - maxSize;
		log.erase(log.begin(), log.begin() + excess);
	}
}

void GPUSolver::writeDebugHeader()
{
	// Write the session header to the debug log file, if one is open.
	if (!debugFileStream || !debugFileStream->is_open()) {
		return;
	}
	std::ofstream& out = *debugFileStream;
	out << "====== GPUSolver Enhanced Debug Log ======" << std::endl;
	out << "Timestamp: " << getCurrentTimestamp() << std::endl;
	out << "System DOF: " << getNumEqn() << std::endl;
	out << "Elements: " << numElements << std::endl;
	out << "===========================================" << std::endl;
}

void GPUSolver::writeDebugFooter()
{
	// Write the session summary to the debug log file, if one is open.
	if (!debugFileStream || !debugFileStream->is_open()) {
		return;
	}
	std::ofstream& out = *debugFileStream;
	out << "\n====== Debug Session Summary ======" << std::endl;
	out << "Total Data Sync Events: " << totalDataSyncEvents << std::endl;
	out << "Total Memory Operations: " << totalMemoryOperations << std::endl;
	out << "Total Kernel Launches: " << totalKernelLaunches << std::endl;
	out << "Total Data Transfer Time: " << formatExecutionTime(totalDataTransferTime) << std::endl;
	out << "Total Kernel Execution Time: " << formatExecutionTime(totalKernelExecutionTime) << std::endl;
	out << "====================================" << std::endl;
}

void GPUSolver::flushDebugOutput()
{
	// Force any buffered log-file output to disk.
	if (debugFileStream && debugFileStream->is_open()) {
		debugFileStream->flush();
	}
}

// ====== Enhanced debug information: public interface implementation ======

void GPUSolver::printSynchronizationStatus()
{
	// Dump the synchronization flags, elapsed-time-since-last-sync values and
	// tracked data counts to the debug stream.
	DEBUG_COUT << "\n====== GPU Synchronization Status ======" << std::endl;
	DEBUG_COUT << "Node Data Synchronized: " << (nodeDataSynchronized ? "YES" : "NO") << std::endl;
	DEBUG_COUT << "Material Data Synchronized: " << (materialDataSynchronized ? "YES" : "NO") << std::endl;
	DEBUG_COUT << "Geometry Updated: " << (elementGeometryUpdated ? "YES" : "NO") << std::endl;
	DEBUG_COUT << "Automatic Sync Enabled: " << (automaticSyncEnabled ? "YES" : "NO") << std::endl;
	DEBUG_COUT << "Large Deformation Mode: " << (largeDeformationEnabled ? "ENABLED" : "DISABLED") << std::endl;
	
	// BUG FIX: the sync timestamps store seconds-since-epoch (see
	// updateMaterialSyncTimestamp / updateGeometryTimestamp), so a meaningful
	// "ago" value must be (now - timestamp); the previous code printed the raw
	// epoch value itself. lastSyncTime is assumed to use the same convention.
	const double nowSec = std::chrono::duration<double>(
		std::chrono::high_resolution_clock::now().time_since_epoch()).count();
	
	DEBUG_COUT << "\nSync Timestamps:" << std::endl;
	DEBUG_COUT << "  Last Node Sync: " << formatExecutionTime((nowSec - lastSyncTime) * 1000) << " ago" << std::endl;
	DEBUG_COUT << "  Last Material Sync: " << formatExecutionTime((nowSec - materialSyncTime) * 1000) << " ago" << std::endl;
	DEBUG_COUT << "  Last Geometry Update: " << formatExecutionTime((nowSec - geometryUpdateTime) * 1000) << " ago" << std::endl;
	
	DEBUG_COUT << "\nData Counts:" << std::endl;
	DEBUG_COUT << "  Node Data Size: " << nodeDataSize << " DOF" << std::endl;
	DEBUG_COUT << "  Active Materials: " << activeMaterials.size() << std::endl;
	DEBUG_COUT << "  Updated Elements: " << updatedElements.size() << std::endl;
	DEBUG_COUT << "=========================================" << std::endl;
}

void GPUSolver::printMemoryUsageSummary()
{
	DEBUG_COUT << "\n====== GPU Memory Usage Summary ======" << std::endl;
	
	// 计算各类缓冲区内存使用
	size_t nodeDataMemory = nodeDataSize * 3 * sizeof(double); // 位移+速度+加速度
	size_t materialMemory = maxMaterialStates * (materialStateSize * 2 + materialPropertySize) * sizeof(double);
	size_t geometryMemory = maxGeometryNodes * 3 * sizeof(double) * 3; // 坐标+变形+参考
	
	DEBUG_COUT << "Node Data Buffers: " << formatMemorySize(nodeDataMemory) << std::endl;
	DEBUG_COUT << "Material State Buffers: " << formatMemorySize(materialMemory) << std::endl;
	DEBUG_COUT << "Geometry Buffers: " << formatMemorySize(geometryMemory) << std::endl;
	
	size_t totalCustomMemory = nodeDataMemory + materialMemory + geometryMemory;
	DEBUG_COUT << "Total Custom GPU Memory: " << formatMemorySize(totalCustomMemory) << std::endl;
	
	// GPU内存监控统计
	DEBUG_COUT << "\nGPU Memory Operations:" << std::endl;
	DEBUG_COUT << "  Total Allocations: " << getTotalAllocations() << std::endl;
	DEBUG_COUT << "  Current Allocated: " << formatMemorySize(getCurrentAllocatedMemory()) << std::endl;
	DEBUG_COUT << "  Peak Memory Usage: " << formatMemorySize(getPeakMemoryUsage()) << std::endl;
	
	DEBUG_COUT << "=======================================" << std::endl;
}

void GPUSolver::printPerformanceStatistics()
{
	// Report averaged timer values, operation counters and cumulative times.
	DEBUG_COUT << "\n====== GPU Performance Statistics ======" << std::endl;
	
	// Per-category average timings.
	DEBUG_COUT << "Timing Statistics:" << std::endl;
	DEBUG_COUT << "  Matrix Assembly: " << formatExecutionTime(getAverageTime(TimerType::MATRIX_ASSEMBLY)) << " (avg)" << std::endl;
	DEBUG_COUT << "  GPU Computation: " << formatExecutionTime(getAverageTime(TimerType::GPU_COMPUTATION)) << " (avg)" << std::endl;
	DEBUG_COUT << "  Data Synchronization: " << formatExecutionTime(getAverageTime(TimerType::DATA_SYNCHRONIZATION)) << " (avg)" << std::endl;
	DEBUG_COUT << "  Error Recovery: " << formatExecutionTime(getAverageTime(TimerType::ERROR_RECOVERY)) << " (avg)" << std::endl;
	
	// Raw event counters from the enhanced debug logging.
	DEBUG_COUT << "\nOperation Counts:" << std::endl;
	DEBUG_COUT << "  Data Sync Events: " << totalDataSyncEvents << std::endl;
	DEBUG_COUT << "  Memory Operations: " << totalMemoryOperations << std::endl;
	DEBUG_COUT << "  Kernel Launches: " << totalKernelLaunches << std::endl;
	
	DEBUG_COUT << "\nCumulative Times:" << std::endl;
	DEBUG_COUT << "  Total Data Transfer: " << formatExecutionTime(totalDataTransferTime) << std::endl;
	DEBUG_COUT << "  Total Kernel Execution: " << formatExecutionTime(totalKernelExecutionTime) << std::endl;
	
	// Derived metric: mean kernel duration (guard against divide-by-zero).
	if (totalKernelLaunches > 0) {
		const double meanKernelMs = totalKernelExecutionTime / totalKernelLaunches;
		DEBUG_COUT << "  Average Kernel Time: " << formatExecutionTime(meanKernelMs) << std::endl;
	}
	
	DEBUG_COUT << "=========================================" << std::endl;
}

void GPUSolver::printDataTransferStatus()
{
	// Dump the tail (up to five entries) of each in-memory event log.
	auto printTail = [](const std::vector<std::string>& log) {
		const int total = static_cast<int>(log.size());
		for (int i = std::max(0, total - 5); i < total; i++) {
			DEBUG_COUT << "  " << log[i] << std::endl;
		}
	};
	
	DEBUG_COUT << "\n====== Data Transfer Status ======" << std::endl;
	
	DEBUG_COUT << "Recent Sync Events (" << std::min(5, static_cast<int>(syncEventLog.size())) << " latest):" << std::endl;
	printTail(syncEventLog);
	
	DEBUG_COUT << "\nRecent Memory Operations (" << std::min(5, static_cast<int>(memoryOperationLog.size())) << " latest):" << std::endl;
	printTail(memoryOperationLog);
	
	DEBUG_COUT << "\nRecent Performance Events (" << std::min(5, static_cast<int>(performanceEventLog.size())) << " latest):" << std::endl;
	printTail(performanceEventLog);
	
	DEBUG_COUT << "==================================" << std::endl;
}

void GPUSolver::logDataSyncEvent(const std::string& eventType, const std::string& details)
{
	// Count the sync event, timestamp it, and fan the entry out to the
	// in-memory log, the debug print channel, and the debug file if open.
	++totalDataSyncEvents;

	std::ostringstream oss;
	oss << getCurrentTimestamp() << " [" << eventType << "] " << details;
	std::string entry = oss.str();

	addToEventLog(syncEventLog, entry);

	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::VERBOSE,
		"Data sync event logged: " << eventType << " - " << details);

	if (debugFileStream != nullptr && debugFileStream->is_open()) {
		*debugFileStream << "SYNC: " << entry << std::endl;
	}
}

void GPUSolver::logMemoryOperation(const std::string& operation, size_t bytes, const std::string& bufferType)
{
	// Record one GPU memory operation (alloc/free/copy as named by the
	// caller) in the rolling log and mirror it to the debug file if open.
	++totalMemoryOperations;

	std::ostringstream oss;
	oss << getCurrentTimestamp() << " [" << operation << "] "
	    << formatMemorySize(bytes) << " (" << bufferType << ")";
	std::string entry = oss.str();

	addToEventLog(memoryOperationLog, entry);

	GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::VERBOSE,
		"Memory operation logged: " << operation << " - " << formatMemorySize(bytes) << " for " << bufferType);

	if (debugFileStream != nullptr && debugFileStream->is_open()) {
		*debugFileStream << "MEM: " << entry << std::endl;
	}
}

void GPUSolver::logPerformanceEvent(const std::string& operation, double timeMs)
{
	// Accumulate the elapsed time into the data-transfer total and log
	// the timed operation to memory and, when attached, the debug file.
	totalDataTransferTime += timeMs;

	std::ostringstream oss;
	oss << getCurrentTimestamp() << " [" << operation << "] "
	    << formatExecutionTime(timeMs);
	std::string entry = oss.str();

	addToEventLog(performanceEventLog, entry);

	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::VERBOSE,
		"Performance event logged: " << operation << " - " << formatExecutionTime(timeMs));

	if (debugFileStream != nullptr && debugFileStream->is_open()) {
		*debugFileStream << "PERF: " << entry << std::endl;
	}
}

void GPUSolver::logGPUKernelLaunch(const std::string& kernelName, int elements, double timeMs)
{
	// Track launch counters and cumulative kernel time, then log the
	// launch (name, element count, duration) like the other event types.
	++totalKernelLaunches;
	totalKernelExecutionTime += timeMs;

	std::ostringstream oss;
	oss << getCurrentTimestamp() << " [KERNEL] " << kernelName
	    << " (elements: " << elements << ", time: " << formatExecutionTime(timeMs) << ")";
	std::string entry = oss.str();

	addToEventLog(performanceEventLog, entry);

	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::VERBOSE,
		"GPU kernel launch logged: " << kernelName << " - " << elements << " elements, " << formatExecutionTime(timeMs));

	if (debugFileStream != nullptr && debugFileStream->is_open()) {
		*debugFileStream << "KERNEL: " << entry << std::endl;
	}
}

int GPUSolver::validateAllBuffers()
{
	// Verify that every GPU buffer group whose configured size is
	// non-zero actually has its device pointers allocated.
	// Returns the number of buffer groups that failed validation.
	GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::INFO,
		"Validating all GPU buffers");

	int errorCount = 0;

	// Node state buffers (displacement / velocity / acceleration).
	const bool nodeBuffersOk =
		(d_nodeDisplacements != nullptr) &&
		(d_nodeVelocities != nullptr) &&
		(d_nodeAccelerations != nullptr);
	if (nodeDataSize > 0 && !nodeBuffersOk) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
			"Node data buffers not properly allocated");
		errorCount++;
	}

	// Material state buffers (stress / strain / properties).
	const bool materialBuffersOk =
		(d_materialStress != nullptr) &&
		(d_materialStrain != nullptr) &&
		(d_materialProperties != nullptr);
	if (maxMaterialStates > 0 && !materialBuffersOk) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
			"Material state buffers not properly allocated");
		errorCount++;
	}

	// Geometry buffers (reference and deformed coordinates).
	const bool geometryBuffersOk =
		(d_elementCoordinates != nullptr) &&
		(d_deformedCoordinates != nullptr);
	if (maxGeometryNodes > 0 && !geometryBuffersOk) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
			"Geometry buffers not properly allocated");
		errorCount++;
	}

	// Report the overall outcome.
	if (errorCount == 0) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::INFO,
			"All GPU buffers validated successfully");
	} else {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
			"Buffer validation failed with " << errorCount << " errors");
	}

	return errorCount;
}

void GPUSolver::printGPUDeviceInfo()
{
	// Print a summary of the active CUDA device's capabilities.
	//
	// Fix: the original ignored the return codes of cudaGetDevice and
	// cudaGetDeviceProperties, so on failure it printed the contents of
	// an uninitialized cudaDeviceProp. Bail out with a diagnostic instead.
	int device = 0;
	cudaError_t err = cudaGetDevice(&device);
	if (err != cudaSuccess) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
			"cudaGetDevice failed: " << cudaGetErrorString(err));
		return;
	}

	cudaDeviceProp prop;
	err = cudaGetDeviceProperties(&prop, device);
	if (err != cudaSuccess) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
			"cudaGetDeviceProperties failed: " << cudaGetErrorString(err));
		return;
	}

	DEBUG_COUT << "\n====== GPU Device Information ======" << std::endl;
	DEBUG_COUT << "Device Name: " << prop.name << std::endl;
	DEBUG_COUT << "Compute Capability: " << prop.major << "." << prop.minor << std::endl;
	DEBUG_COUT << "Global Memory: " << formatMemorySize(prop.totalGlobalMem) << std::endl;
	DEBUG_COUT << "Shared Memory per Block: " << formatMemorySize(prop.sharedMemPerBlock) << std::endl;
	DEBUG_COUT << "Max Threads per Block: " << prop.maxThreadsPerBlock << std::endl;
	DEBUG_COUT << "Multiprocessor Count: " << prop.multiProcessorCount << std::endl;
	// clockRate / memoryClockRate are reported in kHz; divide for MHz.
	DEBUG_COUT << "Clock Rate: " << prop.clockRate / 1000 << " MHz" << std::endl;
	DEBUG_COUT << "Memory Clock Rate: " << prop.memoryClockRate / 1000 << " MHz" << std::endl;
	DEBUG_COUT << "Memory Bus Width: " << prop.memoryBusWidth << " bits" << std::endl;
	DEBUG_COUT << "====================================" << std::endl;
}

void GPUSolver::printSystemConfiguration()
{
	// Summarize the model size, buffer sizing, and debug settings.
	DEBUG_COUT << "\n====== System Configuration ======" << std::endl
		<< "System DOF: " << getNumEqn() << std::endl
		<< "Elements: " << numElements << std::endl
		<< "Max DOF per Element: " << maxDOFPerElement << std::endl
		<< "Supported Elements: " << numSupportedElements << std::endl
		<< "Unsupported Elements: " << numUnsupportedElements << std::endl;

	DEBUG_COUT << "\nBuffer Configurations:" << std::endl
		<< "  Node Data Size: " << nodeDataSize << " DOF" << std::endl
		<< "  Max Material States: " << maxMaterialStates << std::endl
		<< "  Max Geometry Nodes: " << maxGeometryNodes << std::endl;

	DEBUG_COUT << "\nDebug Settings:" << std::endl
		<< "  Debug Enabled: " << (debugEnabled ? "YES" : "NO") << std::endl
		<< "  Debug Level: " << static_cast<int>(currentDebugLevel) << std::endl
		<< "  Detailed Logging: " << (detailedLoggingEnabled ? "YES" : "NO") << std::endl
		<< "  Debug File: " << (debugOutputFile.empty() ? "None" : debugOutputFile) << std::endl
		<< "==================================" << std::endl;
}

int GPUSolver::generateDebugReport(const std::string& filename)
{
	// Write a full debug report (status sections plus raw event logs)
	// to `filename`. Returns 0 on success, -1 if the file cannot open.
	std::ofstream report(filename);
	if (!report.is_open()) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
			"Failed to create debug report file: " << filename);
		return -1;
	}

	report << "GPUSolver Debug Report" << std::endl
	       << "Generated: " << getCurrentTimestamp() << std::endl
	       << "======================================" << std::endl;

	// Temporarily route the debug stream into the report file so the
	// print* helpers below write there instead of the console.
	// rdbuf(new_buf) swaps the buffer and returns the previous one.
	std::streambuf* savedBuf = DEBUG_COUT.rdbuf(report.rdbuf());

	printSystemConfiguration();
	printSynchronizationStatus();
	printMemoryUsageSummary();
	printPerformanceStatistics();
	printDataTransferStatus();

	DEBUG_COUT.rdbuf(savedBuf);  // restore the original destination

	// Append the raw event logs verbatim.
	report << "\n====== Event Logs ======" << std::endl;
	report << "Sync Events:" << std::endl;
	for (const auto& entry : syncEventLog) {
		report << "  " << entry << std::endl;
	}

	report << "\nMemory Operations:" << std::endl;
	for (const auto& entry : memoryOperationLog) {
		report << "  " << entry << std::endl;
	}

	report << "\nPerformance Events:" << std::endl;
	for (const auto& entry : performanceEventLog) {
		report << "  " << entry << std::endl;
	}

	report.close();

	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO,
		"Debug report generated successfully: " << filename);

	return 0;
}

void GPUSolver::setDebugOutputFile(const std::string& filename)
{
	// Switch debug logging to `filename` (opened in append mode).
	// On open failure the stream pointer is left null.
	debugOutputFile = filename;

	// Fix: the original only deleted the old stream object when it was
	// still open, so an existing-but-closed debugFileStream leaked when
	// its pointer was overwritten below. Delete it whenever non-null.
	if (debugFileStream != nullptr) {
		if (debugFileStream->is_open()) {
			debugFileStream->close();
		}
		delete debugFileStream;
		debugFileStream = nullptr;
	}

	// Open the new log file and emit the standard header.
	debugFileStream = new std::ofstream(filename, std::ios::app);
	if (debugFileStream->is_open()) {
		writeDebugHeader();
		GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO,
			"Debug output file set to: " << filename);
	} else {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
			"Failed to open debug output file: " << filename);
		delete debugFileStream;
		debugFileStream = nullptr;
	}
}

void GPUSolver::enableDetailedLogging(bool enabled)
{
	// Toggle verbose event logging and record the state change.
	detailedLoggingEnabled = enabled;

	const char* state = enabled ? "enabled" : "disabled";
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO,
		"Detailed logging " << state);
}

bool GPUSolver::isDetailedLoggingEnabled()
{
	// Accessor for the detailed-logging flag.
	return this->detailedLoggingEnabled;
}

//==============================================================================
// 增强调试信息系统集成和实用功能
//==============================================================================

void GPUSolver::printCompleteSystemStatus()
{
	// Emit every status report in one combined dump: configuration,
	// device info, sync state, memory usage, performance, transfers.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO,
		"Generating complete system status report");

	DEBUG_COUT << "\n======== GPUSolver Complete System Status ========" << std::endl;

	printSystemConfiguration();      // model/buffer/debug settings
	printGPUDeviceInfo();            // hardware capabilities
	printSynchronizationStatus();    // host/device sync flags
	printMemoryUsageSummary();       // allocation totals
	printPerformanceStatistics();    // timing counters
	printDataTransferStatus();       // recent event logs

	DEBUG_COUT << "================================================" << std::endl;
}

void GPUSolver::enableComprehensiveDebugging()
{
	// Switch every debug, monitoring, and sync facility on at once —
	// the "turn everything up" configuration for troubleshooting.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO,
		"Enabling comprehensive debugging mode");

	enableDebug(true);                                        // master switch
	enableAllDebugModules();                                  // per-module tracing
	setDebugVerbosity(static_cast<int>(DebugLevel::DEBUG));   // most verbose level

	enableGPUMemoryMonitoring(true);   // memory tracking
	enablePerformanceTiming(true);     // timers
	enableDetailedLogging(true);       // verbose event logs

	enableAutomaticSync(true);         // automatic host/device sync

	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO,
		"Comprehensive debugging mode enabled successfully");
}

void GPUSolver::disableComprehensiveDebugging()
{
	// Inverse of enableComprehensiveDebugging(): shut down all debug
	// tracing and monitoring subsystems.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO,
		"Disabling comprehensive debugging mode");

	disableDebug();             // master switch off
	disableAllDebugModules();   // per-module tracing off

	disableGPUMemoryMonitoring();
	disablePerformanceTiming();
	enableDetailedLogging(false);

	DEBUG_COUT << "Comprehensive debugging mode disabled" << std::endl;
}

int GPUSolver::runDiagnostics()
{
	// Run a five-part health check — buffers, memory leaks, sync state,
	// GPU error counters, and performance — and return the total number
	// of issues found (0 means healthy).
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO,
		"Running comprehensive system diagnostics");

	DEBUG_COUT << "\n======== GPUSolver System Diagnostics ========" << std::endl;

	int issues = 0;

	// 1. GPU buffer validation.
	DEBUG_COUT << "1. Validating GPU Buffers..." << std::endl;
	int bufferErrors = validateAllBuffers();
	if (bufferErrors > 0) {
		DEBUG_COUT << "   ❌ Found " << bufferErrors << " buffer allocation issues" << std::endl;
		issues += bufferErrors;
	} else {
		DEBUG_COUT << "   ✅ All GPU buffers validated successfully" << std::endl;
	}

	// 2. GPU memory leak detection.
	DEBUG_COUT << "2. Checking GPU Memory Leaks..." << std::endl;
	if (detectGPUMemoryLeaks()) {
		DEBUG_COUT << "   ⚠️  GPU memory leaks detected" << std::endl;
		reportGPUMemoryLeaks();
		issues++;
	} else {
		DEBUG_COUT << "   ✅ No GPU memory leaks detected" << std::endl;
	}

	// 3. Host/device synchronization state.
	// Fix: the original tested `issues == 0` here, so any problem found
	// in sections 1-2 suppressed the "healthy" message even when every
	// sync flag was fine. Count sync issues separately.
	DEBUG_COUT << "3. Checking Synchronization Status..." << std::endl;
	int syncIssues = 0;
	if (!isNodeDataSynchronized() && nodeDataSize > 0) {
		DEBUG_COUT << "   ⚠️  Node data not synchronized" << std::endl;
		syncIssues++;
	}
	if (!isMaterialDataSynchronized() && maxMaterialStates > 0) {
		DEBUG_COUT << "   ⚠️  Material data not synchronized" << std::endl;
		syncIssues++;
	}
	if (!isElementGeometryUpdated() && numElements > 0) {
		DEBUG_COUT << "   ⚠️  Element geometry not updated" << std::endl;
		syncIssues++;
	}
	if (syncIssues == 0) {
		DEBUG_COUT << "   ✅ All synchronization states are healthy" << std::endl;
	}
	issues += syncIssues;

	// 4. Accumulated GPU error counters.
	DEBUG_COUT << "4. Checking GPU Error Statistics..." << std::endl;
	int totalErrors = getGPUErrorCount();
	if (totalErrors > 0) {
		DEBUG_COUT << "   ⚠️  Total GPU errors encountered: " << totalErrors << std::endl;
		printGPUErrorStatistics();
		issues++;
	} else {
		DEBUG_COUT << "   ✅ No GPU errors recorded" << std::endl;
	}

	// 5. Performance summary (informational only — never adds issues).
	DEBUG_COUT << "5. Performance Diagnostics..." << std::endl;
	double totalTime = getTotalElapsedTime(TimerType::TOTAL_SOLVE);
	if (totalTime > 0) {
		DEBUG_COUT << "   📊 Total solve time recorded: " << formatExecutionTime(totalTime * 1000) << std::endl;
		double gpuTime = getTotalElapsedTime(TimerType::GPU_COMPUTATION);
		double dataTime = getTotalElapsedTime(TimerType::DATA_SYNCHRONIZATION);
		if (gpuTime > 0 && dataTime > 0) {
			// Fraction of accounted time spent computing vs. moving data.
			double efficiency = gpuTime / (gpuTime + dataTime) * 100;
			DEBUG_COUT << "   📈 GPU compute efficiency: " << std::fixed << std::setprecision(1) << efficiency << "%" << std::endl;
		}
	} else {
		DEBUG_COUT << "   ℹ️  No performance data available" << std::endl;
	}

	// Summary.
	DEBUG_COUT << "\n======== Diagnostics Summary ========" << std::endl;
	if (issues == 0) {
		DEBUG_COUT << "🎉 System diagnostics completed successfully - No issues found!" << std::endl;
	} else {
		DEBUG_COUT << "⚠️  System diagnostics completed with " << issues << " issues found" << std::endl;
		DEBUG_COUT << "   Please review the issues above and take corrective action" << std::endl;
	}
	DEBUG_COUT << "=====================================" << std::endl;

	return issues;
}

void GPUSolver::setupProductionDebugging()
{
	// Production-grade debug configuration: only warnings/errors are
	// reported, and only the critical modules keep tracing enabled.
	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO,
		"Setting up production-level debugging configuration");

	enableDebug(true);
	setDebugVerbosity(static_cast<int>(DebugLevel::WARNING));  // warnings and errors only

	// Table-driven module configuration: keep error handling and memory
	// management on; silence the chatty transfer/compute/assembly logs.
	const std::pair<DebugModule, bool> moduleConfig[] = {
		{ DebugModule::ERROR_HANDLING,    true  },
		{ DebugModule::MEMORY_MANAGEMENT, true  },
		{ DebugModule::DATA_TRANSFER,     false },
		{ DebugModule::GPU_COMPUTE,       false },
		{ DebugModule::MATRIX_ASSEMBLY,   false },
	};
	for (const auto& cfg : moduleConfig) {
		setModuleDebugEnabled(static_cast<int>(cfg.first), cfg.second);
	}

	// Keep memory monitoring on, but no fine-grained timing or logging.
	enableGPUMemoryMonitoring(true);
	enablePerformanceTiming(false);
	enableDetailedLogging(false);

	GPU_DEBUG_PRINT(DebugModule::DATA_TRANSFER, DebugLevel::INFO,
		"Production debugging configuration applied");
}

//==============================================================================
// Jacobi Preconditioner Implementation
//==============================================================================

int GPUSolver::buildJacobiPreconditioner() {
	// Build the Jacobi (diagonal) preconditioner M^{-1} = diag(A)^{-1}
	// from the host-side CSR arrays (rowPtr/colInd/val) and upload it
	// to d_M_inv. Returns 0 on success, -1 on any CUDA failure.

	// Reuse a previously built preconditioner when it is still resident.
	if (preconditionerBuilt && d_M_inv != nullptr) {
		return 0;
	}

	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
		"Building Jacobi preconditioner for size=" << size);

	// Assemble the inverse diagonal on the host in a single pass over
	// the CSR rows. Rows with a missing or near-zero pivot fall back to
	// 1.0 so the preconditioner never divides by zero.
	std::vector<double> invDiag(size, 1.0);
	for (int row = 0; row < size; row++) {
		bool located = false;
		double pivot = 0.0;
		for (int k = rowPtr[row]; k < rowPtr[row + 1]; k++) {
			if (colInd[k] == row) {  // diagonal entry of this row
				pivot = val[k];
				located = true;
				break;
			}
		}

		if (!located) {
			GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::WARNING,
				"Row " << row << " has no diagonal element, set to 1.0");
		} else if (fabs(pivot) >= 1e-14) {
			invDiag[row] = 1.0 / pivot;
		}
		// else: near-zero pivot — keep the 1.0 fallback.
	}

	// Lazily allocate the device buffer.
	if (d_M_inv == nullptr) {
		cudaError_t allocErr = cudaMalloc(&d_M_inv, size * sizeof(double));
		if (allocErr != cudaSuccess) {
			GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
				"Failed to allocate d_M_inv: " << cudaGetErrorString(allocErr));
			return -1;
		}
	}

	// Upload the inverse diagonal.
	cudaError_t copyErr = cudaMemcpy(d_M_inv, invDiag.data(),
		size * sizeof(double), cudaMemcpyHostToDevice);
	if (copyErr != cudaSuccess) {
		GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
			"Failed to copy d_M_inv to GPU: " << cudaGetErrorString(copyErr));
		return -1;
	}

	preconditionerBuilt = true;

	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
		"Jacobi preconditioner built successfully");

	return 0;
}

// Preconditioner control interface
void GPUSolver::enablePreconditioner(bool enable) {
	// Turning the preconditioner off also clears the built flag so a
	// later re-enable triggers a rebuild from the current matrix.
	usePreconditioner = enable;
	if (!enable) {
		preconditionerBuilt = false;
	}

	const char* state = enable ? "enabled" : "disabled";
	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
		"Preconditioner " << state);
}

bool GPUSolver::isPreconditionerEnabled() const {
	// Accessor for the preconditioner enable flag.
	return this->usePreconditioner;
}

// ====== Warm Start Control Implementation ======
void GPUSolver::enableWarmStart(bool enable) {
	// Disabling warm start also invalidates any cached previous
	// solution so the next solve begins cold.
	warmStartEnabled = enable;
	if (!enable) {
		hasWarmStartData = false;
	}

	const char* state = enable ? "enabled" : "disabled";
	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
		"Warm start " << state);
}

bool GPUSolver::isWarmStartEnabled() const {
	// Accessor for the warm-start enable flag.
	return this->warmStartEnabled;
}

void GPUSolver::resetWarmStart() {
	// Drop the cached previous-solution vector and zero both start
	// counters so statistics begin fresh.
	hasWarmStartData = false;

	if (d_X_prev != nullptr) {
		cudaFree(d_X_prev);
		d_X_prev = nullptr;
	}

	warmStartCounter = 0;
	coldStartCounter = 0;

	GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
		"Warm start data reset");
}

void GPUSolver::getWarmStartStatistics(int& warmStarts, int& coldStarts) const {
	// Report how many solves reused a previous solution (warm) versus
	// how many started from scratch (cold), via the out parameters.
	warmStarts = this->warmStartCounter;
	coldStarts = this->coldStartCounter;
}
// ====== End of Warm Start Control Implementation ======

//==============================================================================
// 增强调试信息系统完成标记
//==============================================================================


