/**
 * @file op_runner.cpp
 *
 * Copyright (C) 2025. Huawei Technologies Co., Ltd. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
#include "op_runner.h"

#include <algorithm>  // std::min / std::max in RunOp
#include <cassert>
#include <cmath>      // std::floor
#include <cstdint>    // int64_t
#include <iomanip>    // std::setw / std::setprecision
#include <iostream>   // std::cout in the print helpers
#include <limits>
#include <vector>

#include "acl/acl_op_compiler.h"
// xx_xx_xx
#include "aclnn_range_custom.h"
#include "common.h"

using namespace std;

extern bool g_isDevice;

/**
 * @brief Bind the runner to an operator descriptor and cache the tensor counts.
 * @param opDesc operator description (not owned; must outlive this runner)
 */
OpRunner::OpRunner(OperatorDesc *opDesc)
    : opDesc_(opDesc),
      numInputs_(opDesc->inputDesc.size()),
      numOutputs_(opDesc->outputDesc.size()),
      workspace_(nullptr)
{
}

/**
 * @brief Release every resource owned by the runner.
 *
 * Frees the operator workspace, then walks the per-input and per-output
 * resource vectors (tensor handles, data buffers, device memory, host
 * staging memory). Host buffers are released with aclrtFree when running
 * on-device (g_isDevice) and aclrtFreeHost otherwise, mirroring how Init()
 * allocated them. Every slot is bounds-checked and nulled after release so
 * a partially-completed Init() cannot cause an invalid or double free.
 */
OpRunner::~OpRunner()
{
    INFO_LOG("OpRunner::~OpRunner ENTER (numInputs=%zu, numOutputs=%zu)", numInputs_, numOutputs_);

    if (workspace_ != nullptr) {
        INFO_LOG("OpRunner::~OpRunner: Free workspace_=%p", workspace_);
        (void)aclrtFree(workspace_);
        workspace_ = nullptr;
    } else {
        INFO_LOG("OpRunner::~OpRunner: workspace_ is nullptr, skip free");
    }

    // Slot-wise release helpers: each checks bounds and non-null before
    // freeing, then clears the slot.
    auto destroyTensorAt = [](auto &vec, size_t idx) {
        if (idx < vec.size() && vec[idx] != nullptr) {
            (void)aclDestroyTensor(vec[idx]);
            vec[idx] = nullptr;
        }
    };
    auto destroyBufferAt = [](auto &vec, size_t idx) {
        if (idx < vec.size() && vec[idx] != nullptr) {
            (void)aclDestroyDataBuffer(vec[idx]);
            vec[idx] = nullptr;
        }
    };
    auto freeDeviceAt = [](auto &vec, size_t idx) {
        if (idx < vec.size() && vec[idx] != nullptr) {
            (void)aclrtFree(vec[idx]);
            vec[idx] = nullptr;
        }
    };
    auto freeHostAt = [](auto &vec, size_t idx) {
        if (idx < vec.size() && vec[idx] != nullptr) {
            if (g_isDevice) {
                (void)aclrtFree(vec[idx]);
            } else {
                (void)aclrtFreeHost(vec[idx]);
            }
            vec[idx] = nullptr;
        }
    };

    for (size_t idx = 0; idx < numInputs_; ++idx) {
        INFO_LOG("OpRunner::~OpRunner: Destroying input[%zu]: tensor=%p, buffer=%p, dev=%p, host=%p",
                 idx,
                 (idx < inputTensor_.size() ? inputTensor_[idx] : nullptr),
                 (idx < inputBuffers_.size() ? inputBuffers_[idx] : nullptr),
                 (idx < devInputs_.size() ? devInputs_[idx] : nullptr),
                 (idx < hostInputs_.size() ? hostInputs_[idx] : nullptr));
        destroyTensorAt(inputTensor_, idx);
        destroyBufferAt(inputBuffers_, idx);
        freeDeviceAt(devInputs_, idx);
        freeHostAt(hostInputs_, idx);
    }

    for (size_t idx = 0; idx < numOutputs_; ++idx) {
        INFO_LOG("OpRunner::~OpRunner: Destroying output[%zu]: tensor=%p, buffer=%p, dev=%p, host=%p",
                 idx,
                 (idx < outputTensor_.size() ? outputTensor_[idx] : nullptr),
                 (idx < outputBuffers_.size() ? outputBuffers_[idx] : nullptr),
                 (idx < devOutputs_.size() ? devOutputs_[idx] : nullptr),
                 (idx < hostOutputs_.size() ? hostOutputs_[idx] : nullptr));
        destroyTensorAt(outputTensor_, idx);
        destroyBufferAt(outputBuffers_, idx);
        freeDeviceAt(devOutputs_, idx);
        freeHostAt(hostOutputs_, idx);
    }

    INFO_LOG("OpRunner::~OpRunner EXIT");
}

bool OpRunner::Init()
{
    INFO_LOG("OpRunner::Init started, inputs=%zu, outputs=%zu", numInputs_, numOutputs_);
    
    for (size_t i = 0; i < numInputs_; ++i) {
        auto size = GetInputSize(i);
        void *devMem = nullptr;
        if (aclrtMalloc(&devMem, size, ACL_MEM_MALLOC_HUGE_FIRST) != ACL_SUCCESS) {
            ERROR_LOG("Malloc device memory for input[%zu] failed, size=%zu", i, size);
            return false;
        }
        devInputs_.emplace_back(devMem);
        inputBuffers_.emplace_back(aclCreateDataBuffer(devMem, size));

        void *hostInput = nullptr;
        if (g_isDevice) {
            if (aclrtMalloc(&hostInput, size, ACL_MEM_MALLOC_HUGE_FIRST) != ACL_SUCCESS) {
                ERROR_LOG("Malloc device memory for input[%zu] failed, size=%zu", i, size);
                return false;
            }
        } else {
            if (aclrtMallocHost(&hostInput, size) != ACL_SUCCESS) {
                ERROR_LOG("Malloc host memory for input[%zu] failed, size=%zu", i, size);
                return false;
            }
        }
        if (hostInput == nullptr) {
            ERROR_LOG("Malloc memory for input[%zu] failed", i);
            return false;
        }
        hostInputs_.emplace_back(hostInput);

        aclTensor *inputTensor =
            aclCreateTensor(GetInputShape(i).data(), GetInputNumDims(i), GetInputDataType(i), nullptr, 0,
                            GetInputFormat(i), GetInputShape(i).data(), GetInputNumDims(i), devInputs_[i]);
        if (inputTensor == nullptr) {
            ERROR_LOG("Create Tensor for input[%zu] failed", i);
            return false;
        }
        inputTensor_.emplace_back(inputTensor);
    }

    for (size_t i = 0; i < numOutputs_; ++i) {
        auto size = GetOutputSize(i);
        void *devMem = nullptr;
        
        // 处理输出大小为0的情况
        if (size == 0) {
            INFO_LOG("Output[%zu] size is 0, skipping memory allocation", i);
            devMem = nullptr;
        } else {
            if (aclrtMalloc(&devMem, size, ACL_MEM_MALLOC_HUGE_FIRST) != ACL_SUCCESS) {
                ERROR_LOG("Malloc device memory for output[%zu] failed, size=%zu", i, size);
                return false;
            }
        }
        devOutputs_.emplace_back(devMem);
        
        // 创建数据缓冲区，如果size为0则创建空缓冲区
        aclDataBuffer *outputBuffer = (size == 0) ?
            aclCreateDataBuffer(nullptr, 0) :
            aclCreateDataBuffer(devMem, size);
        outputBuffers_.emplace_back(outputBuffer);

        void *hostOutput = nullptr;
        if (size > 0) {
            if (g_isDevice) {
                if (aclrtMalloc(&hostOutput, size, ACL_MEM_MALLOC_HUGE_FIRST) != ACL_SUCCESS) {
                    ERROR_LOG("Malloc device memory for output[%zu] failed, size=%zu", i, size);
                    return false;
                }
            } else {
                if (aclrtMallocHost(&hostOutput, size) != ACL_SUCCESS) {
                    ERROR_LOG("Malloc host memory for output[%zu] failed, size=%zu", i, size);
                    return false;
                }
            }
            if (hostOutput == nullptr) {
                ERROR_LOG("Malloc host memory for output[%zu] failed, size=%zu", i, size);
                return false;
            }
        } else {
            INFO_LOG("Output[%zu] size is 0, setting host output to nullptr", i);
        }
        hostOutputs_.emplace_back(hostOutput);

        // 创建输出张量，如果size为0则使用空指针
        aclTensor *outputTensor = nullptr;
        if (size > 0) {
            outputTensor = aclCreateTensor(GetOutputShape(i).data(), GetOutputNumDims(i), GetOutputDataType(i), nullptr, 0,
                                GetOutputFormat(i), GetOutputShape(i).data(), GetOutputNumDims(i), devOutputs_[i]);
        } else {
            // 对于空输出，创建一个空的张量描述
            std::vector<int64_t> emptyShape{0};
            outputTensor = aclCreateTensor(emptyShape.data(), 1, GetOutputDataType(i), nullptr, 0,
                                GetOutputFormat(i), emptyShape.data(), 1, nullptr);
        }
        
        if (outputTensor == nullptr) {
            ERROR_LOG("Create Tensor for output[%zu] failed", i);
            return false;
        }
        outputTensor_.emplace_back(outputTensor);
    }

    INFO_LOG("OpRunner::Init completed successfully");
    return true;
}

/// @brief Number of input tensors in the operator descriptor.
const size_t OpRunner::NumInputs()
{
    const size_t count = numInputs_;
    return count;
}

/// @brief Number of output tensors in the operator descriptor.
const size_t OpRunner::NumOutputs()
{
    const size_t count = numOutputs_;
    return count;
}

/// @brief Byte size of input @p index; 0 (with an error log) when out of range.
const size_t OpRunner::GetInputSize(size_t index) const
{
    const bool inRange = (index < numInputs_);
    if (!inRange) {
        ERROR_LOG("index out of range. index = %zu, numInputs = %zu", index, numInputs_);
        return 0;
    }
    return aclGetTensorDescSize(opDesc_->inputDesc[index]);
}

/// @brief Rank (dim count) of input @p index; 0 (with an error log) when out of range.
const size_t OpRunner::GetInputNumDims(size_t index) const
{
    const bool inRange = (index < numInputs_);
    if (!inRange) {
        ERROR_LOG("index out of range. index = %zu, numInputs = %zu", index, numInputs_);
        return 0;
    }
    return aclGetTensorDescNumDims(opDesc_->inputDesc[index]);
}

/// @brief Element type of input @p index; ACL_DT_UNDEFINED when out of range.
aclDataType OpRunner::GetInputDataType(size_t index) const
{
    const bool inRange = (index < numInputs_);
    if (!inRange) {
        ERROR_LOG("index out of range. index = %zu, numInputs = %zu", index, numInputs_);
        return ACL_DT_UNDEFINED;
    }
    return aclGetTensorDescType(opDesc_->inputDesc[index]);
}

/// @brief Memory format of input @p index; ACL_FORMAT_UNDEFINED when out of range.
aclFormat OpRunner::GetInputFormat(size_t index) const
{
    const bool inRange = (index < numInputs_);
    if (!inRange) {
        ERROR_LOG("index out of range. index = %zu, numInputs = %zu", index, numInputs_);
        return ACL_FORMAT_UNDEFINED;
    }
    return aclGetTensorDescFormat(opDesc_->inputDesc[index]);
}

/**
 * @brief Dimension sizes of input @p index.
 * @return the shape vector; empty when the index is out of range or any
 *         dimension query fails (an error is logged in both cases).
 */
std::vector<int64_t> OpRunner::GetInputShape(size_t index) const
{
    std::vector<int64_t> ret;
    if (index >= numInputs_) {
        ERROR_LOG("index out of range. index = %zu, numInputs = %zu", index, numInputs_);
        return ret;
    }

    auto desc = opDesc_->inputDesc[index];
    // Hoist the loop-invariant rank query and pre-size the result.
    const size_t numDims = aclGetTensorDescNumDims(desc);
    ret.reserve(numDims);
    for (size_t i = 0; i < numDims; ++i) {
        int64_t dimSize = 0;  // initialized: left untouched on query failure
        if (aclGetTensorDescDimV2(desc, i, &dimSize) != ACL_SUCCESS) {
            ERROR_LOG("get dims from tensor desc failed. dims index = %zu", i);
            ret.clear();
            return ret;
        }
        ret.emplace_back(dimSize);
    }

    return ret;
}

/// @brief Byte size of output @p index; 0 (with an error log) when out of range.
size_t OpRunner::GetOutputSize(size_t index) const
{
    const bool inRange = (index < numOutputs_);
    if (!inRange) {
        ERROR_LOG("index out of range. index = %zu, numOutputs = %zu", index, numOutputs_);
        return 0;
    }
    return aclGetTensorDescSize(opDesc_->outputDesc[index]);
}

/// @brief Rank (dim count) of output @p index; 0 (with an error log) when out of range.
const size_t OpRunner::GetOutputNumDims(size_t index) const
{
    const bool inRange = (index < numOutputs_);
    if (!inRange) {
        ERROR_LOG("index out of range. index = %zu, numOutputs = %zu", index, numOutputs_);
        return 0;
    }
    return aclGetTensorDescNumDims(opDesc_->outputDesc[index]);
}

/// @brief Element type of output @p index; ACL_DT_UNDEFINED when out of range.
aclDataType OpRunner::GetOutputDataType(size_t index) const
{
    const bool inRange = (index < numOutputs_);
    if (!inRange) {
        ERROR_LOG("index out of range. index = %zu, numOutputs = %zu", index, numOutputs_);
        return ACL_DT_UNDEFINED;
    }
    return aclGetTensorDescType(opDesc_->outputDesc[index]);
}

/// @brief Memory format of output @p index; ACL_FORMAT_UNDEFINED when out of range.
aclFormat OpRunner::GetOutputFormat(size_t index) const
{
    const bool inRange = (index < numOutputs_);
    if (!inRange) {
        ERROR_LOG("index out of range. index = %zu, numOutputs = %zu", index, numOutputs_);
        return ACL_FORMAT_UNDEFINED;
    }
    return aclGetTensorDescFormat(opDesc_->outputDesc[index]);
}

/**
 * @brief Dimension sizes of output @p index.
 * @return the shape vector; empty when the index is out of range or any
 *         dimension query fails (an error is logged in both cases).
 */
std::vector<int64_t> OpRunner::GetOutputShape(size_t index) const
{
    std::vector<int64_t> ret;
    if (index >= numOutputs_) {
        ERROR_LOG("index out of range. index = %zu, numOutputs = %zu", index, numOutputs_);
        return ret;
    }

    auto desc = opDesc_->outputDesc[index];
    // Hoist the loop-invariant rank query and pre-size the result.
    const size_t numDims = aclGetTensorDescNumDims(desc);
    ret.reserve(numDims);
    for (size_t i = 0; i < numDims; ++i) {
        int64_t dimSize = 0;  // initialized: left untouched on query failure
        if (aclGetTensorDescDimV2(desc, i, &dimSize) != ACL_SUCCESS) {
            ERROR_LOG("get dims from tensor desc failed. dims index = %zu", i);
            ret.clear();
            return ret;
        }
        ret.emplace_back(dimSize);
    }
    return ret;
}

/**
 * @brief Element count of input @p index; 0 (with an error log) when out of range.
 *
 * Bounds check uses numInputs_ for consistency with the other input
 * accessors (numInputs_ == opDesc_->inputDesc.size(), set in the ctor).
 */
size_t OpRunner::GetInputElementCount(size_t index) const
{
    if (index >= numInputs_) {
        ERROR_LOG("index out of range. index = %zu, numInputs = %zu", index, numInputs_);
        return 0;
    }

    return aclGetTensorDescElementCount(opDesc_->inputDesc[index]);
}

/**
 * @brief Element count of output @p index; 0 (with an error log) when out of range.
 *
 * Bounds check uses numOutputs_ for consistency with the other output
 * accessors (numOutputs_ == opDesc_->outputDesc.size(), set in the ctor).
 */
size_t OpRunner::GetOutputElementCount(size_t index) const
{
    if (index >= numOutputs_) {
        ERROR_LOG("index out of range. index = %zu, numOutputs = %zu", index, numOutputs_);
        return 0;
    }

    return aclGetTensorDescElementCount(opDesc_->outputDesc[index]);
}

bool OpRunner::RunOp()
{
    INFO_LOG("OpRunner::RunOp started");
    
    // 检查输出大小，如果所有输出都为0，则直接返回成功
    bool allOutputsEmpty = true;
    for (size_t i = 0; i < numOutputs_; ++i) {
        if (GetOutputSize(i) > 0) {
            allOutputsEmpty = false;
            break;
        }
    }
    
    if (allOutputsEmpty) {
        INFO_LOG("All outputs are empty, skipping operator execution");
        return true;
    }
    
    for (size_t i = 0; i < numInputs_; ++i) {
        auto size = GetInputSize(i);
        aclrtMemcpyKind kind = ACL_MEMCPY_HOST_TO_DEVICE;
        if (g_isDevice) {
            kind = ACL_MEMCPY_DEVICE_TO_DEVICE;
        }
        if (aclrtMemcpy(devInputs_[i], size, hostInputs_[i], size, kind) != ACL_SUCCESS) {
            ERROR_LOG("Copy input[%zu] failed, size=%zu", i, size);
            return false;
        }
        INFO_LOG("Copy input[%zu] success, size=%zu", i, size);
    }

    aclrtStream stream = nullptr;
    if (aclrtCreateStream(&stream) != ACL_SUCCESS) {
        ERROR_LOG("Create stream failed");
        return false;
    }
    INFO_LOG("Create stream success");

    if (numInputs_ < 3) {
        ERROR_LOG("Range op requires 3 inputs (start/end/step), got %zu", numInputs_);
        (void)aclrtDestroyStream(stream);
        return false;
    }

    // 详细调试：检查输入数据
    INFO_LOG("=== 输入数据调试信息 ===");
    for (size_t i = 0; i < numInputs_; ++i) {
        size_t inputSize = GetInputSize(i);
        size_t elemCount = GetInputElementCount(i);
        aclDataType dtype = GetInputDataType(i);
        aclFormat format = GetInputFormat(i);
        std::vector<int64_t> shape = GetInputShape(i);
        
        INFO_LOG("Input[%zu]: size=%zu, elemCount=%zu, dtype=%d, format=%d",
                 i, inputSize, elemCount, (int)dtype, (int)format);
        INFO_LOG("  Shape: [");
        for (size_t d = 0; d < shape.size(); ++d) {
            INFO_LOG("    %ld%s", shape[d], (d == shape.size()-1) ? "" : ",");
        }
        INFO_LOG("  ]");
        
        // 打印实际数据内容
        if (dtype == ACL_FLOAT && elemCount > 0) {
            const float *data = reinterpret_cast<const float *>(hostInputs_[i]);
            INFO_LOG("  Data[0]: %.6f", data[0]);
        }
    }

    const float *startPtr = reinterpret_cast<const float *>(hostInputs_[0]);
    const float *endPtr   = reinterpret_cast<const float *>(hostInputs_[1]);
    const float *stepPtr  = reinterpret_cast<const float *>(hostInputs_[2]);

    if (startPtr == nullptr || endPtr == nullptr || stepPtr == nullptr) {
        ERROR_LOG("Null input pointer: start=%p end=%p step=%p", startPtr, endPtr, stepPtr);
        (void)aclrtDestroyStream(stream);
        return false;
    }

    float startVal = startPtr[0];
    float endVal   = endPtr[0];
    float stepVal  = stepPtr[0];

    if (stepVal == 0.0f) {
        ERROR_LOG("Invalid step=0. start=%.6f end=%.6f", startVal, endVal);
        (void)aclrtDestroyStream(stream);
        return false;
    }

    if ((endVal > startVal && stepVal < 0) || (endVal < startVal && stepVal > 0)) {
        WARN_LOG("Step direction may be wrong. start=%.6f end=%.6f step=%.6f", startVal, endVal, stepVal);
    }

    double diff = static_cast<double>(endVal) - static_cast<double>(startVal);
    double steps = diff / static_cast<double>(stepVal);
    int64_t theoreticalCount = 0;
    if (stepVal > 0 ? (startVal <= endVal) : (startVal >= endVal)) {
        theoreticalCount = static_cast<int64_t>(std::floor(steps)) + 1;
    }
    INFO_LOG("Range debug: start=%.6f end=%.6f step=%.6f diff=%.6f theoreticalCount=%ld",
             startVal, endVal, stepVal, diff, theoreticalCount);

    std::vector<float> startVec{startVal};
    std::vector<float> endVec{endVal};
    std::vector<float> stepVec{stepVal};

    aclFloatArray *startArray = aclCreateFloatArray(startVec.data(), startVec.size());
    aclFloatArray *endArray   = aclCreateFloatArray(endVec.data(), endVec.size());
    aclFloatArray *stepArray  = aclCreateFloatArray(stepVec.data(), stepVec.size());
    if (startArray == nullptr || endArray == nullptr || stepArray == nullptr) {
        ERROR_LOG("Create aclFloatArray failed. startArray=%p endArray=%p stepArray=%p",
                  startArray, endArray, stepArray);
        if (startArray) aclDestroyFloatArray(startArray);
        if (endArray)   aclDestroyFloatArray(endArray);
        if (stepArray)  aclDestroyFloatArray(stepArray);
        (void)aclrtDestroyStream(stream);
        return false;
    }

    // ===== Debug 信息: 在调用 aclnnRangeCustomGetWorkspaceSize 之前打印所有相关变量 =====
    INFO_LOG("=== 算子调用前调试信息 ===");
    INFO_LOG("PreCall Range Debug: startVal=%.9f endVal=%.9f stepVal=%.9f", startVal, endVal, stepVal);
    INFO_LOG("Host pointers: startPtr=%p endPtr=%p stepPtr=%p", startPtr, endPtr, stepPtr);
    INFO_LOG("aclFloatArray pointers: startArray=%p len=%zu endArray=%p len=%zu stepArray=%p len=%zu",
             startArray, startVec.size(), endArray, endVec.size(), stepArray, stepVec.size());
    
    // 详细输出张量信息
    INFO_LOG("=== 输出张量详细信息 ===");
    size_t preOutSizeBytes = GetOutputSize(0);
    size_t preOutElemCount = GetOutputElementCount(0);
    INFO_LOG("Output tensor (index 0): ptr=%p devBuf=%p sizeBytes=%zu elemCount=%zu",
             outputTensor_[0], devOutputs_[0], preOutSizeBytes, preOutElemCount);
    aclDataType outTypeDbg = aclGetTensorDescType(opDesc_->outputDesc[0]);
    aclFormat outFmtDbg = aclGetTensorDescFormat(opDesc_->outputDesc[0]);
    INFO_LOG("Output tensor dtype=%d format=%d", (int)outTypeDbg, (int)outFmtDbg);
    size_t outDimsDbg = aclGetTensorDescNumDims(opDesc_->outputDesc[0]);
    INFO_LOG("Output tensor numDims=%zu (theoreticalCount=%ld)", outDimsDbg, theoreticalCount);
    for (size_t d = 0; d < outDimsDbg; ++d) {
        int64_t dimSizeDbg = -1;
        aclError dimRet = aclGetTensorDescDimV2(opDesc_->outputDesc[0], d, &dimSizeDbg);
        INFO_LOG("  Dim[%zu]: size=%ld (ret=%d)", d, dimSizeDbg, (int)dimRet);
    }
    
    // 检查输出张量形状是否与理论计算一致
    if (theoreticalCount > 0 && preOutElemCount != static_cast<size_t>(theoreticalCount)) {
        WARN_LOG("Output element count mismatch: expected=%ld, actual=%zu",
                 theoreticalCount, preOutElemCount);
    }
    // ===== 结束 Debug 信息 =====
    size_t workspaceSize = 0;
    aclOpExecutor *handle = nullptr;
 
    INFO_LOG("Calling aclnnRangeCustomGetWorkspaceSize...");
    aclError ret = aclnnRangeCustomGetWorkspaceSize(
        startArray,
        endArray,
        stepArray,
        outputTensor_[0],
        &workspaceSize,
        &handle);
    
    INFO_LOG("aclnnRangeCustomGetWorkspaceSize returned: ret=%d, workspaceSize=%zu, handle=%p",
             (int)ret, workspaceSize, handle);
    
    if (ret != ACL_SUCCESS) {
        ERROR_LOG("GetWorkspace failed ret=%d start=%.6f end=%.6f step=%.6f",
                  (int)ret, startVal, endVal, stepVal);
        aclDestroyFloatArray(startArray);
        aclDestroyFloatArray(endArray);
        aclDestroyFloatArray(stepArray);
        (void)aclrtDestroyStream(stream);
        return false;
    }
    
    if (handle == nullptr) {
        ERROR_LOG("aclnnRangeCustomGetWorkspaceSize returned null handle");
        aclDestroyFloatArray(startArray);
        aclDestroyFloatArray(endArray);
        aclDestroyFloatArray(stepArray);
        (void)aclrtDestroyStream(stream);
        return false;
    }
    
    INFO_LOG("Workspace size=%zu", workspaceSize);

    if (workspaceSize != 0) {
        if (aclrtMalloc(&workspace_, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST) != ACL_SUCCESS) {
            ERROR_LOG("Malloc workspace failed, size=%zu", workspaceSize);
            aclDestroyFloatArray(startArray);
            aclDestroyFloatArray(endArray);
            aclDestroyFloatArray(stepArray);
            (void)aclrtDestroyStream(stream);
            return false;
        }
    }

    ret = aclnnRangeCustom(workspace_, workspaceSize, handle, stream);
    if (ret != ACL_SUCCESS) {
        ERROR_LOG("aclnnRangeCustom execute failed ret=%d", (int)ret);
        aclDestroyFloatArray(startArray);
        aclDestroyFloatArray(endArray);
        aclDestroyFloatArray(stepArray);
        (void)aclrtDestroyStream(stream);
        return false;
    }
    INFO_LOG("aclnnRangeCustom launched");

    ret = aclrtSynchronizeStreamWithTimeout(stream, 5000);
    if (ret != ACL_SUCCESS) {
        ERROR_LOG("Stream sync failed ret=%d", (int)ret);
        aclDestroyFloatArray(startArray);
        aclDestroyFloatArray(endArray);
        aclDestroyFloatArray(stepArray);
        (void)aclrtDestroyStream(stream);
        return false;
    }
    INFO_LOG("Stream sync success 1");

    for (size_t i = 0; i < numOutputs_; ++i) {
        if(hostOutputs_[i] == nullptr||devOutputs_[i]==nullptr){ 
            INFO_LOG("Output[%zu] size is 0, skipping copy back", i);
            continue;
        }
   
        size_t outSize = GetOutputSize(i);
        aclrtMemcpyKind kind = g_isDevice ? ACL_MEMCPY_DEVICE_TO_DEVICE : ACL_MEMCPY_DEVICE_TO_HOST;
        if (aclrtMemcpy(hostOutputs_[i], outSize, devOutputs_[i], outSize, kind) != ACL_SUCCESS) {
            ERROR_LOG("Copy output[%zu] failed size=%zu", i, outSize);
            aclDestroyFloatArray(startArray);
            aclDestroyFloatArray(endArray);
            aclDestroyFloatArray(stepArray);
           (void)aclrtDestroyStream(stream);
            return false;
        }
        INFO_LOG("Copy output[%zu] success size=%zu", i, outSize);
    }

    size_t outCount = GetOutputElementCount(0);
    INFO_LOG("Range output element count=%zu (theoretical=%ld)", outCount, theoreticalCount);
    
    /*
     * 输出结果验证：仅在 outCount > 0 且 hostOutputs_[0] 非空时访问输出数据。
     * 这样可以避免在输出 shape 异常或为空时访问无效指针导致段错误。
     */
    if (outCount > 0 && hostOutputs_[0] != nullptr) {
        INFO_LOG("=== 输出结果验证 ===");
        const float *outputData = reinterpret_cast<const float *>(hostOutputs_[0]);
        INFO_LOG("First 5 output values:");
        for (size_t i = 0; i < std::min(outCount, static_cast<size_t>(5)); ++i) {
            INFO_LOG("  output[%zu] = %.6f", i, outputData[i]);
        }
        INFO_LOG("Last 5 output values:");
        for (size_t i = std::max(static_cast<size_t>(0), outCount - 5); i < outCount; ++i) {
            INFO_LOG("  output[%zu] = %.6f", i, outputData[i]);
        }
    } else {
        INFO_LOG("Skip result check since outCount=%zu or hostOutputs_[0]=%p", outCount, hostOutputs_[0]);
    }
    
    aclDestroyFloatArray(startArray);
    aclDestroyFloatArray(endArray);
    aclDestroyFloatArray(stepArray);
    (void)aclrtDestroyStream(stream);
    INFO_LOG("=== Range算子执行完成 ===");
    return true;
}

/**
 * @brief Print @p count elements right-aligned in 10-character columns,
 *        emitting a newline after every @p elementsPerRow values.
 *        A trailing partial row is left without a final newline.
 */
template <typename T> void DoPrintData(const T *data, size_t count, size_t elementsPerRow)
{
    assert(elementsPerRow != 0);
    for (size_t idx = 0; idx < count; ++idx) {
        std::cout << std::setw(10) << data[idx];
        const bool rowComplete = (idx % elementsPerRow == elementsPerRow - 1);
        if (rowComplete) {
            std::cout << std::endl;
        }
    }
}

/**
 * @brief Print @p count fp16 elements (converted to float, 4 significant
 *        digits, 10-character columns), one newline per full row.
 */
void DoPrintFp16Data(const aclFloat16 *data, size_t count, size_t elementsPerRow)
{
    assert(elementsPerRow != 0);
    for (size_t idx = 0; idx < count; ++idx) {
        std::cout << std::setw(10) << std::setprecision(4) << aclFloat16ToFloat(data[idx]);
        const bool rowComplete = (idx % elementsPerRow == elementsPerRow - 1);
        if (rowComplete) {
            std::cout << std::endl;
        }
    }
}

/**
 * @brief Dispatch a typed dump of @p count elements at @p data to stdout.
 * @param data            raw pointer to the host buffer (null is rejected)
 * @param count           number of elements to print
 * @param dataType        ACL element type used to reinterpret the buffer
 * @param elementsPerRow  column count per printed row
 */
void PrintData(const void *data, size_t count, aclDataType dataType, size_t elementsPerRow)
{
    if (data == nullptr) {
        ERROR_LOG("Print data failed. data is nullptr");
        return;
    }

    switch (dataType) {
        // Floating-point families.
        case ACL_FLOAT16:
            DoPrintFp16Data(reinterpret_cast<const aclFloat16 *>(data), count, elementsPerRow);
            break;
        case ACL_FLOAT:
            DoPrintData(reinterpret_cast<const float *>(data), count, elementsPerRow);
            break;
        case ACL_DOUBLE:
            DoPrintData(reinterpret_cast<const double *>(data), count, elementsPerRow);
            break;
        // Signed integers.
        case ACL_INT8:
            DoPrintData(reinterpret_cast<const int8_t *>(data), count, elementsPerRow);
            break;
        case ACL_INT16:
            DoPrintData(reinterpret_cast<const int16_t *>(data), count, elementsPerRow);
            break;
        case ACL_INT32:
            DoPrintData(reinterpret_cast<const int32_t *>(data), count, elementsPerRow);
            break;
        case ACL_INT64:
            DoPrintData(reinterpret_cast<const int64_t *>(data), count, elementsPerRow);
            break;
        // Unsigned integers.
        case ACL_UINT8:
            DoPrintData(reinterpret_cast<const uint8_t *>(data), count, elementsPerRow);
            break;
        case ACL_UINT16:
            DoPrintData(reinterpret_cast<const uint16_t *>(data), count, elementsPerRow);
            break;
        case ACL_UINT32:
            DoPrintData(reinterpret_cast<const uint32_t *>(data), count, elementsPerRow);
            break;
        case ACL_UINT64:
            DoPrintData(reinterpret_cast<const uint64_t *>(data), count, elementsPerRow);
            break;
        // Boolean.
        case ACL_BOOL:
            DoPrintData(reinterpret_cast<const bool *>(data), count, elementsPerRow);
            break;
        default:
            ERROR_LOG("Unsupported type: %d", dataType);
    }
}

/**
 * @brief Print the host-side data of input @p index to stdout.
 * @param index              input tensor index
 * @param numElementsPerRow  elements per printed row
 */
void OpRunner::PrintInput(size_t index, size_t numElementsPerRow)
{
    if (index >= numInputs_) {
        // FIX: the message previously said "numOutputs" for an input-range check.
        ERROR_LOG("index out of range. index = %zu, numInputs = %zu", index, numInputs_);
        return;
    }

    auto desc = opDesc_->inputDesc[index];
    PrintData(hostInputs_[index], GetInputElementCount(index), aclGetTensorDescType(desc), numElementsPerRow);
}

/**
 * @brief Print the host-side data of output @p index to stdout.
 * @param index              output tensor index
 * @param numElementsPerRow  elements per printed row
 */
void OpRunner::PrintOutput(size_t index, size_t numElementsPerRow)
{
    const bool inRange = (index < numOutputs_);
    if (!inRange) {
        ERROR_LOG("index out of range. index = %zu, numOutputs = %zu", index, numOutputs_);
        return;
    }

    auto desc = opDesc_->outputDesc[index];
    PrintData(hostOutputs_[index], GetOutputElementCount(index), aclGetTensorDescType(desc), numElementsPerRow);
}

