/**
* @file memory.cpp
*
* Copyright (c) Huawei Technologies Co., Ltd. 2019-2020. All Rights reserved.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/

#include "acl/acl_rt.h"
#include "runtime/mem.h"
#include "runtime/dev.h"
#include "log_inner.h"
#include "common_inner.h"
#include "error_codes_inner.h"
#include "utils/math_utils.h"
#include "toolchain/profiling_manager.h"
#include "toolchain/resource_statistics.h"

namespace {
/**
 * Translate an ACL memcpy direction into the corresponding runtime memcpy kind.
 *
 * @param kind    ACL memcpy direction requested by the caller.
 * @param rtKind  Output: runtime memcpy kind; written only for a valid @p kind.
 * @return ACL_SUCCESS on success, ACL_ERROR_INVALID_PARAM for an unknown kind.
 */
inline aclError MemcpyKindTranslate(const aclrtMemcpyKind kind, rtMemcpyKind_t &rtKind)
{
    switch (kind) {
        case ACL_MEMCPY_HOST_TO_DEVICE: {
            rtKind = RT_MEMCPY_HOST_TO_DEVICE;
            break;
        }
        case ACL_MEMCPY_DEVICE_TO_DEVICE: {
            rtKind = RT_MEMCPY_DEVICE_TO_DEVICE;
            break;
        }
        case ACL_MEMCPY_DEVICE_TO_HOST: {
            rtKind = RT_MEMCPY_DEVICE_TO_HOST;
            break;
        }
        case ACL_MEMCPY_HOST_TO_HOST: {
            rtKind = RT_MEMCPY_HOST_TO_HOST;
            break;
        }
        default: {
            ACL_LOG_ERROR("[Check][MemcpyKindTranslate]param kind invalid, which is %d.", static_cast<int32_t>(kind));
            // Fix: adjacent string literals are concatenated by the compiler; the original
            // was missing the separating space, producing "...HOST_TO_DEVICE,ACL_MEMCPY_...".
            acl::AclErrorLogManager::ReportInputError(acl::INVALID_PARAM_MSG,
                std::vector<std::string>({"param", "value", "reason"}),
                std::vector<std::string>({"kind", std::to_string(kind),
                "Memcpy kind should be ACL_MEMCPY_HOST_TO_DEVICE, "
                "ACL_MEMCPY_DEVICE_TO_DEVICE, ACL_MEMCPY_DEVICE_TO_HOST or ACL_MEMCPY_HOST_TO_HOST."}));
            return ACL_ERROR_INVALID_PARAM;
        }
    }
    return ACL_SUCCESS;
}

/**
 * Validate parameters for 2D memcpy and translate the ACL kind to a runtime kind.
 *
 * Only host<->device directions are supported for 2D copies. The row width may
 * equal either pitch, but must not exceed them.
 *
 * @param dst     Destination base pointer; must not be null.
 * @param dpitch  Destination pitch in bytes.
 * @param src     Source base pointer; must not be null.
 * @param spitch  Source pitch in bytes.
 * @param width   Bytes copied per row; must be positive and <= both pitches.
 * @param height  Number of rows; must be positive.
 * @param kind    Requested ACL memcpy direction.
 * @param rtKind  Output: runtime memcpy kind; written only for a valid @p kind.
 * @return ACL_SUCCESS when every parameter is valid, ACL_ERROR_INVALID_PARAM otherwise.
 */
aclError CheckMemcpy2dParam(const void *const dst, const size_t dpitch, const void *const src, const size_t spitch,
    const size_t width, const size_t height, const aclrtMemcpyKind kind, rtMemcpyKind_t &rtKind)
{
    ACL_LOG_DEBUG("start to execute CheckMemcpy2dParam");
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(dst);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(src);
    ACL_REQUIRES_POSITIVE(height);
    ACL_REQUIRES_POSITIVE(width);

    if ((width > spitch) || (width > dpitch)) {
        // Fix: the check allows width == pitch, so the message must say
        // "must not be larger than" rather than "must be smaller than".
        ACL_LOG_ERROR("[Check][Width]input param width[%zu] must not be larger than spitch[%zu] and dpitch[%zu]",
            width, spitch, dpitch);
        acl::AclErrorLogManager::ReportInputError(acl::INVALID_PARAM_MSG,
            std::vector<std::string>({"param", "value", "reason"}),
            std::vector<std::string>({"width", std::to_string(width),
                "must not be larger than spitch and dpitch"}));
        return ACL_ERROR_INVALID_PARAM;
    }

    switch (kind) {
        case ACL_MEMCPY_HOST_TO_DEVICE: {
            rtKind = RT_MEMCPY_HOST_TO_DEVICE;
            break;
        }
        case ACL_MEMCPY_DEVICE_TO_HOST: {
            rtKind = RT_MEMCPY_DEVICE_TO_HOST;
            break;
        }
        default: {
            ACL_LOG_ERROR("[Check][Kind]invalid kind of memcpy, kind = %d", static_cast<int32_t>(kind));
            acl::AclErrorLogManager::ReportInputError(acl::INVALID_PARAM_MSG,
                std::vector<std::string>({"param", "value", "reason"}),
                std::vector<std::string>({"kind", std::to_string(kind), "invalid kind of memcpy"}));
            return ACL_ERROR_INVALID_PARAM;
        }
    }
    return ACL_SUCCESS;
}
}

namespace acl {
/**
 * Shared device-memory allocation helper for the aclrtMalloc* entry points.
 *
 * @param devPtr    Output: receives the allocated device address; must not be null.
 * @param size      Requested size in bytes; must be greater than zero.
 * @param isPadding Whether the size is padded in addition to alignment.
 * @param policy    Page-allocation policy; unrecognized values fall back to the default.
 * @param moduleId  Module id recorded with the allocation.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclMallocMemInner(void **devPtr, const size_t size, bool isPadding,
                           const aclrtMemMallocPolicy policy, const uint16_t moduleId)
{
    ACL_PROFILING_REG(acl::AclProfType::AclMallocMemInner);
    ACL_ADD_APPLY_TOTAL_COUNT(acl::ACL_STATISTICS_MALLOC_FREE);
    ACL_LOG_DEBUG("start to execute aclMallocMemInner, size = %zu", size);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(devPtr);
    // A zero-byte allocation is rejected up front.
    if (size == 0UL) {
        ACL_LOG_ERROR("malloc size must be greater than zero");
        acl::AclErrorLogManager::ReportInputError(acl::INVALID_PARAM_MSG,
            std::vector<std::string>({"param", "value", "reason"}),
            std::vector<std::string>({"size", std::to_string(size), "size must be greater than zero"}));
        return ACL_ERROR_INVALID_PARAM;
    }
    size_t alignedSize;
    ACL_REQUIRES_OK(acl::GetAlignedAndPaddingSize(size, isPadding, alignedSize));
    // Map the ACL allocation policy onto the runtime memory flags.
    uint32_t memFlags = RT_MEMORY_DEFAULT;
    switch (policy) {
        case ACL_MEM_MALLOC_HUGE_FIRST:
            memFlags |= RT_MEMORY_POLICY_HUGE_PAGE_FIRST;
            break;
        case ACL_MEM_MALLOC_HUGE_ONLY:
            memFlags |= RT_MEMORY_POLICY_HUGE_PAGE_ONLY;
            break;
        case ACL_MEM_MALLOC_NORMAL_ONLY:
            memFlags |= RT_MEMORY_POLICY_DEFAULT_PAGE_ONLY;
            break;
        case ACL_MEM_MALLOC_HUGE_FIRST_P2P:
            memFlags |= RT_MEMORY_POLICY_HUGE_PAGE_FIRST_P2P;
            break;
        case ACL_MEM_MALLOC_HUGE_ONLY_P2P:
            memFlags |= RT_MEMORY_POLICY_HUGE_PAGE_ONLY_P2P;
            break;
        case ACL_MEM_MALLOC_NORMAL_ONLY_P2P:
            memFlags |= RT_MEMORY_POLICY_DEFAULT_PAGE_ONLY_P2P;
            break;
        default:
            // Unknown policy values silently fall back to the default flags.
            memFlags = RT_MEMORY_DEFAULT;
            break;
    }
    const rtError_t allocRet = rtMalloc(devPtr, alignedSize, memFlags, moduleId);
    if (allocRet == RT_ERROR_NONE) {
        ACL_ADD_APPLY_SUCCESS_COUNT(acl::ACL_STATISTICS_MALLOC_FREE);
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("alloc device memory failed, runtime result = %d", allocRet);
    return ACL_GET_ERRCODE_RTS(allocRet);
}
} // namespace acl

/**
 * Allocate device memory with padding applied to the requested size.
 *
 * @param devPtr Output: receives the allocated device address.
 * @param size   Requested size in bytes; must be greater than zero.
 * @param policy Page-allocation policy.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMalloc(void **devPtr, size_t size, aclrtMemMallocPolicy policy)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtMalloc);
    ACL_LOG_DEBUG("start to execute aclrtMalloc, size = %zu", size);
    // Delegate with padding enabled, under the application module id.
    const aclError allocResult = acl::aclMallocMemInner(devPtr, size, true, policy, acl::APP_MODE_ID_U16);
    return allocResult;
}

/**
 * Allocate device memory with 32-byte alignment only (no size padding).
 *
 * @param devPtr Output: receives the allocated device address.
 * @param size   Requested size in bytes; must be greater than zero.
 * @param policy Page-allocation policy.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMallocAlign32(void **devPtr, size_t size, aclrtMemMallocPolicy policy)
{
    ACL_LOG_DEBUG("start to execute aclrtMallocAlign32, size = %zu", size);
    // Delegate with padding disabled, under the application module id.
    const aclError allocResult = acl::aclMallocMemInner(devPtr, size, false, policy, acl::APP_MODE_ID_U16);
    return allocResult;
}

/**
 * Allocate cacheable device memory.
 *
 * @param devPtr Output: receives the allocated device address; must not be null.
 * @param size   Requested size in bytes; must be greater than zero.
 * @param policy Page-allocation policy (P2P policies are not applicable here).
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMallocCached(void **devPtr, size_t size, aclrtMemMallocPolicy policy)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtMallocCached);
    ACL_ADD_APPLY_TOTAL_COUNT(acl::ACL_STATISTICS_MALLOC_FREE);
    ACL_LOG_DEBUG("start to execute aclrtMallocCached, size = %zu", size);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(devPtr);

    if (size == 0UL) {
        ACL_LOG_INNER_ERROR("malloc size must be greater than zero");
        return ACL_ERROR_INVALID_PARAM;
    }
    size_t paddedSize;
    ACL_REQUIRES_OK(acl::GetAlignedAndPaddingSize(size, true, paddedSize));
    // Map the ACL allocation policy onto the runtime memory flags.
    uint32_t memFlags = RT_MEMORY_DEFAULT;
    switch (policy) {
        case ACL_MEM_MALLOC_HUGE_FIRST:
            memFlags |= RT_MEMORY_POLICY_HUGE_PAGE_FIRST;
            break;
        case ACL_MEM_MALLOC_HUGE_ONLY:
            memFlags |= RT_MEMORY_POLICY_HUGE_PAGE_ONLY;
            break;
        case ACL_MEM_MALLOC_NORMAL_ONLY:
            memFlags |= RT_MEMORY_POLICY_DEFAULT_PAGE_ONLY;
            break;
        default:
            // Unknown policies fall back to the default flags.
            memFlags = RT_MEMORY_DEFAULT;
            break;
    }
    const rtError_t allocRet = rtMallocCached(devPtr, paddedSize, memFlags, acl::APP_MODE_ID_U16);
    if (allocRet == RT_ERROR_NONE) {
        ACL_ADD_APPLY_SUCCESS_COUNT(acl::ACL_STATISTICS_MALLOC_FREE);
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("alloc device memory with cache failed, runtime result = %d", static_cast<int32_t>(allocRet));
    return ACL_GET_ERRCODE_RTS(allocRet);
}

/**
 * Flush cached device memory back to DDR.
 *
 * @param devPtr Start of the region to flush; must not be null.
 * @param size   Region size in bytes; must be greater than zero.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMemFlush(void *devPtr, size_t size)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtMemFlush);
    ACL_LOG_DEBUG("start to execute aclrtMemFlush, size = %zu", size);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(devPtr);

    if (size == 0UL) {
        ACL_LOG_INNER_ERROR("flush cache size must be greater than zero");
        return ACL_ERROR_INVALID_PARAM;
    }
    const rtError_t flushRet = rtFlushCache(devPtr, size);
    if (flushRet == RT_ERROR_NONE) {
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("flush cache data to ddr failed, runtime result = %d, size = %zu",
        static_cast<int32_t>(flushRet), size);
    return ACL_GET_ERRCODE_RTS(flushRet);
}

/**
 * Invalidate cached device memory so subsequent reads fetch fresh data.
 *
 * @param devPtr Start of the region to invalidate; must not be null.
 * @param size   Region size in bytes; must be greater than zero.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMemInvalidate(void *devPtr, size_t size)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtMemInvalidate);
    ACL_LOG_INFO("start to execute aclrtMemInvalidate, size = %zu", size);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(devPtr);

    if (size == 0UL) {
        ACL_LOG_INNER_ERROR("invalidate cache size must be greater than zero");
        return ACL_ERROR_INVALID_PARAM;
    }
    const rtError_t invalidateRet = rtInvalidCache(devPtr, size);
    if (invalidateRet == RT_ERROR_NONE) {
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("invalidate cache data failed, runtime result = %d, size = %zu",
        static_cast<int32_t>(invalidateRet), size);
    return ACL_GET_ERRCODE_RTS(invalidateRet);
}

/**
 * Release device memory previously allocated via aclrtMalloc*.
 *
 * @param devPtr Device address to free; must not be null.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtFree(void *devPtr)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtFree);
    ACL_ADD_RELEASE_TOTAL_COUNT(acl::ACL_STATISTICS_MALLOC_FREE);
    ACL_LOG_DEBUG("start to execute aclrtFree");
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(devPtr);

    const rtError_t freeRet = rtFree(devPtr);
    if (freeRet == RT_ERROR_NONE) {
        ACL_ADD_RELEASE_SUCCESS_COUNT(acl::ACL_STATISTICS_MALLOC_FREE);
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("free device memory failed, runtime result = %d", freeRet);
    return ACL_GET_ERRCODE_RTS(freeRet);
}

/**
 * Allocate page-locked host memory.
 *
 * @param hostPtr Output: receives the allocated host address; must not be null.
 * @param size    Requested size in bytes; must be greater than zero.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMallocHost(void **hostPtr, size_t size)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtMallocHost);
    ACL_ADD_APPLY_TOTAL_COUNT(acl::ACL_STATISTICS_MALLOC_FREE_HOST);
    ACL_LOG_DEBUG("start to execute aclrtMallocHost, size = %zu", size);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(hostPtr);
    // A zero-byte allocation is rejected up front.
    if (size == 0UL) {
        ACL_LOG_INNER_ERROR("malloc size must be greater than zero");
        return ACL_ERROR_INVALID_PARAM;
    }
    const rtError_t allocRet = rtMallocHost(hostPtr, size, acl::APP_MODE_ID_U16);
    if (allocRet == RT_ERROR_NONE) {
        ACL_ADD_APPLY_SUCCESS_COUNT(acl::ACL_STATISTICS_MALLOC_FREE_HOST);
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("alloc host memory failed, runtime result = %d", allocRet);
    return ACL_GET_ERRCODE_RTS(allocRet);
}

/**
 * Release host memory previously allocated via aclrtMallocHost.
 *
 * @param hostPtr Host address to free; must not be null.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtFreeHost(void *hostPtr)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtFreeHost);
    ACL_ADD_RELEASE_TOTAL_COUNT(acl::ACL_STATISTICS_MALLOC_FREE_HOST);
    ACL_LOG_DEBUG("start to execute aclrtFreeHost");
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(hostPtr);
    const rtError_t freeRet = rtFreeHost(hostPtr);
    if (freeRet == RT_ERROR_NONE) {
        ACL_ADD_RELEASE_SUCCESS_COUNT(acl::ACL_STATISTICS_MALLOC_FREE_HOST);
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("free host memory failed, runtime result = %d", freeRet);
    return ACL_GET_ERRCODE_RTS(freeRet);
}

/**
 * Synchronous memcpy between host and/or device memory.
 *
 * @param dst     Destination buffer; must not be null.
 * @param destMax Capacity of the destination buffer in bytes.
 * @param src     Source buffer; must not be null.
 * @param count   Number of bytes to copy.
 * @param kind    Copy direction.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMemcpy(void *dst, size_t destMax, const void *src, size_t count, aclrtMemcpyKind kind)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtMemcpy);
    ACL_LOG_INFO("start to execute aclrtMemcpy, destMaxSize = %zu, srcSize = %zu, kind = %d",
        destMax, count, static_cast<int32_t>(kind));
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(dst);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(src);

    // Translate the ACL direction into the runtime direction before copying.
    rtMemcpyKind_t translatedKind = RT_MEMCPY_RESERVED;
    const aclError translateRet = MemcpyKindTranslate(kind, translatedKind);
    if (translateRet != ACL_SUCCESS) {
        ACL_LOG_INNER_ERROR("invalid kind of memcpy, kind = %d", static_cast<int32_t>(kind));
        return translateRet;
    }

    const rtError_t copyRet = rtMemcpy(dst, destMax, src, count, translatedKind);
    if (copyRet == RT_ERROR_NONE) {
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("synchronized memcpy failed, kind = %d, runtime result = %d",
        static_cast<int32_t>(kind), static_cast<int32_t>(copyRet));
    return ACL_GET_ERRCODE_RTS(copyRet);
}

/**
 * Synchronously fill device memory with a byte value.
 *
 * @param devPtr   Destination buffer; must not be null.
 * @param maxCount Capacity of the destination buffer in bytes.
 * @param value    Fill value (passed through to the runtime as unsigned).
 * @param count    Number of bytes to set.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMemset(void *devPtr, size_t maxCount, int32_t value, size_t count)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtMemset);
    ACL_LOG_DEBUG("start to execute aclrtMemset, maxSize = %zu, size = %zu, value = %d",
        maxCount, count, value);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(devPtr);

    const rtError_t setRet = rtMemset(devPtr, maxCount, static_cast<uint32_t>(value), count);
    if (setRet == RT_ERROR_NONE) {
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("set memory failed, runtime result = %d", static_cast<int32_t>(setRet));
    return ACL_GET_ERRCODE_RTS(setRet);
}

/**
 * Asynchronous memcpy between host and/or device memory on a stream.
 *
 * @param dst     Destination buffer; must not be null.
 * @param destMax Capacity of the destination buffer in bytes.
 * @param src     Source buffer; must not be null.
 * @param count   Number of bytes to copy.
 * @param kind    Copy direction.
 * @param stream  Stream on which the copy is enqueued.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMemcpyAsync(void *dst, size_t destMax, const void *src, size_t count,
                          aclrtMemcpyKind kind, aclrtStream stream)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtMemcpyAsync);
    ACL_LOG_DEBUG("start to execute aclrtMemcpyAsync, destMaxSize = %zu, srcSize = %zu, kind = %d",
        destMax, count, static_cast<int32_t>(kind));
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(dst);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(src);
    // Translate the ACL direction into the runtime direction before enqueuing.
    rtMemcpyKind_t translatedKind = RT_MEMCPY_RESERVED;
    const aclError translateRet = MemcpyKindTranslate(kind, translatedKind);
    if (translateRet != ACL_SUCCESS) {
        ACL_LOG_INNER_ERROR("invalid kind of memcpy, kind = %d", static_cast<int32_t>(kind));
        return translateRet;
    }

    const rtError_t copyRet = rtMemcpyAsync(dst, destMax, src, count, translatedKind,
                                            static_cast<rtStream_t>(stream));
    if (copyRet == RT_ERROR_NONE) {
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("asynchronized memcpy failed, kind = %d, runtime result = %d",
        static_cast<int32_t>(kind), static_cast<int32_t>(copyRet));
    return ACL_GET_ERRCODE_RTS(copyRet);
}

/**
 * Asynchronously fill device memory with a byte value on a stream.
 *
 * @param devPtr   Destination buffer; must not be null.
 * @param maxCount Capacity of the destination buffer in bytes.
 * @param value    Fill value (passed through to the runtime as unsigned).
 * @param count    Number of bytes to set.
 * @param stream   Stream on which the memset is enqueued.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMemsetAsync(void *devPtr, size_t maxCount, int32_t value, size_t count, aclrtStream stream)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtMemsetAsync);
    ACL_LOG_DEBUG("start to execute aclrtMemsetAsync, maxCount = %zu, value = %d, count = %zu",
        maxCount, value, count);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(devPtr);

    const rtError_t setRet = rtMemsetAsync(devPtr, maxCount, static_cast<uint32_t>(value), count, stream);
    if (setRet == RT_ERROR_NONE) {
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("asynchronized memset failed, runtime result = %d", static_cast<int32_t>(setRet));
    return ACL_GET_ERRCODE_RTS(setRet);
}

/**
 * Query whether deviceId can directly access memory on peerDeviceId.
 *
 * @param canAccessPeer Output: peer-access capability result; must not be null.
 * @param deviceId      Logical id of the accessing device.
 * @param peerDeviceId  Logical id of the peer device; must differ from deviceId.
 * @return ACL_SUCCESS on success, ACL_ERROR_INVALID_PARAM or a mapped runtime error otherwise.
 */
aclError aclrtDeviceCanAccessPeer(int32_t *canAccessPeer, int32_t deviceId, int32_t peerDeviceId)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtDeviceCanAccessPeer);
    ACL_LOG_INFO("start to execute aclrtDeviceCanAccessPeer");
    // Fix: validate the output pointer like every other aclrt* entry point does;
    // previously a null canAccessPeer was passed straight to the runtime.
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(canAccessPeer);

    if (deviceId == peerDeviceId) {
        // Fix: "peeerDeviceId" typo in the log message.
        ACL_LOG_INNER_ERROR("deviceId shouldn't be equal to peerDeviceId");
        return ACL_ERROR_INVALID_PARAM;
    }

    // The runtime peer query takes a physical id for the peer, so translate first.
    uint32_t peerPhyId = 0U;
    rtError_t rtErr = rtGetDevicePhyIdByIndex(static_cast<uint32_t>(peerDeviceId), &peerPhyId);
    if (rtErr != RT_ERROR_NONE) {
        ACL_LOG_CALL_ERROR("call rtGetDevicePhyIdByIndex failed, deviceId = %d, peerDeviceId = %d, "
            "runtime result = %d", deviceId, peerDeviceId, static_cast<int32_t>(rtErr));
        return ACL_GET_ERRCODE_RTS(rtErr);
    }

    rtErr = rtDeviceCanAccessPeer(canAccessPeer, static_cast<uint32_t>(deviceId), peerPhyId);
    if (rtErr != RT_ERROR_NONE) {
        ACL_LOG_CALL_ERROR("call rtDeviceCanAccessPeer failed, deviceId = %d, peerPhyId = %u, "
            "runtime result = %d", deviceId, peerPhyId, static_cast<int32_t>(rtErr));
        return ACL_GET_ERRCODE_RTS(rtErr);
    }

    return ACL_SUCCESS;
}

/**
 * Enable peer-to-peer access from the current device to peerDeviceId.
 *
 * @param peerDeviceId Logical id of the peer device; must differ from the current device.
 * @param flags        Reserved; must be 0.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtDeviceEnablePeerAccess(int32_t peerDeviceId, uint32_t flags)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtDeviceEnablePeerAccess);
    ACL_LOG_INFO("start to execute aclrtDeviceEnablePeerAccess");

    // flags is reserved for future use and must currently be 0.
    if (flags != 0U) {
        ACL_LOG_INNER_ERROR("the flags must be 0, but current is %u", flags);
        return ACL_ERROR_FEATURE_UNSUPPORTED;
    }

    int32_t deviceId = 0;
    rtError_t rtErr = rtGetDevice(&deviceId);
    if (rtErr != RT_ERROR_NONE) {
        ACL_LOG_CALL_ERROR("call rtGetDevice failed, runtime result = %d", static_cast<int32_t>(rtErr));
        return ACL_GET_ERRCODE_RTS(rtErr);
    }

    if (deviceId == peerDeviceId) {
        // Fix: "peeerDeviceId" typo in the log message.
        ACL_LOG_INNER_ERROR("deviceId shouldn't be equal to peerDeviceId, deviceId = %d, peerDeviceId = %d",
            deviceId, peerDeviceId);
        return ACL_ERROR_INVALID_PARAM;
    }

    // rtEnableP2P takes a physical id for the peer, so translate first.
    uint32_t peerPhyId = 0U;
    rtErr = rtGetDevicePhyIdByIndex(static_cast<uint32_t>(peerDeviceId), &peerPhyId);
    if (rtErr != RT_ERROR_NONE) {
        ACL_LOG_CALL_ERROR("call rtGetDevicePhyIdByIndex failed, peerDeviceId = %d, runtime result = %d",
            peerDeviceId, static_cast<int32_t>(rtErr));
        return ACL_GET_ERRCODE_RTS(rtErr);
    }

    rtErr = rtEnableP2P(static_cast<uint32_t>(deviceId), peerPhyId, flags);
    if (rtErr != RT_ERROR_NONE) {
        ACL_LOG_CALL_ERROR("call rtEnableP2P failed, deviceId = %d, peerPhyId = %u, runtime result = %d",
            deviceId, peerPhyId, static_cast<int32_t>(rtErr));
        return ACL_GET_ERRCODE_RTS(rtErr);
    }

    return ACL_SUCCESS;
}

/**
 * Disable peer-to-peer access from the current device to peerDeviceId.
 *
 * @param peerDeviceId Logical id of the peer device; must differ from the current device.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtDeviceDisablePeerAccess(int32_t peerDeviceId)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtDeviceDisablePeerAccess);
    ACL_LOG_INFO("start to execute aclrtDeviceDisablePeerAccess");

    int32_t deviceId = 0;
    rtError_t rtErr = rtGetDevice(&deviceId);
    if (rtErr != RT_ERROR_NONE) {
        ACL_LOG_CALL_ERROR("call rtGetDevice failed, runtime result = %d", static_cast<int32_t>(rtErr));
        return ACL_GET_ERRCODE_RTS(rtErr);
    }

    if (deviceId == peerDeviceId) {
        // Fix: "peeerDeviceId" typo in the log message.
        ACL_LOG_INNER_ERROR("deviceId shouldn't be equal to peerDeviceId, deviceId = %d, peerDeviceId = %d",
            deviceId, peerDeviceId);
        return ACL_ERROR_INVALID_PARAM;
    }

    // rtDisableP2P takes a physical id for the peer, so translate first.
    uint32_t peerPhyId = 0U;
    rtErr = rtGetDevicePhyIdByIndex(static_cast<uint32_t>(peerDeviceId), &peerPhyId);
    if (rtErr != RT_ERROR_NONE) {
        // Fix: peerDeviceId is int32_t, so use %d instead of %u (matches the
        // sibling call in aclrtDeviceEnablePeerAccess).
        ACL_LOG_CALL_ERROR("call rtGetDevicePhyIdByIndex failed, peerDeviceId = %d, runtime result = %d",
            peerDeviceId, static_cast<int32_t>(rtErr));
        return ACL_GET_ERRCODE_RTS(rtErr);
    }

    rtErr = rtDisableP2P(static_cast<uint32_t>(deviceId), peerPhyId);
    if (rtErr != RT_ERROR_NONE) {
        ACL_LOG_CALL_ERROR("call rtDisableP2P failed, deviceId = %d, peerPhyId = %u, runtime result = %d",
            deviceId, peerPhyId, static_cast<int32_t>(rtErr));
        return ACL_GET_ERRCODE_RTS(rtErr);
    }

    return ACL_SUCCESS;
}

/**
 * Query free and total memory for the given memory attribute.
 *
 * @param attr  Memory attribute to query.
 * @param free  Output: free bytes; must not be null.
 * @param total Output: total bytes; must not be null.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtGetMemInfo(aclrtMemAttr attr, size_t *free, size_t *total)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtGetMemInfo);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(free);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(total);
    ACL_LOG_DEBUG("start to execute aclrtGetMemInfo, memory attribute = %d", static_cast<int32_t>(attr));

    const rtError_t queryRet = rtMemGetInfoEx(static_cast<rtMemInfoType_t>(attr), free, total);
    if (queryRet != RT_ERROR_NONE) {
        ACL_LOG_CALL_ERROR("get memory information failed, runtime result = %d", static_cast<int32_t>(queryRet));
        return ACL_GET_ERRCODE_RTS(queryRet);
    }
    ACL_LOG_DEBUG("successfully execute aclrtGetMemInfo, memory attribute = %d, free memory = %zu bytes, "
        "total memory = %zu bytes", static_cast<int32_t>(attr), *free, *total);
    return ACL_SUCCESS;
}

/**
 * Synchronous 2D (pitched) memcpy between host and device memory.
 *
 * @param dst    Destination base pointer; must not be null.
 * @param dpitch Destination pitch in bytes; must be >= width.
 * @param src    Source base pointer; must not be null.
 * @param spitch Source pitch in bytes; must be >= width.
 * @param width  Bytes copied per row; must be positive.
 * @param height Number of rows; must be positive.
 * @param kind   Copy direction; only host<->device directions are supported.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMemcpy2d(void *dst,
                       size_t dpitch,
                       const void *src,
                       size_t spitch,
                       size_t width,
                       size_t height,
                       aclrtMemcpyKind kind)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtMemcpy2d);
    ACL_LOG_DEBUG("start to execute aclrtMemcpy2d, dpitch = %zu, spitch = %zu, width = %zu, height = %zu, kind = %d",
        dpitch, spitch, width, height, static_cast<int32_t>(kind));

    // Parameter validation and kind translation are shared with the async variant.
    rtMemcpyKind_t rtKind = RT_MEMCPY_RESERVED;
    const aclError ret = CheckMemcpy2dParam(dst, dpitch, src, spitch, width, height, kind, rtKind);
    if (ret != ACL_SUCCESS) {
        return ret;
    }

    const rtError_t rtErr = rtMemcpy2d(dst, dpitch, src, spitch, width, height, rtKind);
    if (rtErr != RT_ERROR_NONE) {
        ACL_LOG_CALL_ERROR("[Synchronized][Memcpy]synchronized memcpy failed, kind = %d, runtime result = %d",
            static_cast<int32_t>(kind), static_cast<int32_t>(rtErr));
        return ACL_GET_ERRCODE_RTS(rtErr);
    }

    // Fix: "Successfuly" typo in the log message.
    ACL_LOG_DEBUG("Successfully execute aclrtMemcpy2d, dpitch = %zu, spitch = %zu, width = %zu, height = %zu, "
        "kind = %d", dpitch, spitch, width, height, static_cast<int32_t>(kind));
    return ACL_SUCCESS;
}

/**
 * Asynchronous 2D (pitched) memcpy between host and device memory on a stream.
 *
 * @param dst    Destination base pointer; must not be null.
 * @param dpitch Destination pitch in bytes; must be >= width.
 * @param src    Source base pointer; must not be null.
 * @param spitch Source pitch in bytes; must be >= width.
 * @param width  Bytes copied per row; must be positive.
 * @param height Number of rows; must be positive.
 * @param kind   Copy direction; only host<->device directions are supported.
 * @param stream Stream on which the copy is enqueued.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMemcpy2dAsync(void *dst,
                            size_t dpitch,
                            const void *src,
                            size_t spitch,
                            size_t width,
                            size_t height,
                            aclrtMemcpyKind kind,
                            aclrtStream stream)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtMemcpy2dAsync);
    ACL_LOG_DEBUG("start to execute aclrtMemcpy2dAsync, dpitch = %zu, spitch = %zu, width = %zu, height = %zu,"
        " kind = %d", dpitch, spitch, width, height, static_cast<int32_t>(kind));

    // Parameter validation and kind translation are shared with the sync variant.
    rtMemcpyKind_t rtKindVal = RT_MEMCPY_RESERVED;
    const aclError ret = CheckMemcpy2dParam(dst, dpitch, src, spitch, width, height, kind, rtKindVal);
    if (ret != ACL_SUCCESS) {
        return ret;
    }

    const rtError_t rtErr = rtMemcpy2dAsync(dst, dpitch, src, spitch, width, height, rtKindVal, stream);
    if (rtErr != RT_ERROR_NONE) {
        ACL_LOG_CALL_ERROR("[Asynchronized][Memcpy]asynchronized memcpy failed, kind = %d, runtime result = %d",
            static_cast<int32_t>(kind), static_cast<int32_t>(rtErr));
        return ACL_GET_ERRCODE_RTS(rtErr);
    }

    // Fix: "Successfuly" typo in the log message.
    ACL_LOG_DEBUG("Successfully execute aclrtMemcpy2dAsync, dpitch = %zu, spitch = %zu, width = %zu, height = %zu, "
        "kind = %d", dpitch, spitch, width, height, static_cast<int32_t>(kind));
    return ACL_SUCCESS;
}

/**
 * Reserve a virtual address range for later physical-memory mapping.
 *
 * @param virPtr    Output: receives the reserved virtual address; must not be null.
 * @param size      Range size in bytes; must be greater than zero.
 * @param alignment Requested alignment of the range.
 * @param expectPtr Must currently be null (explicit placement unsupported).
 * @param flags     Page-type flag; must be 0 or 1.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtReserveMemAddress(void **virPtr,
                                size_t size,
                                size_t alignment,
                                void *expectPtr,
                                uint64_t flags)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtReserveMemAddress);
    ACL_ADD_APPLY_TOTAL_COUNT(acl::ACL_STATISTICS_RESERVE_RELEASE_MEMORY_ADDRESS);
    ACL_LOG_DEBUG("start to execute aclrtReserveMemAddress, size = %zu", size);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(virPtr);

    // Validate the reservation request before touching the runtime.
    if (size == 0UL) {
        ACL_LOG_ERROR("reserve size must be greater than zero");
        return ACL_ERROR_INVALID_PARAM;
    }
    if (expectPtr != nullptr) {
        ACL_LOG_ERROR("expectPtr must be nullptr");
        return ACL_ERROR_INVALID_PARAM;
    }
    if ((flags != 1ULL) && (flags != 0ULL)) {
        ACL_LOG_ERROR("flags of page type must be 0 or 1");
        return ACL_ERROR_INVALID_PARAM;
    }

    const rtError_t reserveRet = rtReserveMemAddress(virPtr, size, alignment, expectPtr, flags);
    if (reserveRet == RT_ERROR_NONE) {
        ACL_ADD_APPLY_SUCCESS_COUNT(acl::ACL_STATISTICS_RESERVE_RELEASE_MEMORY_ADDRESS);
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("reserve memory address failed, runtime result = %d", static_cast<int32_t>(reserveRet));
    return ACL_GET_ERRCODE_RTS(reserveRet);
}

/**
 * Release a virtual address range reserved via aclrtReserveMemAddress.
 *
 * @param virPtr Reserved virtual address to release; must not be null.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtReleaseMemAddress(void *virPtr)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtReleaseMemAddress);
    ACL_ADD_RELEASE_TOTAL_COUNT(acl::ACL_STATISTICS_RESERVE_RELEASE_MEMORY_ADDRESS);
    ACL_LOG_DEBUG("start to execute aclrtReleaseMemAddress");
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(virPtr);

    const rtError_t releaseRet = rtReleaseMemAddress(virPtr);
    if (releaseRet == RT_ERROR_NONE) {
        ACL_ADD_RELEASE_SUCCESS_COUNT(acl::ACL_STATISTICS_RESERVE_RELEASE_MEMORY_ADDRESS);
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("release memory address failed, runtime result = %d", static_cast<int32_t>(releaseRet));
    return ACL_GET_ERRCODE_RTS(releaseRet);
}

/**
 * Allocate physical device memory and return a driver memory handle.
 *
 * @param handle Output: receives the driver memory handle; must not be null.
 * @param size   Requested size in bytes; must be greater than zero.
 * @param prop   Allocation properties; must not be null. handleType must be
 *               ACL_MEM_HANDLE_TYPE_NONE and allocationType ACL_MEM_ALLOCATION_TYPE_PINNED.
 * @param flags  Reserved; must be 0.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMallocPhysical(aclrtDrvMemHandle *handle,
                             size_t size,
                             const aclrtPhysicalMemProp *prop,
                             uint64_t flags)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtMallocPhysical);
    ACL_ADD_APPLY_TOTAL_COUNT(acl::ACL_STATISTICS_MALLOC_FREE_PHYSICAL_MEMORY);
    ACL_LOG_DEBUG("start to execute aclrtMallocPhysical, size = %zu", size);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(handle);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(prop);
    ACL_CHECK_WITH_MESSAGE_AND_RETURN(size != 0UL, ACL_ERROR_INVALID_PARAM,
                                      "malloc size must be greater than zero");
    ACL_CHECK_WITH_MESSAGE_AND_RETURN(flags == 0UL, ACL_ERROR_INVALID_PARAM,
                                      "flags must be 0");
    ACL_CHECK_WITH_MESSAGE_AND_RETURN(prop->handleType == ACL_MEM_HANDLE_TYPE_NONE,
                                      ACL_ERROR_INVALID_PARAM, "handleType must be ACL_MEM_HANDLE_TYPE_NONE");
    ACL_CHECK_WITH_MESSAGE_AND_RETURN(prop->allocationType == ACL_MEM_ALLOCATION_TYPE_PINNED,
                                      ACL_ERROR_INVALID_PARAM,
                                      "allocationType must be ACL_MEM_ALLOCATION_TYPE_PINNED");

    // Translate the ACL property block into the runtime property block.
    rtDrvMemProp_t memProp = {};
    memProp.side = prop->location.type;
    memProp.devid = prop->location.id;
    memProp.module_id = acl::APP_MODE_ID_U16;
    memProp.reserve = prop->reserve;
    if (prop->memAttr == ACL_HBM_MEM_HUGE) {
        // Huge-page HBM allocation.
        memProp.pg_type = 1UL;
        memProp.mem_type = 0UL;
    } else if (prop->memAttr == ACL_HBM_MEM_NORMAL) {
        // Normal-page HBM allocation.
        memProp.pg_type = 0UL;
        memProp.mem_type = 0UL;
    } else {
        ACL_LOG_ERROR("memAttr support ACL_HBM_MEM_HUGE or ACL_HBM_MEM_NORMAL");
        return ACL_ERROR_INVALID_PARAM;
    }

    const rtError_t allocRet = rtMallocPhysical(reinterpret_cast<rtDrvMemHandle_t**>(handle), size, &memProp, flags);
    if (allocRet == RT_ERROR_NONE) {
        ACL_ADD_APPLY_SUCCESS_COUNT(acl::ACL_STATISTICS_MALLOC_FREE_PHYSICAL_MEMORY);
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("malloc physical memory failed, runtime result = %d", static_cast<int32_t>(allocRet));
    return ACL_GET_ERRCODE_RTS(allocRet);
}

/**
 * Release physical device memory allocated via aclrtMallocPhysical.
 *
 * @param handle Driver memory handle to free; must not be null.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtFreePhysical(aclrtDrvMemHandle handle)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtFreePhysical);
    ACL_ADD_RELEASE_TOTAL_COUNT(acl::ACL_STATISTICS_MALLOC_FREE_PHYSICAL_MEMORY);
    ACL_LOG_DEBUG("start to execute aclrtFreePhysical");
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(handle);

    const rtError_t freeRet = rtFreePhysical(reinterpret_cast<rtDrvMemHandle_t*>(handle));
    if (freeRet == RT_ERROR_NONE) {
        ACL_ADD_RELEASE_SUCCESS_COUNT(acl::ACL_STATISTICS_MALLOC_FREE_PHYSICAL_MEMORY);
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("free physical memory failed, runtime result = %d", static_cast<int32_t>(freeRet));
    return ACL_GET_ERRCODE_RTS(freeRet);
}

/**
 * Map physical memory into a reserved virtual address range.
 *
 * @param virPtr Reserved virtual address to map into; must not be null.
 * @param size   Mapping size in bytes; must be greater than zero.
 * @param offset Offset into the physical allocation.
 * @param handle Driver memory handle to map; must not be null.
 * @param flags  Reserved; must be 0.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMapMem(void *virPtr,
                     size_t size,
                     size_t offset,
                     aclrtDrvMemHandle handle,
                     uint64_t flags)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtMapMem);
    ACL_ADD_APPLY_TOTAL_COUNT(acl::ACL_STATISTICS_MAP_UNMAP_MEMORY);
    ACL_LOG_DEBUG("start to execute aclrtMapMem, size = %zu, offset = %zu", size, offset);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(virPtr);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(handle);

    // Validate the mapping request before touching the runtime.
    if (size == 0UL) {
        ACL_LOG_ERROR("map size must be greater than zero");
        return ACL_ERROR_INVALID_PARAM;
    }
    if (flags != 0UL) {
        ACL_LOG_ERROR("flags must be 0");
        return ACL_ERROR_INVALID_PARAM;
    }

    const rtError_t mapRet = rtMapMem(virPtr, size, offset, reinterpret_cast<rtDrvMemHandle_t*>(handle), flags);
    if (mapRet == RT_ERROR_NONE) {
        ACL_ADD_APPLY_SUCCESS_COUNT(acl::ACL_STATISTICS_MAP_UNMAP_MEMORY);
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("map memory failed, runtime result = %d", static_cast<int32_t>(mapRet));
    return ACL_GET_ERRCODE_RTS(mapRet);
}

/**
 * Unmap physical memory from a virtual address previously mapped via aclrtMapMem.
 *
 * @param virPtr Mapped virtual address; must not be null.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtUnmapMem(void *virPtr)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtUnmapMem);
    ACL_ADD_RELEASE_TOTAL_COUNT(acl::ACL_STATISTICS_MAP_UNMAP_MEMORY);
    ACL_LOG_DEBUG("start to execute aclrtUnmapMem");
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(virPtr);

    const rtError_t unmapRet = rtUnmapMem(virPtr);
    if (unmapRet == RT_ERROR_NONE) {
        ACL_ADD_RELEASE_SUCCESS_COUNT(acl::ACL_STATISTICS_MAP_UNMAP_MEMORY);
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("unmap memory failed, runtime result = %d", static_cast<int32_t>(unmapRet));
    return ACL_GET_ERRCODE_RTS(unmapRet);
}

/**
 * Export a physical-memory handle as a process-shareable handle.
 *
 * @param handle          Driver memory handle to export; must not be null.
 * @param handleType      Must be ACL_MEM_HANDLE_TYPE_NONE.
 * @param flags           Reserved; must be 0.
 * @param shareableHandle Output: receives the shareable handle; must not be null.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMemExportToShareableHandle(aclrtDrvMemHandle handle,
                                         aclrtMemHandleType handleType, uint64_t flags,
                                         uint64_t *shareableHandle)
{
    // Fix: re-indented from 2-space to the file's 4-space convention.
    ACL_PROFILING_REG(acl::AclProfType::AclrtMemExportToShareableHandle);
    ACL_LOG_DEBUG("start to execute aclrtMemExportToShareableHandle");
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(handle);
    ACL_REQUIRES_TRUE(handleType == ACL_MEM_HANDLE_TYPE_NONE, ACL_ERROR_INVALID_PARAM,
                      "handleType in MemExportToShareableHandle must be ACL_MEM_HANDLE_TYPE_NONE !");
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(shareableHandle);
    ACL_REQUIRES_TRUE(flags == 0UL, ACL_ERROR_INVALID_PARAM,
                      "flags in MemExportToShareableHandle must be 0 !");

    const rtError_t rtErr = rtMemExportToShareableHandle(reinterpret_cast<rtDrvMemHandle_t*>(handle),
                                                         RT_MEM_HANDLE_TYPE_NONE, flags, shareableHandle);
    if (rtErr != RT_ERROR_NONE) {
        ACL_LOG_CALL_ERROR("export shareable handle failed, runtime result = %d", static_cast<int32_t>(rtErr));
        return ACL_GET_ERRCODE_RTS(rtErr);
    }
    return ACL_SUCCESS;
}

/**
 * Import a physical-memory handle previously exported as a shareable handle.
 *
 * @param shareableHandle Shareable handle produced by aclrtMemExportToShareableHandle.
 * @param deviceId        Logical device id on which to import the handle.
 * @param handle          Output: receives the imported driver memory handle; must not be null.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMemImportFromShareableHandle(uint64_t shareableHandle,
                                           int32_t deviceId, aclrtDrvMemHandle *handle)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtMemImportFromShareableHandle);
    ACL_LOG_DEBUG("start to execute aclrtMemImportFromShareableHandle");
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(handle);

    // Fix: pass the caller's handle slot itself ("handle" is already a pointer
    // to the output location, cf. rtMallocPhysical usage in aclrtMallocPhysical).
    // The previous "&handle" wrote the imported handle into the local parameter
    // copy, so the caller's *handle was never updated.
    const rtError_t rtErr = rtMemImportFromShareableHandle(shareableHandle, deviceId,
                                                           reinterpret_cast<rtDrvMemHandle_t**>(handle));
    if (rtErr != RT_ERROR_NONE) {
        ACL_LOG_CALL_ERROR("import from shareable handle failed, shareableHandle[%lu], deviceId[%d], runtime result = %d",
                           shareableHandle, deviceId, static_cast<int32_t>(rtErr));
        return ACL_GET_ERRCODE_RTS(rtErr);
    }
    return ACL_SUCCESS;
}

/**
 * Grant a list of process ids access to a shareable memory handle.
 *
 * @param shareableHandle Shareable handle produced by aclrtMemExportToShareableHandle.
 * @param pid             Array of process ids; must not be null.
 * @param pidNum          Number of entries in @p pid; must be greater than zero.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMemSetPidToShareableHandle(uint64_t shareableHandle, int32_t *pid, size_t pidNum)
{
    // Fix: re-indented from 2-space to the file's 4-space convention.
    ACL_PROFILING_REG(acl::AclProfType::AclrtMemSetPidToShareableHandle);
    ACL_LOG_DEBUG("start to execute aclrtMemSetPidToShareableHandle");
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(pid);
    ACL_REQUIRES_TRUE(pidNum > 0UL, ACL_ERROR_INVALID_PARAM,
                      "pidNum in SetPidToShareableHandle must be large than 0 !");

    const rtError_t rtErr = rtMemSetPidToShareableHandle(shareableHandle, pid, static_cast<uint32_t>(pidNum));
    if (rtErr != RT_ERROR_NONE) {
        ACL_LOG_CALL_ERROR("set pid to shareable handle failed, shareableHandle[%lu], pidNum[%zu], runtime result = %d",
                           shareableHandle, pidNum, static_cast<int32_t>(rtErr));
        return ACL_GET_ERRCODE_RTS(rtErr);
    }
    return ACL_SUCCESS;
}

/**
 * Query the allocation granularity for the given physical-memory properties.
 *
 * @param prop        Allocation properties; must not be null. memAttr must be
 *                    ACL_HBM_MEM_HUGE or ACL_HBM_MEM_NORMAL.
 * @param option      Granularity option to query.
 * @param granularity Output: granularity in bytes; must not be null.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtMemGetAllocationGranularity(aclrtPhysicalMemProp *prop, aclrtMemGranularityOptions option,
                                          size_t *granularity)
{
    ACL_PROFILING_REG(acl::AclProfType::AclrtMemGetAllocationGranularity);
    ACL_LOG_DEBUG("start to execute aclrtMemGetAllocationGranularity");
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(prop);
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(granularity);

    // Translate the ACL property block into the runtime property block.
    rtDrvMemProp_t memProp = {};
    memProp.side = prop->location.type;
    memProp.devid = prop->location.id;
    memProp.module_id = acl::APP_MODE_ID_U16;
    memProp.reserve = prop->reserve;
    if (prop->memAttr == ACL_HBM_MEM_HUGE) {
        // Huge-page HBM allocation.
        memProp.pg_type = 1U;
        memProp.mem_type = 0U;
    } else if (prop->memAttr == ACL_HBM_MEM_NORMAL) {
        // Normal-page HBM allocation.
        memProp.pg_type = 0U;
        memProp.mem_type = 0U;
    } else {
        ACL_LOG_ERROR("memAttr [%d] support ACL_HBM_MEM_HUGE or ACL_HBM_MEM_NORMAL",
            static_cast<int32_t>(prop->memAttr));
        return ACL_ERROR_INVALID_PARAM;
    }

    const rtError_t queryRet = rtMemGetAllocationGranularity(&memProp,
        static_cast<rtDrvMemGranularityOptions>(option), granularity);
    if (queryRet == RT_ERROR_NONE) {
        return ACL_SUCCESS;
    }
    ACL_LOG_CALL_ERROR("Get Allocation Granularity failed, runtime result = %d", static_cast<int32_t>(queryRet));
    return ACL_GET_ERRCODE_RTS(queryRet);
}

/**
 * Query the bare-environment thread-group id (tgid) of the current process.
 *
 * @param pid Output: receives the tgid; must not be null.
 * @return ACL_SUCCESS on success, otherwise an ACL error code.
 */
aclError aclrtDeviceGetBareTgid(int32_t *pid)
{
    // Fix: re-indented from 2-space to the file's 4-space convention.
    ACL_PROFILING_REG(acl::AclProfType::AclrtDeviceGetBareTgid);
    ACL_LOG_DEBUG("start to execute aclrtDeviceGetBareTgid");
    ACL_REQUIRES_NOT_NULL_WITH_INPUT_REPORT(pid);

    const rtError_t rtErr = rtDeviceGetBareTgid(reinterpret_cast<uint32_t *>(pid));
    if (rtErr != RT_ERROR_NONE) {
        // Fix: "Falied" typo in the log message.
        ACL_LOG_CALL_ERROR("Get Bare Tgid Failed, runtime result = %d", static_cast<int32_t>(rtErr));
        return ACL_GET_ERRCODE_RTS(rtErr);
    }
    return ACL_SUCCESS;
}