/**
 * @file op_runner.cpp
 *
 * Copyright (C) 2023-2024. Huawei Technologies Co., Ltd. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <mpi.h>
#include "op_runner.h"
#include <cassert>
#include <cstdlib>
#include <limits>
#include <bitset>
#include "acl/acl_op_compiler.h"
#include "aclnn_quant_matmul_v2.h"
#include "common.h"
#include <sys/mman.h>
#include <asm-generic/mman-common.h>
#include "gmem_test_comlib.h"
#include <sys/ioctl.h>
#include "rpg_dev.h"
#include "mem_share.h"

using namespace std;

extern bool g_isDevice;

#define MEMFLAG 1

/**
 * Read an integer environment variable, falling back to a default when unset.
 * BUG FIX: std::getenv returns nullptr for an unset variable, and passing
 * nullptr to std::atoi is undefined behavior (crashes on glibc). The previous
 * globals dereferenced the result unconditionally at static-init time.
 */
static int GetEnvInt(const char *name, int defaultValue)
{
    const char *value = std::getenv(name);
    return (value != nullptr) ? std::atoi(value) : defaultValue;
}

int rank_size = GetEnvInt("RANK_SIZE", 1);    // number of MPI ranks; 1 => skip barriers
int p_value_main = GetEnvInt("P_VALUE", 0);   // operator P parameter, forwarded to aclnnQuantMatmulV2

/**
 * Allocate `size` bytes and store the resulting pointer in *addr.
 *
 * shared == false (or MEMFLAG disabled): plain ACL allocation — device memory
 * when g_isDevice is set, host memory otherwise.
 *
 * shared == true: anonymous mmap inside a fixed high address window so the
 * mapping can be shared across peer devices. The extra flag 0x1000000 is
 * presumably MAP_PEER_SHARED (its value is printed below) — confirm.
 *
 * @param addr      out-parameter; may also carry a requested address on entry
 * @param size      allocation size in bytes
 * @param g_isDevice  true => allocate device memory on the non-shared path
 * @param rankId    rank used for gmem_prefetch on an address-directed mapping
 * @param shared    request a peer-shared mapping instead of an ACL allocation
 * @return ACL_SUCCESS on success, an ACL error code otherwise
 */
int malloc_routing(void **addr, size_t size, bool g_isDevice, uint32_t rankId, bool shared){
    if (MEMFLAG == 0 || !shared){
        if (g_isDevice){
            return aclrtMalloc(addr, size, ACL_MEM_MALLOC_HUGE_FIRST);
        } else {
            return aclrtMallocHost(addr, size);
        }
    }

    static void *start_addr = (void *)0xfff000000000;
    void *end_addr = (void *)0xfffff0000000;
    // Candidate stride: size rounded up to the next MiB, then doubled.
    long long int stride = (size / 1048576 + 1) * 1048576 * 2;
    printf("MAP_PEER_SHARED: %llx\n", MAP_PEER_SHARED);
    if (*addr == nullptr) {
        // No address requested: probe the window until a mapping succeeds.
        // (char* arithmetic: stepping a void* is a non-standard GNU extension.)
        for (char *try_addr = (char *)start_addr; (void *)try_addr <= end_addr; try_addr += stride) {
            *addr = mmap(try_addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | 0x1000000, -1, 0);
            if (*addr != MAP_FAILED) {
                return ACL_SUCCESS;
            }
        }
    } else {
        // Caller supplied a target address: map there and prefetch to this rank.
        // BUG FIX: previous code passed `addr` (the address of the out-pointer
        // itself) to mmap() and gmem_prefetch() instead of `*addr` — the
        // requested address was ignored and the prefetch targeted stack memory.
        *addr = mmap(*addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | 0x1000000, -1, 0);
        if (*addr != MAP_FAILED) {
            (void)gmem_prefetch(*addr, size, rankId);
            return ACL_SUCCESS;
        }
    }

    return ACL_ERROR_BAD_ALLOC;
}


// Cache the tensor counts from the descriptor; buffers are allocated later in Init().
OpRunner::OpRunner(OperatorDesc *opDesc, int rank_id) : opDesc_(opDesc)
{
    numInputs_ = opDesc_->inputDesc.size();
    numOutputs_ = opDesc_->outputDesc.size();
    rankId = rank_id;
    workspace_ = nullptr;   // filled in by RunOp() once the workspace size is known
}

/**
 * Best-effort teardown of everything Init()/RunOp() allocated.
 *
 * BUG FIX: the munmap failure logs printed the size_t loop index with %d,
 * which is undefined behavior for mismatched printf varargs; now %zu.
 *
 * NOTE(review): Init() obtains devInputs_/hostInputs_ (and non-shared outputs)
 * via malloc_routing(..., shared=false), which allocates with
 * aclrtMalloc/aclrtMallocHost even when MEMFLAG == 1 — yet this branch
 * releases them with munmap(). The allocator/deallocator pairing should be
 * confirmed against malloc_routing(); kept as-is to avoid changing behavior.
 */
OpRunner::~OpRunner()
{
    if (MEMFLAG == 0){
        if (workspace_ != nullptr) {
            (void)aclrtFree(workspace_);
        }

        for (size_t i = 0; i < numInputs_; ++i) {
            (void)aclDestroyTensor(inputTensor_[i]);
            (void)aclrtFree(devInputs_[i]);
            if (g_isDevice) {
                (void)aclrtFree(hostInputs_[i]);
            } else {
                (void)aclrtFreeHost(hostInputs_[i]);
            }
        }

        for (size_t i = 0; i < numOutputs_; ++i) {
            (void)aclDestroyTensor(outputTensor_[i]);
            (void)aclrtFree(devOutputs_[i]);
            if (g_isDevice) {
                (void)aclrtFree(hostOutputs_[i]);
            } else {
                (void)aclrtFreeHost(hostOutputs_[i]);
            }
        }
    }
    else{
        if (workspace_ != nullptr) {
            if (munmap(workspace_, workspaceSize_) == -1) {
                perror("munmap workspace");
                printf("gmem munmap %zu bytes of memory FAILED\n", workspaceSize_);
            }
        }

        for (size_t i = 0; i < numInputs_; ++i) {
            (void)aclDestroyTensor(inputTensor_[i]);
            if (munmap(devInputs_[i], inputsSize_[i]) == -1){
                perror("munmap devInputs");
                printf("gmem munmap %zu bytes of memory %zu FAILED\n", inputsSize_[i], i);
            }

            if (munmap(hostInputs_[i], inputsSize_[i]) == -1){
                perror("munmap hostInputs");
                printf("gmem munmap %zu bytes of memory %zu FAILED\n", inputsSize_[i], i);
            }
        }

        for (size_t i = 0; i < numOutputs_; ++i) {
            (void)aclDestroyTensor(outputTensor_[i]);
            // Only slots 0 and rankId are unmapped here — presumably the other
            // slots belong to peer ranks. NOTE(review): Init() publishes this
            // rank's shared slot at index rankId+1, so confirm this condition
            // is not off by one.
            if (i==0 || i == rankId){
                if (munmap(devOutputs_[i], outputsSize_[i]) == -1){
                    perror("munmap devOutputs_");
                    printf("gmem munmap %zu bytes of memory %zu FAILED\n", outputsSize_[i], i);
                }
            }
            if (munmap(hostOutputs_[i], outputsSize_[i]) == -1){
                perror("munmap hostOutputs_");
                printf("gmem munmap %zu bytes of memory %zu FAILED\n", outputsSize_[i], i);
            }
        }
    }
}



bool OpRunner::Init()
{
    // Allocates all input/output buffers and wraps the device buffers in
    // aclTensor objects. Roles of the parallel per-input arrays (translated
    // from the original Chinese comments):
    //   devInputs_    - device-side data; filled from hostInputs_ by memcpy in RunOp
    //   inputBuffers_ - unused
    //   hostInputs_   - host-side buffers holding the data prepared at init time
    //   inputTensor_  - aclTensor objects managing devInputs_
    //   hostInputs_ --aclrtMemcpy--> devInputs_


    for (size_t i = 0; i < numInputs_; ++i) {
        auto size = GetInputSize(i);
        inputsSize_.emplace_back(size);
        void *devMem = nullptr;
        // NOTE(review): %d with a size_t argument — should be %zu.
        INFO_LOG("size: %d;", size);
        if (size == 0) {
            // Zero-sized (optional) input: record null buffers and create a
            // tensor without backing storage.
            devInputs_.emplace_back(devMem);
            // inputBuffers_.emplace_back(aclhostInputCreateDataBuffer(devMem, size));
            void *hostInput = nullptr;
            hostInputs_.emplace_back(hostInput);
            aclTensor *inputTensor =
                aclCreateTensor(GetInputShape(i).data(), GetInputNumDims(i), GetInputDataType(i), nullptr, 0,
                                GetInputFormat(i), GetInputShape(i).data(), GetInputNumDims(i), nullptr);
            inputTensor_.emplace_back(inputTensor);

            continue;
        }

        // Device-side buffer (shared=false -> plain ACL device allocation).
        if (malloc_routing(&devMem, size, true, rankId, false) != ACL_SUCCESS) {
            ERROR_LOG("Malloc devMem memory for input[%zu] failed 0", i);
            return false;
        }

        devInputs_.emplace_back(devMem);
        // inputBuffers_.emplace_back(aclCreateDataBuffer(devMem, size));

        // Host-side staging buffer for this input.
        void *hostInput = nullptr;

        if (malloc_routing(&hostInput, size, g_isDevice, rankId, false) != ACL_SUCCESS) {
                ERROR_LOG("Malloc hostInput memory for input[%zu] failed 1", i);
                return false;
        }

        if (hostInput == nullptr) {
            ERROR_LOG("Malloc memory for input[%zu] failed", i);
            return false;
        }
        hostInputs_.emplace_back(hostInput);

        aclTensor *inputTensor;

        inputTensor =
            aclCreateTensor(GetInputShape(i).data(), GetInputNumDims(i), GetInputDataType(i), nullptr, 0,
                            GetInputFormat(i), GetInputShape(i).data(), GetInputNumDims(i), devInputs_[i]);


        if (inputTensor == nullptr) {
            ERROR_LOG("Create Tensor for input[%zu] failed", i);
            return false;
        }
        inputTensor_.emplace_back(inputTensor);
    }
    if(rank_size > 1){
	    MPI_Barrier(MPI_COMM_WORLD);
    }
        // each device only need alloc its own output mem
    if (MEMFLAG == 1){
    // Allocate this rank's own shared output slot (slot rankId+1) as a
    // peer-shared mapping and publish its address for the other ranks.
    void *devMem = nullptr;
    size_t size = GetOutputSize(rankId+1);
    if (malloc_routing(&devMem, size, true, rankId, true) != ACL_SUCCESS) {
        ERROR_LOG("Malloc device memory for output[%zu] failed", rankId+1);
        return false;
    }


    //recode addr of this device (publish {addr, size} so peers can map it)
    INFO_LOG("device %zu share addr %lx, size %lu", rankId, (unsigned long)devMem, size);
    if(shared_mm_write(rankId, devMem, size) < 0) {
        ERROR_LOG("device[%zu] want share addr %lx and size %zu but failed", rankId, (unsigned long)devMem, size);
        return false;
    }
    (void)gmem_prefetch(devMem, size, rankId);
    }


    // Output-side arrays, mirroring the input-side ones above:
    // devOutputs_ / outputBuffers_ (unused) / hostOutputs_ / outputTensor_
    if(rank_size > 1){
    	MPI_Barrier(MPI_COMM_WORLD);
    }
    for (size_t i = 0; i < numOutputs_; ++i) {
        auto size = GetOutputSize(i);
        void *devMem = nullptr;

        static int print_once = 1;
        //outputsSize_.emplace_back(size);

        // Outputs 1..rank_size are the peer-shared slots: read the address the
        // owning rank published instead of allocating locally.
        if (MEMFLAG == 1 && i !=0 && i < rank_size + 1 ) {
			// Busy-wait until rank i-1 has published its slot: decrementing i
			// makes the for-loop retry this same index. The error is logged
			// only once (print_once).
			if (shared_mm_read(i-1, &devMem, &size) < 0) {
                if (print_once == 1)
                    ERROR_LOG("device[%zu] get addr[%zu] shared data failed", rankId, i);
                i--;
                print_once = 0;
                continue;
            }
			// Prefetch peers' slots to this rank; our own slot was prefetched above.
			if (i - 1 != rankId) {
				(void)gmem_prefetch(devMem, size, rankId);
			}
		} else {
			// Slot 0 (and everything when MEMFLAG == 0): private allocation.
			if (malloc_routing(&devMem, size, true, rankId, false) != ACL_SUCCESS) {
                ERROR_LOG("Malloc device memory for output[%zu] failed", i);
                return false;
            }
        }
        INFO_LOG("device[%zu] get: addr[%zu]: %lx, size: %zu", rankId, i, (unsigned long)devMem, size);

        outputsSize_.emplace_back(size);
        devOutputs_.emplace_back(devMem);
        // outputBuffers_.emplace_back(aclCreateDataBuffer(devMem, size));

        // Host-side buffer used by SaveOutput() to copy results back.
        void *hostOutput = nullptr;

        if (malloc_routing(&hostOutput, size, g_isDevice, rankId, false) != ACL_SUCCESS) {
                ERROR_LOG("Malloc device memory for output[%zu] failed", i);
                return false;
        }

        if (hostOutput == nullptr) {
            ERROR_LOG("Malloc host memory for output[%zu] failed", i);
            return false;
        }

        hostOutputs_.emplace_back(hostOutput);
        aclTensor *outputTensor;
        outputTensor =
            aclCreateTensor(GetOutputShape(i).data(), GetOutputNumDims(i), GetOutputDataType(i), nullptr, 0,
                            GetOutputFormat(i), GetOutputShape(i).data(), GetOutputNumDims(i), devOutputs_[i]);


        if (outputTensor == nullptr) {
            ERROR_LOG("Create Tensor for output[%zu] failed", i);
            return false;
        }
        outputTensor_.emplace_back(outputTensor);
    }
    if(rank_size > 1){
    	MPI_Barrier(MPI_COMM_WORLD);
    }
    return true;
}

const size_t OpRunner::NumInputs()
{
    // Number of input tensors in the operator descriptor (cached at construction).
    return numInputs_;
}

const size_t OpRunner::NumOutputs()
{
    // Number of output tensors in the operator descriptor (cached at construction).
    return numOutputs_;
}

// Size in bytes of input tensor `index`; logs and returns 0 when out of range.
const size_t OpRunner::GetInputSize(size_t index) const
{
    if (index < numInputs_) {
        return aclGetTensorDescSize(opDesc_->inputDesc[index]);
    }
    ERROR_LOG("index out of range. index = %zu, numInputs = %zu", index, numInputs_);
    return 0;
}

// Rank (number of dimensions) of input `index`; logs and returns 0 when out of range.
const size_t OpRunner::GetInputNumDims(size_t index) const
{
    if (index < numInputs_) {
        return aclGetTensorDescNumDims(opDesc_->inputDesc[index]);
    }
    ERROR_LOG("index out of range. index = %zu, numInputs = %zu", index, numInputs_);
    return 0;
}

// Element data type of input `index`; logs and returns ACL_DT_UNDEFINED when out of range.
aclDataType OpRunner::GetInputDataType(size_t index) const
{
    if (index < numInputs_) {
        return aclGetTensorDescType(opDesc_->inputDesc[index]);
    }
    ERROR_LOG("index out of range. index = %zu, numInputs = %zu", index, numInputs_);
    return ACL_DT_UNDEFINED;
}

// Layout format of input `index`; logs and returns ACL_FORMAT_UNDEFINED when out of range.
aclFormat OpRunner::GetInputFormat(size_t index) const
{
    if (index < numInputs_) {
        return aclGetTensorDescFormat(opDesc_->inputDesc[index]);
    }
    ERROR_LOG("index out of range. index = %zu, numInputs = %zu", index, numInputs_);
    return ACL_FORMAT_UNDEFINED;
}

// Dimension extents of input `index`; empty vector on a bad index or query failure.
std::vector<int64_t> OpRunner::GetInputShape(size_t index) const
{
    std::vector<int64_t> shape;
    if (index >= numInputs_) {
        ERROR_LOG("index out of range. index = %zu, numInputs = %zu", index, numInputs_);
        return shape;
    }

    auto desc = opDesc_->inputDesc[index];
    for (size_t dim = 0; dim < aclGetTensorDescNumDims(desc); ++dim) {
        int64_t extent;
        if (aclGetTensorDescDimV2(desc, dim, &extent) != ACL_SUCCESS) {
            ERROR_LOG("get dims from tensor desc failed. dims index = %zu", dim);
            return {};
        }
        shape.push_back(extent);
    }

    return shape;
}

// Size in bytes of output tensor `index`; logs and returns 0 when out of range.
size_t OpRunner::GetOutputSize(size_t index) const
{
    if (index < numOutputs_) {
        return aclGetTensorDescSize(opDesc_->outputDesc[index]);
    }
    ERROR_LOG("index out of range. index = %zu, numOutputs = %zu", index, numOutputs_);
    return 0;
}

// Rank (number of dimensions) of output `index`; logs and returns 0 when out of range.
const size_t OpRunner::GetOutputNumDims(size_t index) const
{
    if (index < numOutputs_) {
        return aclGetTensorDescNumDims(opDesc_->outputDesc[index]);
    }
    ERROR_LOG("index out of range. index = %zu, numOutputs = %zu", index, numOutputs_);
    return 0;
}

// Element data type of output `index`; logs and returns ACL_DT_UNDEFINED when out of range.
aclDataType OpRunner::GetOutputDataType(size_t index) const
{
    if (index < numOutputs_) {
        return aclGetTensorDescType(opDesc_->outputDesc[index]);
    }
    ERROR_LOG("index out of range. index = %zu, numOutputs = %zu", index, numOutputs_);
    return ACL_DT_UNDEFINED;
}

// Layout format of output `index`; logs and returns ACL_FORMAT_UNDEFINED when out of range.
aclFormat OpRunner::GetOutputFormat(size_t index) const
{
    if (index < numOutputs_) {
        return aclGetTensorDescFormat(opDesc_->outputDesc[index]);
    }
    ERROR_LOG("index out of range. index = %zu, numOutputs = %zu", index, numOutputs_);
    return ACL_FORMAT_UNDEFINED;
}

// Dimension extents of output `index`; empty vector on a bad index or query failure.
std::vector<int64_t> OpRunner::GetOutputShape(size_t index) const
{
    std::vector<int64_t> shape;
    if (index >= numOutputs_) {
        ERROR_LOG("index out of range. index = %zu, numOutputs = %zu", index, numOutputs_);
        return shape;
    }

    auto desc = opDesc_->outputDesc[index];
    for (size_t dim = 0; dim < aclGetTensorDescNumDims(desc); ++dim) {
        int64_t extent;
        if (aclGetTensorDescDimV2(desc, dim, &extent) != ACL_SUCCESS) {
            ERROR_LOG("get dims from tensor desc failed. dims index = %zu", dim);
            return {};
        }
        shape.push_back(extent);
    }
    return shape;
}

// Number of elements (not bytes) in input `index`; logs and returns 0 when out of range.
size_t OpRunner::GetInputElementCount(size_t index) const
{
    if (index < opDesc_->inputDesc.size()) {
        return aclGetTensorDescElementCount(opDesc_->inputDesc[index]);
    }
    ERROR_LOG("index out of range. index = %zu, numInputs = %zu", index, numInputs_);
    return 0;
}

// Number of elements (not bytes) in output `index`; logs and returns 0 when out of range.
size_t OpRunner::GetOutputElementCount(size_t index) const
{
    if (index < opDesc_->outputDesc.size()) {
        return aclGetTensorDescElementCount(opDesc_->outputDesc[index]);
    }
    ERROR_LOG("index out of range. index = %zu, numOutputs = %zu", index, numOutputs_);
    return 0;
}

/**
 * Stage host inputs into the device buffers, query and allocate the operator
 * workspace, launch aclnnQuantMatmulV2 on `stream` and wait for completion.
 *
 * @param stream  ACL stream the operator is launched on; destroyed on failure
 *                of the workspace query / launch / sync (existing behavior).
 * @return true on success, false on any failure.
 */
bool OpRunner::RunOp(aclrtStream stream)
{
    for (size_t i = 0; i < numInputs_; ++i) {
        auto size = GetInputSize(i);
        if (size == 0) {
            // Zero-sized (optional) input: no backing buffer, nothing to copy.
            continue;
        }
        aclrtMemcpyKind kind = ACL_MEMCPY_HOST_TO_DEVICE;
        if (g_isDevice) {
            kind = ACL_MEMCPY_DEVICE_TO_DEVICE;
        }
        if (aclrtMemcpy(devInputs_[i], size, hostInputs_[i], size, kind) != ACL_SUCCESS) {
            ERROR_LOG("Copy input[%zu] failed", i);
            return false;
        }
        INFO_LOG("Copy input[%zu] success", i);
    }

    size_t workspaceSize = 0;
    aclOpExecutor *handle = nullptr;

    // NOTE(review): this reinterprets the aclTensor *handle object* itself,
    // not the tensor's data buffer, so the two bytes printed are internals of
    // the handle. Left unchanged because hostInputs_[0] may be device memory
    // (not host-dereferenceable) when g_isDevice — confirm intent.
    int8_t* floatData = static_cast<int8_t*>((void*)inputTensor_[0]);
    print_int8_binary(floatData[0]);
    print_int8_binary(floatData[1]);

    int ret;

    INFO_LOG("input0:%lx, input1:%lx, input2:%lx, input3:%lx, input4:%lx,", (unsigned long)hostInputs_[0], (unsigned long)hostInputs_[1], (unsigned long)hostInputs_[2], (unsigned long)hostInputs_[3], (unsigned long)hostInputs_[4]);
    INFO_LOG("output0:%lx, output1:%lx, output2:%lx, output3:%lx, output4:%lx,", (unsigned long)devOutputs_[0], (unsigned long)devOutputs_[1], (unsigned long)devOutputs_[2], (unsigned long)devOutputs_[3], (unsigned long)devOutputs_[4]);

    ret = aclnnQuantMatmulV2GetWorkspaceSize(inputTensor_[0], inputTensor_[1], inputTensor_[2], inputTensor_[3], inputTensor_[4], ACL_BF16, false, false, 0, rank_size, rankId, p_value_main, 
                                                          outputTensor_[0],
                                                          outputTensor_[1],
                                                          outputTensor_[2],
                                                          outputTensor_[3],
                                                          outputTensor_[4],
                                                          outputTensor_[5],
                                                          outputTensor_[6],
                                                          outputTensor_[7],
                                                          outputTensor_[8],
                                                        &workspaceSize, &handle);
    if (ret != ACL_SUCCESS) {
        (void)aclrtDestroyStream(stream);
        ERROR_LOG("Get Operator Workspace failed. error code is %d", static_cast<int32_t>(ret));
        return false;
    }
    INFO_LOG("Execute aclnnQuantMatmulV2GetWorkspaceSize success, workspace size %lu", workspaceSize);

    workspaceSize_ = workspaceSize;
    if (workspaceSize != 0) {
        if (malloc_routing(&workspace_, workspaceSize, true, rankId, false) != ACL_SUCCESS) {
            ERROR_LOG("Malloc device memory failed");
            // BUG FIX: previously execution fell through after a failed
            // allocation and launched the operator with an invalid workspace.
            return false;
        }
    }

    ret = aclnnQuantMatmulV2(workspace_, workspaceSize, handle, stream);
    if (ret != ACL_SUCCESS) {
        (void)aclrtDestroyStream(stream);
        ERROR_LOG("Execute Operator failed. error code is %d", static_cast<int32_t>(ret));
        return false;
    }
    INFO_LOG("Execute aclnnQuantMatmulV2 success");
    INFO_LOG("Execute aclnnQuantMatmulV2 return: %d", ret);

    // Block until all work queued on the stream has finished.
    ret = aclrtSynchronizeStream(stream);
    if (ret != SUCCESS) {
        ERROR_LOG("Synchronize stream failed. error code is %d", static_cast<int32_t>(ret));
        (void)aclrtDestroyStream(stream);
        return false;
    }
    INFO_LOG("Synchronize stream success");
    return true;
}

// Print `count` values to std::cout, `elementsPerRow` per line, each
// right-aligned in a 10-character column; a newline ends every full row.
template <typename T> void DoPrintData(const T *data, size_t count, size_t elementsPerRow)
{
    assert(elementsPerRow != 0);
    size_t column = 0;
    for (size_t idx = 0; idx < count; ++idx) {
        std::cout << std::setw(10) << data[idx];
        if (++column == elementsPerRow) {
            std::cout << std::endl;
            column = 0;
        }
    }
}

// Same layout as DoPrintData, but each fp16 value is converted to float first
// and printed with 4 significant digits. Note: setprecision(4) alters
// std::cout's persistent stream state.
void DoPrintFp16Data(const aclFloat16 *data, size_t count, size_t elementsPerRow)
{
    assert(elementsPerRow != 0);
    size_t column = 0;
    for (size_t idx = 0; idx < count; ++idx) {
        std::cout << std::setw(10) << std::setprecision(4) << aclFloat16ToFloat(data[idx]);
        if (++column == elementsPerRow) {
            std::cout << std::endl;
            column = 0;
        }
    }
}

// Dispatch on the ACL element type and print `count` elements of `data`,
// `elementsPerRow` per line. Logs and returns on a null pointer or a type
// with no printer.
void PrintData(const void *data, size_t count, aclDataType dataType, size_t elementsPerRow)
{
    if (data == nullptr) {
        ERROR_LOG("Print data failed. data is nullptr");
        return;
    }

    switch (dataType) {
        case ACL_BOOL:
            return DoPrintData(static_cast<const bool *>(data), count, elementsPerRow);
        case ACL_INT8:
            return DoPrintData(static_cast<const int8_t *>(data), count, elementsPerRow);
        case ACL_UINT8:
            return DoPrintData(static_cast<const uint8_t *>(data), count, elementsPerRow);
        case ACL_INT16:
            return DoPrintData(static_cast<const int16_t *>(data), count, elementsPerRow);
        case ACL_UINT16:
            return DoPrintData(static_cast<const uint16_t *>(data), count, elementsPerRow);
        case ACL_INT32:
            return DoPrintData(static_cast<const int32_t *>(data), count, elementsPerRow);
        case ACL_UINT32:
            return DoPrintData(static_cast<const uint32_t *>(data), count, elementsPerRow);
        case ACL_INT64:
            return DoPrintData(static_cast<const int64_t *>(data), count, elementsPerRow);
        case ACL_UINT64:
            return DoPrintData(static_cast<const uint64_t *>(data), count, elementsPerRow);
        case ACL_FLOAT16:
            return DoPrintFp16Data(static_cast<const aclFloat16 *>(data), count, elementsPerRow);
        case ACL_FLOAT:
            return DoPrintData(static_cast<const float *>(data), count, elementsPerRow);
        case ACL_DOUBLE:
            return DoPrintData(static_cast<const double *>(data), count, elementsPerRow);
        default:
            ERROR_LOG("Unsupported type: %d", dataType);
    }
}

// Dump the host-side copy of input `index` to stdout, numElementsPerRow per line.
void OpRunner::PrintInput(size_t index, size_t numElementsPerRow)
{
    if (index >= numInputs_) {
        ERROR_LOG("index out of range. index = %zu, numOutputs = %zu", index, numInputs_);
        return;
    }

    const auto dataType = aclGetTensorDescType(opDesc_->inputDesc[index]);
    PrintData(hostInputs_[index], GetInputElementCount(index), dataType, numElementsPerRow);
}

// Dump the host-side copy of output `index` to stdout, numElementsPerRow per line.
void OpRunner::PrintOutput(size_t index, size_t numElementsPerRow)
{
    if (index >= numOutputs_) {
        ERROR_LOG("index out of range. index = %zu, numOutputs = %zu", index, numOutputs_);
        return;
    }

    const auto dataType = aclGetTensorDescType(opDesc_->outputDesc[index]);
    PrintData(hostOutputs_[index], GetOutputElementCount(index), dataType, numElementsPerRow);
}

/**
 * Copy device outputs back to the host and, in the MEMFLAG path, dump output 0
 * to ../output/output_<rankId>_ft32_<j>.bin (bf16 converted to float32).
 *
 * BUG FIX: the MEMFLAG branch passed the int literal 0 for a %zu conversion,
 * which is undefined behavior for printf-style varargs; now an explicit size_t.
 *
 * @param j  run index used in the output file name.
 */
void OpRunner::SaveOutput(uint32_t j){
    if (MEMFLAG == 0){
        // Copy every output back to its host buffer; stop at the first failure.
        for (size_t i = 0; i < numOutputs_; ++i) {
            auto size = GetOutputSize(i);
            aclrtMemcpyKind kind = ACL_MEMCPY_DEVICE_TO_HOST;
            if (g_isDevice) {
                kind = ACL_MEMCPY_DEVICE_TO_DEVICE;
            }
            if (aclrtMemcpy(hostOutputs_[i], size, devOutputs_[i], size, kind) != ACL_SUCCESS) {
                INFO_LOG("Copy output[%zu] not success", i);
                return;
            }
            INFO_LOG("Copy output[%zu] success", i);
        }
    } else {
        // Only output 0 is copied back, then written to a per-rank file.
        auto size = GetOutputSize(0);
        aclrtMemcpyKind kind = ACL_MEMCPY_DEVICE_TO_HOST;
        if (g_isDevice) {
            kind = ACL_MEMCPY_DEVICE_TO_DEVICE;
        }
        if (aclrtMemcpy(hostOutputs_[0], size, devOutputs_[0], size, kind) != ACL_SUCCESS) {
            INFO_LOG("Copy output[%zu] not success", static_cast<size_t>(0));
            return;
        }
        INFO_LOG("Copy output[%zu] success", static_cast<size_t>(0));
        WriteFile_bf16Toft32("../output/output_" + std::to_string(rankId) + "_ft32_"+std::to_string(j)+".bin", hostOutputs_[0], aclGetTensorDescSize(opDesc_->outputDesc[0]));
        INFO_LOG("Write output success");
    }

}