/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "cast_operation.h"
#include <cstring>
#include <iostream>
#include <securec.h>
#include <sstream>
#include <unistd.h>
#include "acl/acl.h"
#include "aclnnop/aclnn_cast.h"
#include "utils.h"
#include "acl_nn_tensor.h"

namespace atb_plugin {


// Constructs a Cast operation node; the target dtype is carried in `param`.
CastOperation::CastOperation(const std::string &name, CastParam param)
    : AclNNOperation(name), param_(param)
{
}

// Resource teardown is handled by the AclNNOperation base class.
CastOperation::~CastOperation()
{
}

/**
 * @brief Infers the output tensor description for the cast.
 *
 * Cast preserves the input's format and shape; only the dtype changes to the
 * target type held in param_.dataType.
 *
 * @param inTensorDescs descriptions of the input tensors (index 0 is used)
 * @param outTensorDescs filled in with the single output description
 * @return atb::NO_ERROR on success
 */
atb::Status CastOperation::InferShape(
    const atb::SVector<atb::TensorDesc> &inTensorDescs, atb::SVector<atb::TensorDesc> &outTensorDescs) const
{
    std::cout << this->opName_ << " CastOperation infer shape start" << std::endl;
    outTensorDescs.at(0).format = inTensorDescs.at(0).format;
    // Only the dtype differs from the input: it becomes the cast target type.
    outTensorDescs.at(0).dtype = this->param_.dataType;
    outTensorDescs.at(0).shape.dimNum = inTensorDescs.at(0).shape.dimNum;
    for (size_t i = 0; i < inTensorDescs.at(0).shape.dimNum; i++) {
        outTensorDescs.at(0).shape.dims[i] = inTensorDescs.at(0).shape.dims[i];
    }
    std::cout << this->opName_ << " CastOperation infer shape end"
              << " format: " << inTensorDescs.at(0).format << " dimNum: " << inTensorDescs.at(0).shape.dimNum
              << " dims: " << inTensorDescs.at(0).shape.dims[0] << std::endl;
    // Use the named status constant (instead of a bare 0) for consistency with
    // the other methods in this file.
    return atb::NO_ERROR;
}

// Cast consumes exactly one input tensor.
uint32_t CastOperation::GetInputNum() const
{
    return NUM1;
}

// Cast produces exactly one output tensor.
uint32_t CastOperation::GetOutputNum() const
{
    return NUM1;
}

// Wraps every ATB input tensor of the variant pack in an AclNNTensor whose
// acl handle is created from the batch/seq-squeezed shape.
// Returns atb::NO_ERROR on success, atb::ERROR_INTERNAL_ERROR if any acl
// tensor handle could not be created.
int CastOperation::CreateAclNNInTensorVariantPack(const atb::VariantPack &variantPack)
{
    this->aclInTensors.resize(GetInputNum());
    for (size_t idx = 0; idx < this->aclInTensors.size(); ++idx) {
        auto tensorWrapper = std::make_shared<AclNNTensor>();
        const atb::Tensor &atbTensor = variantPack.inTensors.at(idx);
        tensorWrapper->tensorIdx = idx;
        tensorWrapper->atbTensor = atbTensor;

        // Collapse batch/seq dims before handing the shape to aclnn.
        atb::Tensor squeezed = SqueezeBatchSeq(atbTensor);
        tensorWrapper->strides = GetCopyTensorStride(squeezed.desc.shape);
        tensorWrapper->tensor = aclCreateTensor(squeezed.desc.shape.dims,
            squeezed.desc.shape.dimNum,
            squeezed.desc.dtype,
            tensorWrapper->strides.data(),
            0,
            squeezed.desc.format,
            squeezed.desc.shape.dims,
            squeezed.desc.shape.dimNum,
            squeezed.deviceData);
        if (tensorWrapper->tensor == nullptr) {
            std::cout << this->opName_ << "InTensor aclCreateTensor failed" << std::endl;
            return atb::ERROR_INTERNAL_ERROR;
        }
        this->aclInTensors[idx] = tensorWrapper;
    }
    return atb::NO_ERROR;
}

// Wraps every ATB output tensor of the variant pack in an AclNNTensor whose
// acl handle is created from the batch/seq-squeezed shape.
// Returns atb::NO_ERROR on success, atb::ERROR_INTERNAL_ERROR if any acl
// tensor handle could not be created.
int CastOperation::CreateAclNNOutTensorVariantPack(const atb::VariantPack &variantPack)
{
    this->aclOutTensors.resize(GetOutputNum());
    for (size_t idx = 0; idx < this->aclOutTensors.size(); ++idx) {
        auto tensorWrapper = std::make_shared<AclNNTensor>();
        const atb::Tensor &atbTensor = variantPack.outTensors.at(idx);
        tensorWrapper->tensorIdx = idx;
        tensorWrapper->atbTensor = atbTensor;

        // Collapse batch/seq dims before handing the shape to aclnn.
        atb::Tensor squeezed = SqueezeBatchSeq(atbTensor);
        tensorWrapper->strides = GetCopyTensorStride(squeezed.desc.shape);
        tensorWrapper->tensor = aclCreateTensor(squeezed.desc.shape.dims,
            squeezed.desc.shape.dimNum,
            squeezed.desc.dtype,
            tensorWrapper->strides.data(),
            0,
            squeezed.desc.format,
            squeezed.desc.shape.dims,
            squeezed.desc.shape.dimNum,
            squeezed.deviceData);
        if (tensorWrapper->tensor == nullptr) {
            std::cout << this->opName_ << "OutTensor aclCreateTensor index failed" << std::endl;
            return atb::ERROR_INTERNAL_ERROR;
        }
        this->aclOutTensors[idx] = tensorWrapper;
    }
    return atb::NO_ERROR;
}

// Builds the acl-side input and output tensor packs from the ATB variant
// pack. Stops at the first failing stage and propagates its status code.
atb::Status CastOperation::CreateAclNNVariantPack(const atb::VariantPack &variantPack)
{
    std::cout << this->opName_ << "CreateAclNNVariantPack start" << std::endl;

    // Inputs first; bail out early on failure.
    int status = CreateAclNNInTensorVariantPack(variantPack);
    if (status != 0) {
        std::cout << this->opName_ << "AclNNTensor CreateAclNNInTensorVariantPack fail" << std::endl;
        return status;
    }

    // Then outputs.
    status = CreateAclNNOutTensorVariantPack(variantPack);
    if (status != 0) {
        std::cout << this->opName_ << "AclNNTensor CreateAclNNOutTensorVariantPack fail" << std::endl;
        return status;
    }

    std::cout << this->opName_ << "CreateAclNNVariantPack end" << std::endl;
    return atb::NO_ERROR;
}

// First stage of the aclnn two-phase call: queries the workspace size and
// obtains the executor for the cast to param_.dataType.
// Returns the status code from aclnnCastGetWorkspaceSize.
int CastOperation::SetAclNNWorkspaceExecutor()
{
    std::cout << this->opName_ << "SetAclNNWorkspaceExecutor start" << std::endl;
    return aclnnCastGetWorkspaceSize(this->aclInTensors.at(0)->tensor,
        this->param_.dataType,
        this->aclOutTensors.at(0)->tensor,
        &this->workspaceSize,
        &this->aclExecutor);
}

// Second stage of the aclnn two-phase call: launches the cast kernel on the
// given stream using the executor prepared by SetAclNNWorkspaceExecutor.
// Returns the status code from aclnnCast.
int CastOperation::ExecuteAclNNOp(uint8_t *workspace, aclrtStream &stream)
{
    std::cout << this->opName_ << "ExecuteAclNNOp start" << std::endl;
    const int status = aclnnCast(workspace, this->workspaceSize, this->aclExecutor, stream);
    std::cout << this->opName_ << "ExecuteAclNNOp end" << std::endl;
    return status;
}
} // namespace atb_plugin

