/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "layernorm_operation.h"
#include <iostream>
#include <vector>

// Upper bound on elements processed per tile (presumably sized to the on-chip
// buffer budget of the kernel — confirm against the kernel implementation).
constexpr uint32_t MAX_TILING_SIZE = 5120;
// Number of AI cores the row dimension is split across when launching the kernel.
constexpr uint32_t BLOCK_DIM = 8;

namespace atb_plugin {

LayernormOperation::LayernormOperation(const std::string &name, LayernormParam param) : name_(name), param_(param) {}

LayernormOperation::~LayernormOperation() {}

std::string LayernormOperation::GetName() const { return this->name_; }

// Derives the output tensor descriptor from the first input: layernorm is
// element-wise over rows, so format, dtype and shape are propagated unchanged.
atb::Status LayernormOperation::InferShape(
    const atb::SVector<atb::TensorDesc> &inTensorDescs, atb::SVector<atb::TensorDesc> &outTensorDescs) const
{
    std::cout << this->name_ << "LayernormOperation infer shape start" << std::endl;
    const atb::TensorDesc &srcDesc = inTensorDescs.at(0);
    atb::TensorDesc &dstDesc = outTensorDescs.at(0);
    dstDesc.format = srcDesc.format;
    dstDesc.dtype = srcDesc.dtype;
    dstDesc.shape = srcDesc.shape;
    // Only dims[0] is logged; higher dimensions are omitted from the trace.
    std::cout << name_ << "LayernormOperation infer shape end"
            << " format: " << srcDesc.format << " inDimNum: " << srcDesc.shape.dimNum
            << " inDims: " << srcDesc.shape.dims[0] << " outDimNum: " << dstDesc.shape.dimNum
            << " outDims: " << dstDesc.shape.dims[0] << std::endl;
    return 0;
}

// The operation consumes three input tensors (NUM3 is a project-wide constant;
// presumably x, gamma and beta — confirm against the kernel's operand order).
uint32_t LayernormOperation::GetInputNum() const { return NUM3; }

// The operation produces a single output tensor (NUM1 is a project-wide constant).
uint32_t LayernormOperation::GetOutputNum() const { return NUM1; }

// Computes the host-side tiling plan for the layernorm kernel from the first
// input tensor's shape and reports the workspace size needed by Execute().
//
// The plan splits rows across BLOCK_DIM cores: each core handles rowNum rows,
// and the first blockPivot cores handle one extra row (rowNumSp) to absorb the
// remainder. A tile holds tileLoop whole rows, capped at MAX_TILING_SIZE
// elements.
//
// Returns 0 on success, atb::ERROR_INVALID_PARAM when the input shape cannot
// be tiled (rank < 2, or a row longer than MAX_TILING_SIZE).
atb::Status LayernormOperation::Setup(const atb::VariantPack &variantPack, uint64_t &workspaceSize,
                                      atb::Context *context)
{
    std::cout << this->name_ << "setup start" << std::endl;
    const auto &inShape = variantPack.inTensors.at(0).desc.shape;

    // The tiling reads dims[0] (row count) and dims[1] (row length); the
    // original code indexed both without checking, reading out of bounds on
    // rank-1 inputs.
    if (inShape.dimNum < 2) {
        std::cout << this->name_ << "setup fail: input dimNum must be >= 2" << std::endl;
        return atb::ERROR_INVALID_PARAM;
    }

    // Total element count across all dimensions (matches the original product
    // over every dim, not just dims[0] * dims[1]).
    int64_t elementCount = 1;
    for (size_t i = 0; i < inShape.dimNum; i++) {
        elementCount *= inShape.dims[i];
    }

    const uint32_t rowTotalNum = inShape.dims[0];
    const int64_t rowLength = inShape.dims[1];
    // rowLength <= 0 would divide by zero computing tileLoop; a row longer
    // than MAX_TILING_SIZE would make tileLoop == 0 and divide by zero
    // computing loopCount.
    if (rowLength <= 0 || rowLength > MAX_TILING_SIZE) {
        std::cout << this->name_ << "setup fail: unsupported row length " << rowLength << std::endl;
        return atb::ERROR_INVALID_PARAM;
    }

    // Cap the tile at MAX_TILING_SIZE elements, then shrink it to a whole
    // number of rows so a tile never straddles a row boundary.
    this->tiling_.tileLength = elementCount <= MAX_TILING_SIZE ? elementCount : MAX_TILING_SIZE;
    this->tiling_.rowLength = rowLength;
    this->tiling_.rowNum = rowTotalNum / BLOCK_DIM;
    this->tiling_.rowNumSp = this->tiling_.rowNum + 1;
    this->tiling_.blockPivot = rowTotalNum % BLOCK_DIM;
    this->tiling_.tileLoop = this->tiling_.tileLength / this->tiling_.rowLength;
    this->tiling_.tileLength = this->tiling_.tileLoop * this->tiling_.rowLength;
    this->tiling_.loopCount = this->tiling_.rowNum / this->tiling_.tileLoop;
    this->tiling_.factor = param_.factor;
    this->tiling_.mfactor = param_.mfactor;
    this->tiling_.eps = param_.eps;

    // The workspace only carries the tiling data that Execute() copies to device.
    workspaceSize = sizeof(LayerNormTilingData);
    std::cout << this->name_ << "setup end. " << "WorkspaceSize:" << workspaceSize << std::endl;

    return 0;
}

// Copies the tiling plan computed by Setup() into the device workspace and
// launches the layernorm kernel on the context's execute stream.
//
// Returns 0 on success, the aclrt error code if the host-to-device tiling
// copy fails, or atb::ERROR_INVALID_PARAM for an unusable workspace.
atb::Status LayernormOperation::Execute(const atb::VariantPack &variantPack, uint8_t *workspace,
    uint64_t workspaceSize, atb::Context* context)
{
    std::cout << this->name_ << "execute start" << std::endl;
    aclrtStream stream = context->GetExecuteStream();

    // Setup() sized the workspace as sizeof(LayerNormTilingData); the original
    // copy hard-coded 40 bytes, which silently truncates or over-reads if the
    // struct layout ever changes. Use the real struct size for both the
    // destination capacity and the source length, and verify the workspace
    // before touching it.
    constexpr size_t tilingSize = sizeof(LayerNormTilingData);
    if (workspace == nullptr || workspaceSize < tilingSize) {
        std::cout << this->name_ << "Execute fail: workspace too small for tiling data" << std::endl;
        return atb::ERROR_INVALID_PARAM;
    }
    auto ret = aclrtMemcpyAsync(workspace, tilingSize, &(this->tiling_), tilingSize,
                                ACL_MEMCPY_HOST_TO_DEVICE, stream);
    if (ret != 0) {
        std::cout << this->name_ << "Tiling h2d memory copy fail" << std::endl;
        return ret;
    }

    // Launch the kernel across BLOCK_DIM cores. Operands are the three inputs
    // in declared order plus the single output (presumably x, gamma, beta, y —
    // confirm against the LayerNorm_do signature); the workspace holds the
    // device-side tiling data.
    LayerNorm_do(BLOCK_DIM, stream,
                 static_cast<uint8_t*>(variantPack.inTensors.at(0).deviceData),
                 static_cast<uint8_t*>(variantPack.inTensors.at(NUM1).deviceData),
                 static_cast<uint8_t*>(variantPack.inTensors.at(NUM2).deviceData),
                 static_cast<uint8_t*>(variantPack.outTensors.at(0).deviceData),
                 nullptr, workspace);
    std::cout << this->name_ << "execute end" << std::endl;
    return 0;
}

} // namespace atb_plugin