/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file flash_attention_score_common.h
 * \brief
 */
#ifndef FLASH_ATTENTION_SCORE_COMMON_H
#define FLASH_ATTENTION_SCORE_COMMON_H

#include "kernel_data_copy_transpose.h"
#include "kernel_operator.h"
#include "kernel_tiling/kernel_tiling.h"
#include "lib/matmul_intf.h"
#include "lib/matrix/matmul/tiling.h"
#include "stdarg.h"

// Matmul configurations with the "exceed" option enabled (provided by the matmul
// tiling headers); CFG_IBSHARE_EXCEED additionally enables IB (L1 buffer) sharing.
constexpr MatmulConfig CFG_EXCEED = GetNormalConfig(true);
constexpr MatmulConfig CFG_IBSHARE_EXCEED = GetIBShareNormConfig(true);
constexpr static uint64_t BLOCK_BYTE = 32; // bytes per unified-buffer block
// Alignment required by the SoftMax basic-block path (rows / columns), see IsBasicBlockInSoftMax.
constexpr static int32_t SOFTMAX_M_ALIGNED_SIZE = 8;
constexpr static int32_t SOFTMAX_K_ALIGNED_SIZE = 64;
constexpr static uint64_t DATACOPYPAD_PADDING_VALUE_ZERO = 0;
// Bit patterns of the extreme finite values: 0xFF7FFFFF = -FLT_MAX, 0x7F7FFFFF = +FLT_MAX,
// 0xFBFF = -65504 (fp16 lowest), 0x7BFF = +65504 (fp16 max). Used by GetExtremeValue.
// NOTE(review): "VAULE" is a typo for "VALUE"; names kept as-is since other files reference them.
constexpr static uint32_t NEGATIVE_MIN_VAULE_FP32 = 0xFF7FFFFF;
constexpr static uint32_t NEGATIVE_MIN_VAULE_FP16 = 0xFBFF;
constexpr static uint32_t POSITIVE_MAX_VALUE_FP32 = 0x7F7FFFFF;
constexpr static uint32_t POSITIVE_MAX_VALUE_FP16 = 0x7BFF;
constexpr static uint16_t SOFTMAX_CHECK_RES_DEFAULT_VALUE = 0xFFFF; // all 16 check bits set
constexpr static int32_t repeatMaxBytes = 256; // max bytes processed by one vector-instruction repeat
constexpr static int32_t repeatMaxTimes = 255; // max repeat count of a level-0 vector instruction
constexpr static int32_t repeatMaxSize = repeatMaxBytes / 4; // 4 means sizeof(T)
// pse (positional bias) shape-type ids; names encode the broadcastable layout.
constexpr static int64_t pseS1S2 = 0;   // pse shaped [S1, S2]
constexpr static int64_t pse1S2 = 1;    // pse shaped [1, S2], broadcast over S1
constexpr static int64_t pse1NS1S2 = 2; // pse shaped [1, N, S1, S2]
// attention-mask shape-type ids; names encode the mask layout.
constexpr static int64_t attenMaskBN2GS1S2 = 0;
constexpr static int64_t attenMaskBS1S2 = 1;
constexpr static int64_t attenMaskS1S2 = 2;
constexpr static int64_t attenMaskTT = 99; // presumably the TND/unpadded layout case — TODO confirm
constexpr static uint16_t PREFIX_N_MAX_B = 32;
constexpr static int32_t blockBytes = 32;
// The block stride range of level-0 APIs must satisfy 32B alignment.
constexpr static int32_t blockSize = blockBytes / 4; // 4 means sizeof(T)
constexpr static int32_t byteBitRatio = 8; // bits per byte
constexpr static int32_t fp32BaseSize = 8; // fp32 elements per 32B block

// Sparse attention pattern selector (matches the operator's sparse_mode attribute).
// Descriptions are inferred from the enumerator names — confirm against the op spec.
enum class SparseModeEnum {
    ALL = 0,                    // defaultMask: full S1 x S2 mask
    NONE = 1,                   // no mask applied
    ANY = 2,                    // arbitrary user-supplied mask
    CAUSAL = 3,                 // lower-triangular (left-up aligned) causal mask
    BAND = 4,                   // band (pre/next tokens) mask
    PREFIX = 5,                 // prefix mask
    BAND_COMPRESS = 6,          // band mask in compressed form
    RIGHT_DOWN_CAUSAL = 7,      // causal mask aligned to the bottom-right corner
    RIGHT_DOWN_CAUSAL_BAND = 8, // mixed: right-down causal plus band
    BAND_LEFT_UP_CAUSAL = 9     // mixed: band plus left-up causal
};

enum class ImplModeEnum { AA_HIGH_PRECISION = 0, AA_HIGH_PERFORMANCE = 1, AA_INVALID_LINE_HIGH_PRECISION = 2};

// How the attention mask is stored/compressed; the causal variants differ in which
// corner of the S1 x S2 plane the triangle is anchored to.
enum class AttenMaskCompressMode { NO_COMPRESS_MODE = 0, LEFT_UP_CAUSAL_MODE = 1, RIGHT_DOWN_CAUSAL_MODE = 2,
                                   BAND_MODE = 3, PREFIX_MODE = 4, RIGHT_DOWN_CAUSAL_BAND_MODE = 5,
                                   BAND_LEFT_UP_CAUSAL_MODE = 6};

// Per-tile decision of which mask computation a basic block needs
// (e.g. NO_NEED_COMPUTE_MODE: the tile is entirely unmasked or entirely masked).
enum class AttenMaskComputeMode { NORMAL_MODE = 0, CAUSAL_OR_NEXT_ONLY_MODE, PRE_ONLY_MODE,
                                  PRE_AND_NEXT_MODE, NO_NEED_COMPUTE_MODE, PREFIX_COMPUTE_MODE, PREFIX_N_COMPUTE_MODE};

enum class LayOutTypeEnum { None = 0, LAYOUT_BSH = 1, LAYOUT_SBH = 2, LAYOUT_BNSD = 3, LAYOUT_TND = 4};

namespace math {
/*!
 * \brief Ceiling division: smallest integer >= a / b (for non-negative a, positive b).
 * \note Asserts b != 0 in CPU-simulation builds, consistent with math::Align below.
 */
template <typename T> __aicore__ inline T Ceil(T a, T b)
{
#ifdef __CCE_KT_TEST__
    assert(b != 0 && "b cannot be 0");
#endif
    return (a + b - 1) / b;
}

/*!
 * \brief Round a up to the nearest multiple of b (for non-negative a, positive b).
 */
template <typename T> __aicore__ inline T Align(T a, T b)
{
#ifdef __CCE_KT_TEST__
    assert(b != 0 && "b cannot be 0");
#endif
    return (a + b - 1) / b * b;
}
} // namespace math

// Returns true when the (srcM, srcK) shape meets the alignment the SoftMax
// basic-block fast path requires (rows % 8 == 0 and columns % 64 == 0).
__aicore__ inline bool IsBasicBlockInSoftMax(int32_t srcM, int32_t srcK)
{
    const bool rowsAligned = (srcM % SOFTMAX_M_ALIGNED_SIZE) == 0;
    const bool colsAligned = (srcK % SOFTMAX_K_ALIGNED_SIZE) == 0;
    return rowsAligned && colsAligned;
}

// Tests whether bit bitIdx of the softmax check bitmap is set
// (i.e. the corresponding row group contains an invalid line).
__aicore__ inline bool hasInvalidLine(uint16_t softMaxCheckRes, uint32_t bitIdx)
{
    const uint16_t shifted = softMaxCheckRes >> bitIdx;
    return (shifted & 0x01) != 0;
}

// Sets (bitValue == true) or clears (bitValue == false) bit bitIdx of the
// softmax check bitmap in place.
__aicore__ inline void UpdateSoftMaxCheckRes(uint16_t &softMaxCheckRes, uint32_t bitIdx, bool bitValue)
{
    const uint16_t mask = static_cast<uint16_t>(1 << bitIdx);
    if (bitValue) {
        softMaxCheckRes = softMaxCheckRes | mask;
    } else {
        softMaxCheckRes = softMaxCheckRes & static_cast<uint16_t>(~mask);
    }
}

// Returns true when any check bit is set in the selected range of the bitmap.
// Note the asymmetric bounds, preserved from the original contract:
//   bitIdxA == 0  -> tests bits [0, bitIdxB)   (bitIdxB exclusive)
//   bitIdxA  > 0  -> tests bits [bitIdxA, bitIdxB] (both inclusive)
__aicore__ inline bool IsIncludeInvalidLine(uint16_t softMaxCheckRes, uint32_t bitIdxB, uint32_t bitIdxA = 0)
{
    uint16_t mask;
    if (bitIdxA == 0) {
        mask = static_cast<uint16_t>((1 << bitIdxB) - 1);
    } else {
        const uint16_t width = static_cast<uint16_t>((1 << (bitIdxB - bitIdxA + 1)) - 1);
        mask = static_cast<uint16_t>(width << bitIdxA);
    }
    return (softMaxCheckRes & mask) != 0;
}

/*!
 * \brief Loads the most negative and most positive finite values for T into the
 *        output references. T is float (FP32 patterns) or, in the else branch,
 *        assumed to be half (FP16 patterns) — no other types are handled.
 * \param negativeScalar out: lowest finite value (-FLT_MAX / -65504.0h)
 * \param positiveScalar out: largest finite value (+FLT_MAX / +65504.0h)
 * \note Reinterprets the raw bit pattern through a pointer cast; relies on the
 *       device toolchain tolerating this form of type punning.
 */
template <typename T>
__aicore__ inline void GetExtremeValue(T &negativeScalar, T &positiveScalar)
{
    if constexpr (IsSameType<T, float>::value) {
        // 0xFF7FFFFF = -FLT_MAX, 0x7F7FFFFF = +FLT_MAX
        uint32_t tmp1 = NEGATIVE_MIN_VAULE_FP32;
        uint32_t tmp2 = POSITIVE_MAX_VALUE_FP32;
        negativeScalar = *((float *)&tmp1);
        positiveScalar = *((float *)&tmp2);
    } else {
        // 0xFBFF = fp16 lowest (-65504), 0x7BFF = fp16 max (+65504)
        uint16_t tmp1 = NEGATIVE_MIN_VAULE_FP16;
        uint16_t tmp2 = POSITIVE_MAX_VALUE_FP16;
        negativeScalar = *((half *)&tmp1);
        positiveScalar = *((half *)&tmp2);
    }
}

/*!
 * \brief Copies a d0 x d1 2D region from global memory into a local tensor.
 *        When both the row width d1 and the source pitch orgD1 are whole 32B
 *        blocks, uses a plain strided DataCopy; otherwise uses DataCopyPad and
 *        fills each row's unaligned tail with paddingValue.
 * \param dstLocal destination tensor in local (UB) memory
 * \param srcGlobal source tensor in global memory
 * \param d0 number of rows to copy
 * \param d1 elements per row to copy
 * \param orgD1 source row pitch in elements (>= d1)
 * \param paddingValue fill value for the padded tail of each row
 */
template <typename T>
__aicore__ inline void DataCopy2D(const LocalTensor<T> &dstLocal, const GlobalTensor<T> &srcGlobal, const uint32_t d0,
    const uint32_t d1, const uint32_t orgD1, uint64_t paddingValue = 0)
{
    if (d1 % (BLOCK_BYTE / sizeof(T)) == 0 && orgD1 % (BLOCK_BYTE / sizeof(T)) == 0) {
        // Fully block-aligned: stride is expressed in 32B blocks, no padding needed.
        auto d1Blocks = math::Ceil(d1 * sizeof(T), BLOCK_BYTE);
        auto orgD1Blocks = math::Ceil(orgD1 * sizeof(T), BLOCK_BYTE);
        DataCopyParams copyParams(d0, d1Blocks, orgD1Blocks - d1Blocks, 0);
        DataCopy(dstLocal, srcGlobal, copyParams);
    } else {
        auto d1Bytes = d1 * sizeof(T);
        auto d1Aligned = math::Align(static_cast<int64_t>(d1), static_cast<int64_t>(BLOCK_BYTE / sizeof(T)));
#ifdef __CCE_KT_TEST__
        // The DataCopyParams count/len fields are uint16_t; verify the narrowing casts below are safe.
        assert(d0 <= 65535 && "count must be in [0, 65535]");
        assert(d1Bytes <= 65535 && "len must be in [0, 65535]");
#endif
        DataCopyParams copyParams(static_cast<uint16_t>(d0), static_cast<uint16_t>(d1Bytes),
            orgD1 * sizeof(T) - d1Bytes, 0);
        DataCopyPadParams padParams(true, 0, static_cast<uint8_t>(d1Aligned - d1), paddingValue);
        DataCopyPad(dstLocal, srcGlobal, copyParams, padParams);
    }
}
// Ceiling division that tolerates b == 0 by returning 0 instead of trapping.
template <typename T1, typename T2>
__aicore__ inline T1 CeilDiv(T1 a, T2 b)
{
    return (b == 0) ? static_cast<T1>(0) : static_cast<T1>((a + b - 1) / b);
}

// Returns the larger of a and b, converted to T1 (ties favor b, as in the original).
template <typename T1, typename T2>
__aicore__ inline T1 Max(T1 a, T2 b)
{
    if (a > b) {
        return a;
    }
    return b;
}

// Returns the smaller of a and b, converted to T1 (ties favor a, as in the original).
template <typename T1, typename T2>
__aicore__ inline T1 Min(T1 a, T2 b)
{
    if (a > b) {
        return b;
    }
    return a;
}

/*!
 * \brief Rounds shape up to the nearest multiple of alignFactor.
 * \param shape value to align
 * \param alignFactor alignment granularity; defaults to 16 to preserve the
 *        previous hard-coded behavior for existing callers
 * \return aligned size (0 when alignFactor is 0, via CeilDiv's zero guard)
 */
__aicore__ inline int32_t Align(int32_t shape, int32_t alignFactor = 16)
{
    return CeilDiv(shape, alignFactor) * alignFactor;
}

/*!
 * \brief In-place broadcast add: adds the single row src1Tensor[0..src1Size) to
 *        repeatTimes consecutive rows of src0Tensor starting at src0Offset.
 *        src1RepStride = 0 makes every repeat re-read the same src1 row, which
 *        realizes the broadcast along the s1 axis.
 * \param src0Tensor destination rows, updated in place
 * \param src1Tensor the row being broadcast-added
 * \param src0Offset element offset of the first destination row
 * \param src1Size row length in elements; src1Size / blockSize implies rows are
 *        assumed to be a whole number of 32B blocks — TODO confirm
 * \param repeatTimes number of rows to update; must be <= repeatMaxTimes
 */
template <typename T>
__aicore__ inline void BroadcastAdd(
    const LocalTensor<T> &src0Tensor, const LocalTensor<T> &src1Tensor, int64_t src0Offset, int32_t src1Size,
    int32_t repeatTimes)
{
    /* Total data number of single step should be smaller than 256bytes.
     * If larger, we need to do add multiple times. */
    int32_t innerLoop = src1Size / repeatMaxSize;   // number of full repeatMaxSize tiles along the s2 axis
    int32_t innerRemain = src1Size % repeatMaxSize; // tail-tile size along the s2 axis
    BinaryRepeatParams binaryRepeatParams;
    binaryRepeatParams.src0BlkStride = 1;
    binaryRepeatParams.src0RepStride = src1Size / blockSize; // one repeat advances a full row in src0
    binaryRepeatParams.src1BlkStride = 1;
    binaryRepeatParams.src1RepStride = 0; // stay on the same src1 row -> broadcast
    binaryRepeatParams.dstRepStride = binaryRepeatParams.src0RepStride;
    binaryRepeatParams.blockNumber = binaryRepeatParams.src0RepStride;

    // Full tiles: each Add handles repeatMaxSize elements per row across repeatTimes rows.
    for (int32_t j = 0; j < innerLoop; j++) {
        auto innerOffset = j * repeatMaxSize;
        auto ubOffset = src0Offset + innerOffset;
        Add(src0Tensor[ubOffset], src0Tensor[ubOffset], src1Tensor[innerOffset], repeatMaxSize, repeatTimes,
            binaryRepeatParams);
    }
    // Tail tile: remaining elements of each row, same broadcast pattern.
    if (innerRemain > 0) {
        auto innerOffset = innerLoop * repeatMaxSize;
        auto ubOffset = src0Offset + innerOffset;
        Add(src0Tensor[ubOffset], src0Tensor[ubOffset], src1Tensor[innerOffset], innerRemain, repeatTimes,
            binaryRepeatParams);
    }
}
/*!
 * \brief Adds the pse (positional bias) tile onto dstTensor in place.
 *        For a full [S1, S2] pse the shapes match and a flat add suffices;
 *        otherwise the single pse row is broadcast-added over the s1 rows.
 * \param s1Size number of rows in the destination tile
 * \param s2Size row length in elements
 * \param computeSize total element count of the tile (flat-add case)
 * \param pseUb pse data resident in UB
 * \param dstTensor destination tile, updated in place
 * \param pseShapeType one of pseS1S2 / pse1S2 / pse1NS1S2
 */
template <typename T>
__aicore__ inline void PseBroadcastAdd(int32_t s1Size,
    int32_t s2Size, int32_t computeSize, const LocalTensor<T> &pseUb, const LocalTensor<T> &dstTensor, uint32_t pseShapeType)
{
    if (pseShapeType == pseS1S2) {
        // Shapes match element-for-element: plain elementwise add.
        Add(dstTensor, dstTensor, pseUb, computeSize);
        return;
    }
    // Broadcast case: one Add issue repeats at most repeatMaxTimes times, so
    // split the s1 rows into full chunks plus a tail.
    const int32_t fullChunks = s1Size / repeatMaxTimes;
    const int32_t tailRows = s1Size % repeatMaxTimes;
    for (int32_t chunk = 0; chunk < fullChunks; ++chunk) {
        const int32_t rowOffset = chunk * repeatMaxTimes * s2Size;
        BroadcastAdd(dstTensor, pseUb, rowOffset, s2Size, repeatMaxTimes);
    }
    if (tailRows > 0) {
        const int32_t rowOffset = fullChunks * repeatMaxTimes * s2Size;
        BroadcastAdd(dstTensor, pseUb, rowOffset, s2Size, tailRows);
    }
}

// Computes the element offset into a causal attention-mask template from the
// diagonal displacement delta, clamping to the basic-block extent: a positive
// delta moves down whole rows (x attenMaskS2Size); delta <= 0 moves right.
__aicore__ inline int64_t ComputeOffsetForCausal(
    const int64_t &delta, const uint32_t &s1BaseSize, const uint32_t &s2BaseSize, const uint32_t &attenMaskS2Size)
{
    if (delta > 0) {
        return Min(delta, s2BaseSize) * attenMaskS2Size;
    }
    return Min(-delta, s1BaseSize);
}

// Computes the element offset of the rectangular part of a prefix attention
// mask. The rectangle region starts right after the attenMaskS2Size^2 square
// (the mask's S1 equals S2 here), with delta selecting the column inside it.
__aicore__ inline int64_t ComputeOffsetForPrefixRectangle(
    const int64_t &delta, const uint32_t &s2BaseSize, const uint32_t &attenMaskS2Size)
{
    const uint32_t squareArea = attenMaskS2Size * attenMaskS2Size; // e.g. 2048 * 2048
    const uint32_t halfWidth = attenMaskS2Size / 2;                // e.g. 1024
    if (delta <= 0) {
        return squareArea + halfWidth;
    }
    if (delta > s2BaseSize) {
        return squareArea;
    }
    return squareArea + halfWidth - delta + 1;
}

#endif // FLASH_ATTENTION_SCORE_COMMON_H
