#include "kernel_operator.h"
#include <cstdint>
#include <cmath>
#include <type_traits>

constexpr int32_t BUFFER_NUM = 2; // tensor num for each queue

// Complex number with two 32-bit float components, laid out real-first
// (real at byte offset 0, imag at byte offset 4) to match an interleaved
// complex64 buffer.
struct complex64 {
    float real;
    float imag;
};

// 定义复数类型
struct complex128 {
    float real_1;
    float real_2;
    float imag_1;
    float imag_2;
};

// Bitwise NaN test for IEEE 754 binary32: a value is NaN exactly when its
// magnitude bits (sign cleared) exceed the +infinity bit pattern, i.e. the
// exponent field is all ones and the mantissa is non-zero.
__aicore__ inline bool IsFloat32NaN(float val) {
    const uint32_t bits = *reinterpret_cast<const uint32_t*>(&val);
    constexpr uint32_t kAbsMask = 0x7FFFFFFFu;  // clears the sign bit
    constexpr uint32_t kInfBits = 0x7F800000u;  // +infinity bit pattern
    return (bits & kAbsMask) > kInfBits;
}

// Bitwise NaN test for IEEE 754 binary64, reassembled from two 32-bit halves
// (the AI Core has no native double type). `high_float` carries bits 63..32,
// `low_float` bits 31..0.
__aicore__ inline bool IsDoubleNaN(float high_float, float low_float) {
    // Recover the raw 32-bit patterns of each half.
    const uint32_t hi = *reinterpret_cast<const uint32_t*>(&high_float);
    const uint32_t lo = *reinterpret_cast<const uint32_t*>(&low_float);
    // Rebuild the full 64-bit double bit pattern.
    const uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
    // NaN iff the magnitude bits exceed those of +infinity: exponent field
    // (bits 62..52) all ones and mantissa (bits 51..0) non-zero.
    constexpr uint64_t kAbsMask = 0x7FFFFFFFFFFFFFFFULL;  // clears the sign bit
    constexpr uint64_t kInfBits = 0x7FF0000000000000ULL;  // +infinity pattern
    return (bits & kAbsMask) > kInfBits;
}

// Bitwise NaN test for IEEE 754 binary16 (half): NaN exactly when the
// magnitude bits exceed the +infinity pattern 0x7C00 (exponent all ones,
// mantissa non-zero).
__aicore__ inline bool IsHalfNaN(half val) {
    const uint16_t bits = *reinterpret_cast<const uint16_t*>(&val);
    return static_cast<uint16_t>(bits & 0x7FFFu) > 0x7C00u;
}

// Bitwise NaN test for bfloat16: NaN exactly when the magnitude bits exceed
// the +infinity pattern 0x7F80 (8-bit exponent all ones, 7-bit mantissa
// non-zero).
__aicore__ inline bool IsBfloat16NaN(bfloat16_t val) {
    const uint16_t bits = *reinterpret_cast<const uint16_t*>(&val);
    return static_cast<uint16_t>(bits & 0x7FFFu) > 0x7F80u;
}

// A complex64 value is NaN when either of its float components is NaN
// (matches torch.isnan semantics for complex inputs).
__aicore__ inline bool IsComplex64NaN(float real, float imag) {
    const bool realIsNaN = IsFloat32NaN(real);
    const bool imagIsNaN = IsFloat32NaN(imag);
    return realIsNaN || imagIsNaN;
}

// A complex128 value is NaN when either of its double components is NaN.
// Each component arrives as two 32-bit halves (first argument of each pair
// is the high word passed straight through to IsDoubleNaN), avoiding any
// native double arithmetic on the AI Core.
__aicore__ inline bool IsComplex128NaN(float real_1, float real_2, float imag_1, float imag_2) {
    const bool realIsNaN = IsDoubleNaN(real_1, real_2);
    const bool imagIsNaN = IsDoubleNaN(imag_1, imag_2);
    return realIsNaN || imagIsNaN;
}

/**
 * Elementwise isnan kernel: reads TYPE_X elements from global memory and
 * writes one TYPE_OUT flag per element (true iff the element is NaN), using
 * scalar bit tests dispatched on TYPE_X at compile time.
 *
 * Work distribution follows the host tiling: the first `tailBlockNum` cores
 * ("big" cores) process `bigCoreDataNum` elements each, the remaining cores
 * process `smallCoreDataNum`; each core iterates over `tileNum` tiles of
 * `tileDataNum` elements, with the final tile holding `tailDataNum`.
 */
template<typename TYPE_X, typename TYPE_OUT> class KernelIsnan {
public:
    __aicore__ inline KernelIsnan() {}

    /**
     * Binds this core's slice of the global input/output buffers and
     * allocates the double-buffered queues.
     *
     * @param x / out            global addresses of input and output tensors
     * @param smallCoreDataNum   elements per "small" core
     * @param bigCoreDataNum     elements per "big" core
     * @param finalBigTileNum    tile count on big cores
     * @param finalSmallTileNum  tile count on small cores
     * @param tileDataNum        elements per full tile
     * @param smallTailDataNum   last-tile size on small cores
     * @param bigTailDataNum     last-tile size on big cores
     * @param tailBlockNum       number of big cores
     * @param dataTypeId         dtype tag from tiling (stored; dispatch here
     *                           is compile-time via TYPE_X instead)
     */
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR out, uint32_t smallCoreDataNum,
                                uint32_t bigCoreDataNum, uint32_t finalBigTileNum, 
                                uint32_t finalSmallTileNum, uint32_t tileDataNum, 
                                uint32_t smallTailDataNum, uint32_t bigTailDataNum, 
                                uint32_t tailBlockNum, uint32_t dataTypeId)
    {
        ASSERT(AscendC::GetBlockNum() != 0 && "block dim can not be zero!");
        // GetBlockIdx() is this core's index, not the core count; the old
        // name `coreNum` was misleading.
        const uint32_t blockIdx = AscendC::GetBlockIdx();
        // Start from the "every preceding core is big" offset; small cores
        // subtract the surplus below.
        uint32_t globalBufferIndex = bigCoreDataNum * blockIdx;
        this->dataTypeId = dataTypeId;
        this->tileDataNum = tileDataNum;

        if (blockIdx < tailBlockNum) {
            // Big core: handles one extra chunk of the remainder.
            this->coreDataNum = bigCoreDataNum;
            this->tileNum = finalBigTileNum;
            this->tailDataNum = bigTailDataNum;
        } else {
            // Small core: correct the offset for the big/small size delta of
            // every big core before this one.
            this->coreDataNum = smallCoreDataNum;
            this->tileNum = finalSmallTileNum;
            this->tailDataNum = smallTailDataNum;
            globalBufferIndex -= (bigCoreDataNum - smallCoreDataNum) * (blockIdx - tailBlockNum);
        }
        xGm.SetGlobalBuffer((__gm__ TYPE_X*)x + globalBufferIndex, this->coreDataNum);
        outGm.SetGlobalBuffer((__gm__ TYPE_OUT*)out + globalBufferIndex, this->coreDataNum);
        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_X));
        pipe.InitBuffer(outQueueBool, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_OUT));
    }

    /** Runs the CopyIn -> Compute -> CopyOut pipeline over all tiles. */
    __aicore__ inline void Process()
    {
        const int32_t loopCount = this->tileNum;
        this->processDataNum = this->tileDataNum;
        for (int32_t i = 0; i < loopCount; i++) {
            if (i == loopCount - 1) {
                // The last tile carries the remainder.
                this->processDataNum = this->tailDataNum;
            }
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }

private:
    // Stage 1: GM -> UB copy of one tile of input.
    __aicore__ inline void CopyIn(int32_t progress)
    {
      AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.AllocTensor<TYPE_X>();
      // NOTE(review): DataCopy assumes 32-byte-aligned transfer lengths;
      // presumably the host tiling pads processDataNum accordingly — confirm.
      AscendC::DataCopy(xLocal, xGm[progress * this->tileDataNum], this->processDataNum);
      inQueueX.EnQue(xLocal);
    }

    // Stage 2: per-element NaN test. The branch is selected at compile time
    // on TYPE_X, so only one path survives instantiation.
    __aicore__ inline void Compute(int32_t progress) {
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
        AscendC::LocalTensor<TYPE_OUT> outLocal = outQueueBool.AllocTensor<TYPE_OUT>();

        // uint32_t counter: processDataNum is unsigned, avoid the previous
        // signed/unsigned comparison.
        for (uint32_t i = 0; i < this->processDataNum; i++) {
            bool isNaN = false;
            if constexpr (std::is_same_v<TYPE_X, float>) {
                isNaN = IsFloat32NaN(xLocal.GetValue(i));
            } else if constexpr (std::is_same_v<TYPE_X, half>) {
                isNaN = IsHalfNaN(xLocal.GetValue(i));
            } else if constexpr (std::is_same_v<TYPE_X, double>) {
                // No native double on the AI Core: view each element as two
                // 32-bit words. Little-endian layout: low word at the lower
                // address, high word (sign/exponent) second.
                AscendC::LocalTensor<float> words = xLocal.template ReinterpretCast<float>();
                float lowWord  = words.GetValue(i * 2);
                float highWord = words.GetValue(i * 2 + 1);
                isNaN = IsDoubleNaN(highWord, lowWord);
            } else if constexpr (std::is_same_v<TYPE_X, bfloat16_t>) {
                isNaN = IsBfloat16NaN(xLocal.GetValue(i));
            } else if constexpr (std::is_same_v<TYPE_X, complex64>) {
                // complex64 = two floats: real then imag.
                AscendC::LocalTensor<float> parts = xLocal.template ReinterpretCast<float>();
                float real = parts.GetValue(2 * i);
                float imag = parts.GetValue(2 * i + 1);
                isNaN = IsComplex64NaN(real, imag);
            } else if constexpr (std::is_same_v<TYPE_X, complex128>) {
                // complex128 = four 32-bit words: real(low, high), imag(low, high).
                // BUGFIX: the original labeled offset +0/+2 as the HIGH words,
                // contradicting the double branch above (little-endian, low
                // word first) and so ran the exponent test on the wrong word.
                // The high halves live at offsets +1/+3.
                AscendC::LocalTensor<float> words = xLocal.template ReinterpretCast<float>();
                float realLow  = words.GetValue(i * 4 + 0);  // real, bits 31..0
                float realHigh = words.GetValue(i * 4 + 1);  // real, bits 63..32
                float imagLow  = words.GetValue(i * 4 + 2);  // imag, bits 31..0
                float imagHigh = words.GetValue(i * 4 + 3);  // imag, bits 63..32
                isNaN = IsComplex128NaN(realHigh, realLow, imagHigh, imagLow);
            } else {
                // Integer / bool dtypes can never be NaN.
                isNaN = false;
            }

            outLocal.SetValue(i, isNaN);
        }

        outQueueBool.EnQue(outLocal);
        inQueueX.FreeTensor(xLocal);
    }

    // Stage 3: UB -> GM copy with a byte-exact length (DataCopyPad handles
    // lengths that are not 32-byte aligned).
    __aicore__ inline void CopyOut(int32_t progress)
    {
      AscendC::LocalTensor<TYPE_OUT> outLocal = outQueueBool.DeQue<TYPE_OUT>();
      // sizeof(TYPE_OUT) rather than a hard-coded sizeof(int8_t): identical
      // for the bool output today, but stays correct if TYPE_OUT changes.
      AscendC::DataCopyExtParams copyParams{1, (uint32_t)(this->processDataNum * sizeof(TYPE_OUT)), 0, 0, 0};

      AscendC::DataCopyPad(outGm[progress * this->tileDataNum], outLocal, copyParams);

      outQueueBool.FreeTensor(outLocal);
    }
    
private:
    AscendC::TPipe pipe;
    AscendC::TQue<AscendC::TPosition::VECIN, BUFFER_NUM> inQueueX;
    AscendC::TQue<AscendC::TPosition::VECOUT, BUFFER_NUM> outQueueBool;
    AscendC::GlobalTensor<TYPE_X> xGm;
    AscendC::GlobalTensor<TYPE_OUT> outGm;

    // (removed the unused `copyParams` member that was shadowed by the local
    // in CopyOut)

    uint32_t coreDataNum;    // total elements assigned to this core
    uint32_t tileNum;        // number of tiles on this core
    uint32_t tileDataNum;    // elements per full tile
    uint32_t tailDataNum;    // elements in the final tile
    uint32_t processDataNum; // elements in the tile currently in flight
    uint32_t dataTypeId;     // dtype tag from tiling (stored, not read here)
};

/**
 * Kernel entry point for the elementwise isnan operator.
 * Unpacks the host-side tiling data and drives one KernelIsnan instance on
 * this core. DTYPE_X / DTYPE_OUT are supplied by the build for the dtype
 * being compiled; `workspace` is unused by this operator.
 */
extern "C" __global__ __aicore__ void isnan_custom(GM_ADDR x, GM_ADDR out, GM_ADDR workspace, GM_ADDR tiling)
{
    GET_TILING_DATA(td, tiling);
    KernelIsnan<DTYPE_X, DTYPE_OUT> op;
    op.Init(x, out,
            td.smallCoreDataNum, td.bigCoreDataNum,
            td.finalBigTileNum, td.finalSmallTileNum,
            td.tileDataNum, td.smallTailDataNum,
            td.bigTailDataNum, td.tailBlockNum,
            td.dataTypeId);
    op.Process();
}
