#include "broadcast_helper.h"
#include "kernel_operator.h"
#include "op_common.h"
#include <cstdint>
#include <float.h>
#include <type_traits>

using namespace AscendC;
template <typename _DT_X, typename _DT_Y> class Kernel {
public:
  TPipe pipe;
  DefInTensor(X);
  DefGlobalTensor(Y);

  DefOutQue(MIN);
  DefOutQue(MAX);

  DefBufVECIN(CASTED_X);   // float
  DefBufVECIN(TEMP1);      // float
  DefBufVECIN(TEMP2);      // int8_t
  DefBufVECIN(TEMP3);      // float
  DefBufVECIN(wORK_LOCAL); // float
  int64_t bins;
  float min;
  float max;
  float step;
  int32_t minInt;
  int32_t maxInt;

  int64_t tileLength;
  int64_t size;
  int64_t finalLength;

public:
  __aicore__ inline Kernel() {}

  __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, int64_t bins, float min,
                              float max, int64_t tileLength, int64_t size,
                              int64_t finalLength) {

    this->bins = bins;
    this->min = min;
    this->max = max;
    this->tileLength = tileLength;
    this->finalLength = finalLength;
    this->size = size;
    // 切分global
    GSetBuffer(X, x, 0, size);
    GSetBuffer(Y, y, 0, bins);
    // 初始化队列
    InitQueueSimple(X, tileLength);

    constexpr int64_t alignSizeX = 32 / sizeof(TypeOf(X));
    constexpr int64_t elementPerRepeatX = 256 / sizeof(TypeOf(X));
    constexpr int64_t alignSizeFloat = 32 / sizeof(float);
    constexpr int64_t elementPerRepeatFloat = 256 / sizeof(float);

    // 判断是否需要求最大最小值
    if (min == 0 && max == 0) {
      InitQueue(MIN, 32);
      InitQueue(MAX, 32);
    }
    // 初始化BUF
    if constexpr (std::is_same_v<TypeOf(X), half> ||
                  std::is_same_v<TypeOf(X), int32_t>) {
      InitTBufBuffer(CASTED_X, tileLength * sizeof(float));
    }
    InitTBufBuffer(TEMP1, tileLength * sizeof(float));
    InitTBufBuffer(TEMP2, tileLength);
    InitTBufBuffer(TEMP3, tileLength * sizeof(float));

    auto work_locallenth =
        ALIGN_TO(tileLength / elementPerRepeatFloat * 2, elementPerRepeatFloat);
    InitTBufBuffer(wORK_LOCAL, work_locallenth * sizeof(float));
  }

  template <typename T> __aicore__ inline int64_t GetBin(T bVal) {

    auto bin = (((bVal - min)) * bins / (max - min));
    // (only applicable for histc)
    // while each bin is inclusive at the lower end and exclusive at the higher,
    // i.e. [start, end) the last bin is inclusive at both, i.e. [start, end],
    // in order to include maxvalue if exists therefore when bin == nbins,
    // adjust bin to the last bin
    if (bin == bins)
      bin -= 1;
    return bin;
  }

  __aicore__ inline void Process() {
    auto loopCount = CEIL_DIV(size, tileLength);
    auto finnal_progress = loopCount - 1;
    if (min == 0 && max == 0) {
      min = FLT_MAX;
      max = FLT_MIN;
      if constexpr (std::is_same_v<_DT_X, half>) {
        for (auto i = 0; i < size; ++i) {
          float x_v = GTensorName(X).GetValue(i);
          if (x_v < min) {
            min = x_v;
          }
          if (x_v > max) {
            max = x_v;
          }
        }
      } else {
        for (uint32_t i = 0; i < finnal_progress; ++i) {
          CopyIn(i, tileLength);
          ComputeMinMax(tileLength);
          CopyOutMinMax(i, tileLength);
        }
        CopyIn(finnal_progress, finalLength);
        ComputeMinMax(finalLength);
        CopyOutMinMax(finnal_progress, finalLength);
      }
    }

    minInt = MY_SCALAR_REINTERPRET_CAST(int32_t, min);
    maxInt = MY_SCALAR_REINTERPRET_CAST(int32_t, max);

    step = (max - min) / float(bins);
    if constexpr (std::is_same_v<TypeOf(X), float>) {
      for (auto i = 0; i < bins; ++i) {
        GTensorName(Y).SetValue(i, float(0));
      }
      for (auto i = 0; i < size; ++i) {
        float x_v = GTensorName(X).GetValue(i);
        if (x_v >= min && x_v <= max) {
          auto bin = GetBin(x_v);
          auto counter = GTensorName(Y).GetValue(bin) + 1;
          GTensorName(Y).SetValue(bin, counter);
        }
      }
    } else if constexpr (std::is_same_v<TypeOf(X), int32_t>) {
      for (auto i = 0; i < bins; ++i) {
        GTensorName(Y).SetValue(i, int32_t(0));
      }
      for (auto i = 0; i < size; ++i) {
        float x_v = GTensorName(X).GetValue(i);
        if (x_v >= min && x_v <= max) {
          auto bin = GetBin(x_v);
          auto counter = GTensorName(Y).GetValue(bin) + 1;
          GTensorName(Y).SetValue(bin, int32_t(counter));
        }
      }
    } else if constexpr (std::is_same_v<TypeOf(X), half>) {
      for (auto i = 0; i < bins; ++i) {
        GTensorName(Y).SetValue(i, half(0));
      }
      for (auto i = 0; i < size; ++i) {
        float x_v = GTensorName(X).GetValue(i);
        if (x_v >= min && x_v <= max) {
          auto bin = GetBin(x_v);
          auto counter = float(GTensorName(Y).GetValue(bin)) + 1;
          GTensorName(Y).SetValue(bin, half(counter));
        }
      }
    }
  }

  __aicore__ inline void GetMinMax(LocalTensor<float> x, uint32_t calcCount) {
    QueAlloc(MIN, float);
    QueAlloc(MAX, float);
    TBufGet(wORK_LOCAL, float);
    ReduceMin(LTensorName(MIN), x, TBufTensorName(wORK_LOCAL), calcCount);
    ReduceMax(LTensorName(MAX), x, TBufTensorName(wORK_LOCAL), calcCount);
    EnQue(MIN);
    EnQue(MAX);
  }

  __aicore__ inline void ComputeMinMax(uint32_t calcCount) {
    DeQueSimple(X);
    if constexpr (std::is_same_v<TypeOf(X), int32_t>) {
      TBufGet(CASTED_X, float);
      Cast(TBufTensorName(CASTED_X), LTensorName(X), RoundMode::CAST_FLOOR,
           calcCount);
      GetMinMax(TBufTensorName(CASTED_X), calcCount);
    } else if constexpr (std::is_same_v<TypeOf(X), half>) {
      QueAlloc(MIN, half);
      QueAlloc(MAX, half);
      TBufGet(wORK_LOCAL, half);
      ReduceMin(LTensorName(MIN), LTensor(X), TBufTensorName(wORK_LOCAL),
                calcCount);
      ReduceMax(LTensorName(MAX), LTensor(X), TBufTensorName(wORK_LOCAL),
                calcCount);
      EnQue(MIN);
      EnQue(MAX);
      // TBufGet(CASTED_X, float);
      // Cast(TBufTensorName(CASTED_X), LTensorName(X), RoundMode::CAST_NONE,
      //      calcCount);
      // GetMinMax(TBufTensorName(CASTED_X), calcCount);
    } else {
      GetMinMax(LTensorName(X), calcCount);
    }
    QueFree(X);
  }

  __aicore__ inline void CopyOutMinMax(uint32_t i, uint32_t calcCount) {
    if constexpr (std::is_same_v<TypeOf(X), half>) {
      DeQue(MIN, half);
      DeQue(MAX, half);
      float ret_min = LTensorName(MIN).GetValue(0);
      if (ret_min < min) {
        min = ret_min;
      }
      float ret_max = LTensorName(MAX).GetValue(0);
      if (ret_max > max) {
        max = ret_max;
      }
      QueFree(MIN);
      QueFree(MAX);
    } else {
      DeQue(MIN, float);
      DeQue(MAX, float);
      auto ret_min = LTensorName(MIN).GetValue(0);
      if (ret_min < min) {
        min = ret_min;
      }
      auto ret_max = LTensorName(MAX).GetValue(0);
      if (ret_max > max) {
        max = ret_max;
      }
      QueFree(MIN);
      QueFree(MAX);
    }
  }

  __aicore__ inline void CopyIn(uint32_t i, uint32_t calcCount) {
    constexpr int64_t alignSize = 32 / sizeof(TypeOf(X));
    EnQueGlobal2Local(X, i * tileLength, ALIGN_TO(calcCount, alignSize));
  }

  template <typename T>
  __aicore__ inline void
  HistogramOnce(LocalTensor<float> x, LocalTensor<float> temp1,
                LocalTensor<int8_t> temp2, LocalTensor<int8_t> temp3,
                LocalTensor<float> work_local, uint32_t calcCount,
                uint32_t loop, int64_t i, float min_value, float max_value,
                CMPMODE rightCmpMode) {
    MY_DUP_INT32(temp1, minInt, calcCount);
    Compare(temp2, x, temp1, CMPMODE::GE, calcCount);
    MY_DUP_INT32(temp1, maxInt, calcCount);
    Compare(temp3, x, temp1, rightCmpMode, calcCount);
    auto int16_temp2 = temp2.ReinterpretCast<int16_t>();
    auto int16_temp3 = temp3.ReinterpretCast<int16_t>();
    And(int16_temp2, int16_temp2, int16_temp3, calcCount / 2);
    MY_DUP_INT32(temp1, oneFloatInt, calcCount);
    auto float_temp3 = temp3.ReinterpretCast<float>();
    Select(float_temp3, temp2, temp1, float(0),
           SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);
    ReduceSum(temp1, float_temp3, work_local, calcCount);
    if (loop == 0) {
      GTensorName(Y).SetValue(i, T(temp1.GetValue(0)));
    } else {
      GTensorName(Y).SetValue(
          i, T(float(GTensorName(Y).GetValue(i)) + temp1.GetValue(0)));
    }
  }

  template <typename T>
  __aicore__ inline void Histogram(LocalTensor<float> x, uint32_t calcCount,
                                   uint32_t loop) {
    TBufGet(TEMP1, float);
    TBufGet(TEMP2, int8_t);
    TBufGet(TEMP3, int8_t);
    TBufGet(wORK_LOCAL, float);
    for (auto i = 0; i < bins - 1; ++i) {
      HistogramOnce<T>(x, TBufTensorName(TEMP1), TBufTensorName(TEMP2),
                       TBufTensorName(TEMP3), TBufTensorName(wORK_LOCAL),
                       calcCount, loop, i, min + i * step, min + (i + 1) * step,
                       CMPMODE::LT);
    }
    HistogramOnce<T>(x, TBufTensorName(TEMP1), TBufTensorName(TEMP2),
                     TBufTensorName(TEMP3), TBufTensorName(wORK_LOCAL),
                     calcCount, loop, bins - 1, max - step, max, CMPMODE::LE);
  }

  __aicore__ inline void ComputeFloat(uint32_t loop, uint32_t calcCount) {
    DeQueSimple(X);
    Histogram<float>(LTensorName(X), calcCount, loop);
    QueFree(X);
  }
};

// Kernel entry point: unpacks the tiling data and drives the histogram op.
// `workspace` is part of the standard kernel signature but is not used here.
extern "C" __global__ __aicore__ void
histogram(GM_ADDR x, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
  GET_TILING_DATA(td, tiling);
  Kernel<DTYPE_X, DTYPE_Y> kernel;
  kernel.Init(x, y, td.bins, td.min, td.max, td.tileLength, td.size,
              td.finalLength);
  kernel.Process();
}