// Most functions in this file are adapted from the TensorFlow 2.16.2 sources:
// https://github.com/tensorflow/tensorflow/tree/v2.16.2
// The reduction parts are additionally adapted from the Eigen sources:
// https://gitlab.com/libeigen/eigen
#pragma once
#include "kernel_operator.h"
#include "local_list.h"
#include "op_common.h"
#include "simple_hashmap.h"
#include "simple_list.h"
#include "simple_transpose.h"
#include <cstdint>
#include <type_traits>
namespace AscendC {
// Reference: https://gitlab.com/libeigen/eigen/-/blob/master/Eigen/src/Core/util/XprHelper.h
// (search for functor_traits)
// Default per-application cost of a functor; specialize to override.
template <typename T> struct functor_traits {
  static constexpr float Cost = 10;
};
// Reference:
// https://gitlab.com/libeigen/eigen/-/blob/master/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
// Generic reducer traits: per-element cost and reducer capabilities.
// Specialize for concrete reducer types whose properties differ.
template <typename Reducer> struct reducer_traits {
  enum {
    Cost = 1,                   // compute cost per reduced element
    PacketAccess = false,       // no vectorized (packet) reduce path
    IsStateful = false,         // reducer carries no mutable state
    IsExactlyAssociative = true // reduction order does not change the result
  };
};

// 参考：
// https://gitlab.com/libeigen/eigen/-/blob/master/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
// Standard reduction functors
// template <typename T> struct SumReducer {
//   __aicore__ inline void reduce(const T t, T *accum) const {
//     SIMPLE_ADD(T, *accum, *accum, t);
//   }
//   __aicore__ inline void reducePacket(T *p, T *accum) const {}
//   __aicore__ inline T initialize() const { return T(0); }
//   __aicore__ inline void initializePacket(LocalTensor<T> &v) const {
//     MY_SET_ZERO(v, 8);
//   }
//   __aicore__ inline T finalize(const T accum) const { return accum; }
//   __aicore__ inline LocalTensor<T>
//   finalizePacket(const LocalTensor<T> &vaccum) const {
//     return vaccum;
//   }
//   __aicore__ inline T finalizeBoth(const T saccum,
//                                    const LocalTensor<T> &vaccum) const {}
// };

// template <typename T> struct reducer_traits<SumReducer<T>> {
//   enum {
//     Cost = 1,
//     IsStateful = false,
//     IsExactlyAssociative = std::is_same_v<T, int32_t>
//   };
// };

// Advances a multi-dimensional index over `dims` (row-major, last dimension
// fastest) by one step. Returns true while a next index exists; returns
// false once the index wraps past the final position (leaving it all-zero).
template <typename IndexType = int64_t>
__aicore__ inline bool NextIndex(const int num_dims, const int64_t *dims,
                                 IndexType *current) {
  if (num_dims == 0) {
    return false;
  }
  // Walk from the innermost dimension outwards, like incrementing a
  // mixed-radix counter: a digit that hits its radix wraps and carries.
  for (int dim = num_dims - 1; dim >= 0; --dim) {
    const IndexType bumped = current[dim] + 1;
    if (bumped == dims[dim]) {
      current[dim] = 0; // wrap and carry into the next-outer digit
    } else {
      current[dim] = bumped;
      return true; // carry absorbed: a next index exists
    }
  }
  return false; // carried past the outermost dimension: iteration finished
}

// Flattens `index` (a full multi-dimensional index into `dims`) into a
// linear offset over the output tensor, skipping every dimension listed in
// `axis` (the reduced dimensions contribute nothing to the output offset).
// With axis == nullptr no dimension is skipped.
__aicore__ inline size_t ReducedOutputOffset(const int64_t num_dims,
                                             const int64_t *dims,
                                             const int64_t *index,
                                             const int64_t num_axis,
                                             const int64_t *axis) {
  if (num_dims == 0) {
    return 0;
  }
  size_t flat = 0;
  for (int dim = 0; dim < num_dims; ++dim) {
    // Determine whether this dimension is one of the reduced axes.
    bool reduced = false;
    if (axis != nullptr) {
      for (int a = 0; a < num_axis; ++a) {
        if (axis[a] == dim) {
          reduced = true;
          break;
        }
      }
    }
    if (!reduced) {
      flat = flat * static_cast<size_t>(dims[dim]) +
             static_cast<size_t>(index[dim]);
    }
  }
  return flat;
}

// Sorts arr[left..right] (inclusive) ascending with insertion sort. Stable,
// O(n^2) worst case - appropriate for the short shape/axis arrays used here.
//
// Bug fix: `key` was previously declared int64_t regardless of T, which
// truncated values (and could therefore mis-sort) for floating-point
// element types.
template <typename T>
__aicore__ inline void insertionSort(T *arr, int64_t left, int64_t right) {
  for (int64_t i = left + 1; i <= right; ++i) {
    T key = arr[i]; // element to insert into the sorted prefix
    int64_t j = i - 1;

    // Shift larger elements of the sorted prefix one slot to the right.
    while (j >= left && arr[j] > key) {
      arr[j + 1] = arr[j];
      --j;
    }

    arr[j + 1] = key;
  }
}

// Drops every size-1 dimension from shape_out (length out_num_dims) and
// updates axis_out (length out_num_axis) to match: an axis pointing at a
// dropped dimension is removed, and axes behind it shift down by one. Both
// lengths are updated in place.
__aicore__ inline void RemoveSize1Dims(int64_t *shape_out,
                                       int64_t &out_num_dims, int64_t *axis_out,
                                       int64_t &out_num_axis) {
  int64_t dim = 0;
  while (dim < out_num_dims) {
    if (shape_out[dim] != 1) {
      ++dim;
      continue;
    }
    // Remove shape_out[dim] by shifting the tail left one slot.
    for (int64_t k = dim; k + 1 < out_num_dims; ++k) {
      shape_out[k] = shape_out[k + 1];
    }
    // If some axis referenced the dropped dimension, delete that entry.
    for (int64_t a = 0; a < out_num_axis; ++a) {
      if (axis_out[a] == dim) {
        for (int64_t k = a; k + 1 < out_num_axis; ++k) {
          axis_out[k] = axis_out[k + 1];
        }
        out_num_axis -= 1;
        break;
      }
    }
    // Axes referencing later dimensions slide down by one.
    for (int64_t a = 0; a < out_num_axis; ++a) {
      if (axis_out[a] > dim) {
        axis_out[a] -= 1;
      }
    }
    --out_num_dims;
    // Do not advance `dim`: the dimension shifted into this slot may itself
    // be size 1.
  }
}

// Normalizes a reduction request: resolves negative axes, removes
// duplicates, sorts the axis list, strips size-1 dimensions and merges
// adjacent dimensions that share the same reduced/kept status. On success
// the simplified shape is written to shape_out/out_num_dims and the
// simplified axis list to axis_out/out_num_axis. Returns false when any
// axis is out of range.
__aicore__ inline bool ResolveAxis(const int64_t num_dims, const int64_t *axis,
                                   const int64_t num_axis, int64_t *axis_out,
                                   int64_t &out_num_axis,
                                   const int64_t *shape_in, int64_t *shape_out,
                                   int64_t &out_num_dims) {
  // Scalar input: nothing to reduce.
  if (num_dims == 0) {
    out_num_axis = 0;
    out_num_dims = 0;
    return true;
  }
  out_num_axis = 0;
  out_num_axis = 0;
  out_num_dims = num_dims;
  // Resolve negative axes into [0, num_dims) and drop duplicates.
  for (int64_t idx = 0; idx < num_axis; ++idx) {
    int current = axis[idx] < 0 ? (axis[idx] + num_dims) : axis[idx];
    if (current < 0 || current >= num_dims) {
      return false;
    }
    bool is_dup = false;
    for (int j = 0; j < out_num_axis; ++j) {
      if (axis_out[j] == current) {
        is_dup = true;
        break;
      }
    }
    if (!is_dup) {
      axis_out[out_num_axis] = current;
      out_num_axis += 1;
    }
  }
  my_memcpy(shape_out, shape_in, num_dims);
  insertionSort(axis_out, 0, out_num_axis - 1);

  RemoveSize1Dims(shape_out, out_num_dims, axis_out, out_num_axis);
  // Merge adjacent dimensions with the same reduced/kept status, scanning
  // from the innermost dimension outwards. `j` tracks the axis entry that
  // could match the dimension currently being inspected.
  if (out_num_axis > 0) {
    int64_t j = out_num_axis - 1;
    bool previous_here = (axis_out[j] == out_num_dims - 1);
    if (previous_here) {
      j -= 1;
    }

    for (int64_t i = out_num_dims - 2; i >= 0; --i) {
      bool current_here = j >= 0 ? (axis_out[j] == i) : false;
      if (current_here == previous_here) {
        // Same status as the next-inner dimension: fold dim i+1 into dim i.
        shape_out[i] *= shape_out[i + 1];
        for (int64_t k = i + 1; k + 1 < out_num_dims; ++k) {
          shape_out[k] = shape_out[k + 1];
        }
        // All axes past the folded dimension shift down by one.
        for (int64_t k = 0; k < out_num_axis; ++k) {
          if (axis_out[k] > i) {
            axis_out[k] -= 1;
          }
        }
        if (current_here) {
          // Both merged dims were reduced: drop the now-duplicate axis entry.
          for (int64_t k = j + 1; k + 1 < out_num_axis; ++k) {
            axis_out[k] = axis_out[k + 1];
          }
          out_num_axis -= 1;
        }
        out_num_dims -= 1;
      }
      if (current_here) {
        j -= 1;
      }
      previous_here = current_here;
    }
  }
  return true;
}

// Returns true when `index` sits at position 0 along every reduced axis,
// i.e. this visit is the first contribution to its output element (so the
// accumulator may be initialized rather than combined).
template <typename T, typename U>
__aicore__ inline bool IsFirstReduction(const T *index, const U num_axis,
                                        const T *axis) {
  if (num_axis == 0) {
    return true;
  }
  bool first = true;
  for (U a = 0; a < num_axis && first; ++a) {
    first = (index[axis[a]] == 0);
  }
  return first;
}

// Hash map keyed by dimension index, used as a bitmap marking which
// dimensions are reduced.
typedef IntHashMap<MAX_SHAPE_DIM, int, int> AxisHashMap;

// Reference:
// https://github.com/tensorflow/tensorflow/blob/v2.16.2/tensorflow/core/kernels/reduction_ops_common.cc
// (search for SimplifyHelper)
// Validates the axis list against inputShape and marks every reduced
// dimension in `bitmap`. Returns false on an out-of-range or duplicate
// axis.
template <typename AxisType>
__aicore__ inline bool SimplifyHelper(const ShapeData &inputShape,
                                      const AxisType *axis, int64_t num_axis,
                                      AxisHashMap &bitmap) {
  for (int64_t i = 0; i < num_axis; ++i) {
    auto index = axis[i];
    if (index < -inputShape.dimNum || index >= inputShape.dimNum) {
      return false;
    }
    // Map negative axes into [0, dimNum).
    index = (index + inputShape.dimNum) % inputShape.dimNum;
    if (bitmap.get(index)) {
      return false;
    }
    bitmap.set(index, 1);
  }
  return true;
}

// Reference:
// https://github.com/tensorflow/tensorflow/blob/v2.16.2/tensorflow/core/kernels/reduction_ops_common.cc
// (search for ReductionHelper::Simplify)
class ReductionHelper {
public:
  bool reduce_first_axis_; // whether the first (merged) dimension is reduced
  ShapeData data_reshape_; // shape the input is reshaped to before reducing
  ShapeData out_shape_;   // final output shape (out_reshape_ -> out_shape_)
  ShapeData out_reshape_; // reduced shape of data_reshape_ (data_reshape_ -> out_reshape_)
public:
  __aicore__ inline ReductionHelper() : reduce_first_axis_(false) {}

  // Simplifies the reduction: validates the axes, computes the output shape
  // (honoring keep_dims), and collapses the input into a canonical shape
  // that strictly alternates reduced/kept dimensions so later stages only
  // have to handle that layout. Returns false on an invalid axis list.
  template <typename AxisType>
  __aicore__ inline bool Simplify(ShapeData &inputShape, const AxisType *axis,
                                  int64_t num_axis, const bool keep_dims) {
    // NOTE(review): debug print; consider removing for production builds.
    printf("%s:%d\n", __FILE__, __LINE__);
    // bitmap[i]: whether a reduction is applied to dimension i.
    AxisHashMap bitmap;
    if (!SimplifyHelper(inputShape, axis, num_axis, bitmap)) {
      return false;
    };
    bitmap.print_("axis bitmap");

    // Output shape: kept dims keep their extent; reduced dims become 1 when
    // keep_dims is set, otherwise they disappear.
    for (int64_t i = 0; i < inputShape.dimNum; ++i) {
      if (!bitmap.get(i)) {
        out_shape_.push_back(inputShape[i]);
      } else if (keep_dims) {
        out_shape_.push_back(1);
      }
    }
    // Skip leading size-1 dimensions.
    int dim_index = 0;
    for (; dim_index < inputShape.dimNum; ++dim_index) {
      if (inputShape[dim_index] != 1) {
        break;
      }
    }
    if (dim_index >= inputShape.dimNum) {
      // Special case: the input is (effectively) a scalar.
      reduce_first_axis_ = true;
    } else {
      // e.g. [2, 1, 3, 1, 5] with axes [1, 4] ==> [6, 5] (axis 1 reduced)
      // 1. drop dimensions of size 1 (they merge into their neighbor);
      // 2. merge consecutive dimensions that are all kept;
      // 3. merge consecutive dimensions that are all reduced.
      reduce_first_axis_ = bitmap.get(dim_index);
      data_reshape_.push_back(inputShape[dim_index]);
      ++dim_index;
      for (; dim_index < inputShape.dimNum; ++dim_index) {
        const auto size = inputShape[dim_index];
        // A size-1 dim adopts the previous dimension's reduce/kept status,
        // so it simply merges away in the branch below.
        if (size == 1) {
          if (!bitmap.set(dim_index, bitmap.get(dim_index - 1))) {
            printf("bitmap set error! %s:%d\n", __FILE__, __LINE__);
            return false;
          }
        }
        if (bitmap.get(dim_index - 1) != bitmap.get(dim_index)) {
          data_reshape_.push_back(size);
        } else {
          // Same status as the previous dim: merge by multiplying extents.
          auto back_index = data_reshape_.dimNum - 1;
          data_reshape_.set(back_index, data_reshape_[back_index] * size);
        }
      }
      // After merging, the shape strictly alternates between reduced (y)
      // and kept (n) dims: reduce_first_axis_ ? [y,n,y,n] : [n,y,n,y].
      // Hence the kept dims (out_reshape_) are every second entry.
      for (size_t i = reduce_first_axis_ ? 1 : 0; i < data_reshape_.dimNum;
           i += 2) {
        out_reshape_.push_back(data_reshape_[i]);
      }
    }

    // NOTE(review): debug prints below; consider removing for production.
    inputShape.print_("输入");
    data_reshape_.print_("输入reshape");
    out_reshape_.print_("输出reshape");
    out_shape_.print_("输出 shape");
    return true;
  }

  // Number of dimensions of the canonical (merged) input shape.
  __aicore__ inline int64_t ndims() const { return data_reshape_.dimNum; }

  // Shape obtained after moving all kept dimensions in front of all reduced
  // ones (i.e. data_reshape_ rearranged by permutation()).
  __aicore__ inline void shuffled_shape(int64_t *shape) {
    const int dims = data_reshape_.dimNum;
    int counter = 0;
    for (int i = reduce_first_axis_ ? 1 : 0; i < dims; i += 2) {
      shape[counter++] = data_reshape_[i];
    }
    for (int i = reduce_first_axis_ ? 0 : 1; i < dims; i += 2) {
      shape[counter++] = data_reshape_[i];
    }
  }

  // Permutation that gathers the kept dimensions first, followed by the
  // reduced dimensions.
  __aicore__ inline void permutation(int64_t *perm) {
    const int dims = data_reshape_.dimNum;
    const int unreduced_dims = (dims + !reduce_first_axis_) / 2;
    for (int i = 0; i < unreduced_dims; i++) {
      perm[i] = 2 * i + reduce_first_axis_;
    }
    for (int i = unreduced_dims; i < dims; i++) {
      perm[i] = 2 * (i - unreduced_dims) + !reduce_first_axis_;
    }
  }
};

// Reference: https://gitlab.com/libeigen/eigen/-/blob/master/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
// (search for LeafSize)
// Largest range the tree reducer sums serially before splitting in half.
// The half specialization uses smaller leaves - presumably to limit
// accumulation error at lower precision (values match Eigen's).
template <typename T> struct LeafSize { static const int64_t value = 1024; };
template <> struct LeafSize<half> { static const int64_t value = 200; };

// Reference: https://gitlab.com/libeigen/eigen/-/blob/master/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
// (search for InnerMostDimReducer)
// Serial sum reducer over a contiguous logical range.
template <typename T, bool usePacket = false, bool useTreeReduction = false>
struct InnerMostDimReducer {
  // Sums numValuesToReduce consecutive logical elements of gX starting at
  // startIdx. When permuteHelper is supplied, each logical position is
  // mapped through it first (indirect access into a permuted layout).
  static __aicore__ inline T reduce(GlobalTensor<T> gX, int64_t startIdx,
                                    int64_t numValuesToReduce,
                                    PermuteHelper *permuteHelper = nullptr) {
    T accum = 0;
    for (auto j = 0; j < numValuesToReduce; ++j) {
      auto index =
          permuteHelper ? permuteHelper->get(startIdx + j) : startIdx + j;
      SIMPLE_ADD(T, accum, accum, gX(index));
    }
    return accum;
  }
};

// Reference: https://gitlab.com/libeigen/eigen/-/blob/master/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
// (search for InnerMostDimReducer)
// Tree (pairwise) reduction over [startIdx, startIdx + numValuesToReduce):
// ranges larger than LeafSize<T> are split in half iteratively via an
// explicit stack of (index, count) pairs; each leaf is summed serially and
// the partial sums in retList are then combined pairwise, which keeps
// floating-point error growth logarithmic instead of linear.
template <typename T> struct InnerMostDimReducer<T, false, true> {
  template <typename stackT, typename retListT>
  static __aicore__ inline T reduce(GlobalTensor<T> gX, int64_t startIdx,
                                    int64_t numValuesToReduce, stackT *stack,
                                    retListT *retList,
                                    PermuteHelper *permuteHelper = nullptr) {
    stack->push_back(startIdx);
    stack->push_back(numValuesToReduce);
    while (!stack->empty()) {
      auto currentNumValues = stack->pop_back();
      auto currentIndex = stack->pop_back();
      if (currentNumValues > LeafSize<T>::value) {
        // Too large for one leaf: push both halves back onto the stack.
        const auto halfCount = currentNumValues / 2;
        stack->push_back(currentIndex + halfCount);
        stack->push_back(currentNumValues - halfCount);
        stack->push_back(currentIndex);
        stack->push_back(halfCount);
      } else {
        T ret = InnerMostDimReducer<T, false, false>::reduce(
            gX, currentIndex, currentNumValues, permuteHelper);
        retList->push_back(ret);
      }
    }
    // Combine partial sums pairwise: fold element i+1 into element i, then
    // delete the absorbed odd-index elements; an odd-sized list keeps its
    // unpaired last element for the next pass.
    //
    // Bug fix: the removal loop previously started at size_ - 1
    // unconditionally. For an odd-sized list (possible - e.g. a range of
    // 2 * LeafSize + 1 values produces 3 leaves) that deleted the EVEN
    // indices, i.e. the freshly combined sums and the unpaired tail,
    // producing a wrong result. Start at the last odd index instead.
    if constexpr (std::is_same_v<retListT, LocalList<T>>) {
      while (retList->size_ > 1) {
        for (auto i = 0; i + 1 < retList->size_; i += 2) {
          SIMPLE_ADD(T, retList->data_(i), retList->data_(i),
                     retList->data_(i + 1));
        }
        auto last_odd = (retList->size_ % 2 == 0) ? retList->size_ - 1
                                                  : retList->size_ - 2;
        for (auto i = last_odd; i >= 1; i -= 2) {
          retList->remove(i);
        }
      }
      return retList->data_(0);
    } else if constexpr (std::is_same_v<retListT, SimpleList<T>>) {
      while (retList->size_ > 1) {
        for (auto i = 0; i + 1 < retList->size_; i += 2) {
          SIMPLE_ADD(T, retList->data_[i], retList->data_[i],
                     retList->data_[i + 1]);
        }
        auto last_odd = (retList->size_ % 2 == 0) ? retList->size_ - 1
                                                  : retList->size_ - 2;
        for (auto i = last_odd; i >= 1; i -= 2) {
          retList->remove(i);
        }
      }
      return retList->data_[0];
    } else {
      // Fallback so every instantiation returns a value (previously this
      // path fell off the end of the function: undefined behavior).
      return T(0);
    }
  }
};

// Reference: https://gitlab.com/libeigen/eigen/-/blob/master/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
// (search for TensorOpCost)
// Cost of evaluating one output coefficient: bytes read, bytes written and
// compute cycles.
struct TensorOpCost {
  float bytes_loaded_;
  float bytes_stored_;
  float compute_cycles_;

  __aicore__ inline TensorOpCost()
      : bytes_loaded_(0), bytes_stored_(0), compute_cycles_(0) {}

  __aicore__ inline TensorOpCost(float bytes_loaded, float bytes_stored,
                                 float compute_cycles)
      : bytes_loaded_(bytes_loaded), bytes_stored_(bytes_stored),
        compute_cycles_(compute_cycles) {}

  // Vectorized ops amortize their cycle count over packet_size lanes.
  __aicore__ inline TensorOpCost(float bytes_loaded, float bytes_stored,
                                 float compute_cycles, bool vectorized,
                                 float packet_size)
      : bytes_loaded_(bytes_loaded), bytes_stored_(bytes_stored),
        compute_cycles_(vectorized ? compute_cycles / packet_size
                                   : compute_cycles) {}

  __aicore__ inline float bytes_loaded() const { return bytes_loaded_; }
  __aicore__ inline float bytes_stored() const { return bytes_stored_; }
  __aicore__ inline float compute_cycles() const { return compute_cycles_; }

  // Weighted total: each component scaled by its per-unit cost.
  __aicore__ inline float total_cost(float load_cost, float store_cost,
                                     float compute_cost) const {
    return load_cost * bytes_loaded_ + store_cost * bytes_stored_ +
           compute_cost * compute_cycles_;
  }

  // Drop the memory-access component, e.g. when accesses are sequential or
  // fully hidden behind computation.
  __aicore__ inline void dropMemoryCost() {
    bytes_loaded_ = 0;
    bytes_stored_ = 0;
  }

  // Element-wise minimum of two costs.
  // TODO(rmlarsen): Define min in terms of total cost, not elementwise.
  __aicore__ inline TensorOpCost cwiseMin(const TensorOpCost &rhs) const {
    return TensorOpCost(GET_MIN(bytes_loaded_, rhs.bytes_loaded()),
                        GET_MIN(bytes_stored_, rhs.bytes_stored()),
                        GET_MIN(compute_cycles_, rhs.compute_cycles()));
  }

  // Element-wise maximum of two costs.
  // TODO(rmlarsen): Define max in terms of total cost, not elementwise.
  __aicore__ inline TensorOpCost cwiseMax(const TensorOpCost &rhs) const {
    return TensorOpCost(GET_MAX(bytes_loaded_, rhs.bytes_loaded()),
                        GET_MAX(bytes_stored_, rhs.bytes_stored()),
                        GET_MAX(compute_cycles_, rhs.compute_cycles()));
  }

  __aicore__ inline TensorOpCost &operator+=(const TensorOpCost &rhs) {
    bytes_loaded_ += rhs.bytes_loaded();
    bytes_stored_ += rhs.bytes_stored();
    compute_cycles_ += rhs.compute_cycles();
    return *this;
  }

  __aicore__ inline TensorOpCost &operator*=(float rhs) {
    bytes_loaded_ *= rhs;
    bytes_stored_ *= rhs;
    compute_cycles_ *= rhs;
    return *this;
  }

  __aicore__ inline friend TensorOpCost operator+(TensorOpCost lhs,
                                                  const TensorOpCost &rhs) {
    lhs += rhs;
    return lhs;
  }

  __aicore__ inline friend TensorOpCost operator*(TensorOpCost lhs, float rhs) {
    lhs *= rhs;
    return lhs;
  }

  __aicore__ inline friend TensorOpCost operator*(float lhs, TensorOpCost rhs) {
    rhs *= lhs;
    return rhs;
  }
};

// Reference: https://gitlab.com/libeigen/eigen/-/blob/master/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
// (search for TensorCostModel)
// Static cost model mapping an expression's per-coefficient cost to a
// recommended thread count and task size.
class TensorCostModel {
public:
  // Scaling from compute cost to device cycles.
  static const int kDeviceCyclesPerComputeCycle = 1;

  // Fixed overheads, in device cycles.
  static const int kStartupCycles = 100000;
  static const int kPerThreadCycles = 100000;
  static const int kTaskSize = 40000;

  // Number of threads in [1, max_threads] worth using for an expression
  // with `output_size` coefficients costing `cost_per_coeff` each.
  static __aicore__ inline int numThreads(float output_size,
                                          const TensorOpCost &cost_per_coeff,
                                          int max_threads) {
    const float cost = totalCost(output_size, cost_per_coeff);
    float threads = (cost - kStartupCycles) / kPerThreadCycles + 0.9f;
    // Clamp before converting to int to avoid undefined behavior on
    // overflow.
    threads = GET_MIN(threads, float(0X7fffffff));
    return GET_MIN(max_threads, GET_MAX(1, static_cast<int>(threads)));
  }

  // Assesses parallel task size: 1.0 means an ideal task size; values below
  // 1.0 mean task granularity should grow to amortize parallelization
  // overheads.
  static __aicore__ inline float taskSize(float output_size,
                                          const TensorOpCost &cost_per_coeff) {
    return totalCost(output_size, cost_per_coeff) / kTaskSize;
  }

  // Total cost in device cycles. The load/store weights model fetches from
  // L2: 64-byte cache lines with ~11 cycles latency (Haswell numbers, per
  // Eigen). L2 is what matters for the 100us-10ms single-thread regime this
  // model targets: data fitting in L1 costs negligible time, while data
  // that only fits in L3 takes too long to be interesting here.
  static __aicore__ inline float totalCost(float output_size,
                                           const TensorOpCost &cost_per_coeff) {
    const float kLoadCycles = 1.0 / 64 * 11;
    const float kStoreCycles = 1.0 / 64 * 11;
    return output_size *
           cost_per_coeff.total_cost(kLoadCycles, kStoreCycles,
                                     kDeviceCyclesPerComputeCycle);
  }
};

// Reference: https://gitlab.com/libeigen/eigen/-/blob/master/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
// (search for FullReducer)
// On the Orange Pi target, at most 3 threads are available.
constexpr int64_t max_threads = 3;

// Full (sum) reduction over [startIdx, startIdx + numValuesToReduce). The
// cost model picks a thread count; the range is then reduced in `numblocks`
// shards plus a tail, each via the tree reducer, and the shard results are
// summed.
template <typename T, typename stackT, typename retlistT>
__aicore__ inline T fullReduce(GlobalTensor<T> gX, int64_t startIdx,
                               int64_t numValuesToReduce, stackT *stack,
                               retlistT *retList) {
  // Per-coefficient cost: one load of sizeof(T) bytes plus one functor
  // application.
  const TensorOpCost cost =
      TensorOpCost(float(int(sizeof(T))), 0.0f, 0.0f, false, 1.0f) +
      TensorOpCost(0.0f, 0.0f, functor_traits<T>::Cost, false, 1.0f);
  const int num_threads =
      TensorCostModel::numThreads(numValuesToReduce, cost, max_threads);
  if (num_threads == 1) {
    return InnerMostDimReducer<T, false, true>::reduce(
        gX, startIdx, numValuesToReduce, stack, retList);
  }

  const int blocksize = numValuesToReduce / num_threads;
  int numblocks = blocksize > 0 ? numValuesToReduce / blocksize : 0;
  // Bug fix: shards[] has max_threads slots, but numValuesToReduce /
  // blocksize can exceed num_threads for small inputs (e.g. 5 values with 3
  // threads gives blocksize 1, numblocks 5), overflowing the array. Clamp;
  // the tail shard below covers whatever the first blocks do not.
  if (numblocks > max_threads) {
    numblocks = max_threads;
  }
  assert(numValuesToReduce >= numblocks * blocksize);

  T shards[max_threads] = {0};
  // Bug fix: the sharded path previously ignored startIdx (shards began at
  // absolute index 0), inconsistent with the single-threaded path above.
  for (int i = 0; i < numblocks; ++i) {
    shards[i] = InnerMostDimReducer<T, false, true>::reduce(
        gX, startIdx + i * blocksize, blocksize, stack, retList);
    stack->clear();
    retList->clear();
  }
  // Tail shard: the remainder not covered by the full blocks.
  T finalShard = 0;
  if (numblocks * blocksize < numValuesToReduce) {
    finalShard = InnerMostDimReducer<T, false, true>::reduce(
        gX, startIdx + numblocks * blocksize,
        numValuesToReduce - numblocks * blocksize, stack, retList);
  }
  for (int i = 0; i < numblocks; ++i) {
    SIMPLE_ADD(T, finalShard, finalShard, shards[i]);
  }
  return finalShard;
}

// Reference: https://gitlab.com/libeigen/eigen/-/blob/master/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
// (search for costPerCoeff)
// Cost of assigning one coefficient whose right-hand side is a reduction
// over numValuesToReduce elements: the RHS loads and combines every
// element; the LHS stores a single coefficient of the result.
template <typename T>
__aicore__ inline TensorOpCost costPerCoeffAssign(int64_t numValuesToReduce) {
  constexpr auto elem_bytes = float(int(sizeof(T)));
  // Loading a single coefficient of the source expression.
  const auto coeff_load = TensorOpCost(elem_bytes, 0.0f, 0.0f, false, 1);
  // RHS: one load per reduced element plus the functor applications.
  const auto rhs_cost =
      coeff_load * numValuesToReduce +
      TensorOpCost(0.0f, 0.0f, numValuesToReduce * functor_traits<T>::Cost,
                   false, 1);
  // LHS load (first elem_bytes already counted, leaving zero here) plus the
  // store of the assigned coefficient.
  return rhs_cost +
         TensorOpCost(GET_MAX(0.0f, coeff_load.bytes_loaded_ - elem_bytes),
                      coeff_load.bytes_stored_, coeff_load.compute_cycles_) +
         TensorOpCost(0.0f, elem_bytes, 0.0f, false, 1);
}
// Reference: https://gitlab.com/libeigen/eigen/-/blob/master/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
// (search for CalculateParallelForBlock)
// Chooses a block size/count for parallelizing numValues work units of the
// given per-unit cost: start from the cost-model block size, then coarsen
// blocks as long as parallel efficiency does not drop.
__aicore__ inline void CalculateParallelForBlock(const int64_t numValues,
                                                 const TensorOpCost &cost,
                                                 int64_t &blockSize,
                                                 int64_t &blockCount) {
  // Ideal block size: enough work to amount to one "task" of the cost model.
  const float block_size_f = 1.0f / TensorCostModel::taskSize(1, cost);
  const int64_t max_oversharding_factor = 4;
  // NOTE(review): GET_MAX mixes an int64_t and a float operand here, and the
  // result feeds GET_MIN against an int64_t - confirm the macros' type
  // promotion matches the intended rounding behavior.
  int64_t block_size = GET_MIN(
      numValues, GET_MAX((int64_t)CEIL_DIV(numValues, max_oversharding_factor *
                                                          max_threads),
                         block_size_f));
  const int64_t max_block_size = GET_MIN(numValues, 2 * block_size);

  auto block_count = CEIL_DIV(numValues, block_size);
  // Calculate parallel efficiency as fraction of total CPU time used for
  // computations:
  float max_efficiency =
      static_cast<float>(block_count) /
      ((int64_t)CEIL_DIV(block_count, max_threads) * max_threads);

  // Now try to increase block size up to max_block_size as long as it
  // doesn't decrease parallel efficiency.
  for (auto prev_block_count = block_count;
       max_efficiency < 1.0f && prev_block_count > 1;) {
    // This is the next block size that divides size into a smaller number
    // of blocks than the current block_size.
    auto coarser_block_size = CEIL_DIV(numValues, prev_block_count - 1);
    if (coarser_block_size > max_block_size) {
      break; // Reached max block size. Stop.
    }
    // Recalculate parallel efficiency.
    const auto coarser_block_count = CEIL_DIV(numValues, coarser_block_size);
    assert(coarser_block_count < prev_block_count);
    prev_block_count = coarser_block_count;
    const float coarser_efficiency =
        static_cast<float>(coarser_block_count) /
        ((int64_t)CEIL_DIV(coarser_block_count, max_threads) * max_threads);
    if (coarser_efficiency + 0.01f >= max_efficiency) {
      // Taking it: coarser blocks are (almost) as efficient and cheaper to
      // dispatch.
      block_size = coarser_block_size;
      block_count = coarser_block_count;
      if (max_efficiency < coarser_efficiency) {
        max_efficiency = coarser_efficiency;
      }
    }
  }
  blockSize = block_size;
  blockCount = block_count;
}

// Reference: https://gitlab.com/libeigen/eigen/-/blob/master/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
// (search for GenericDimReducer)
// Recursively sums over the reduced dimensions DimIndex..0 of gX into
// *accum, stepping each dimension by its stride.
template <int DimIndex, typename T> struct GenericDimReducer {
  static __aicore__ inline void reduce(GlobalTensor<T> gX, int64_t firstIndex,
                                       T *accum, int64_t *reducedDims,
                                       int64_t *reducedStrides) {
    for (int j = 0; j < reducedDims[DimIndex]; ++j) {
      // Step along dimension DimIndex, then recurse into the next-inner one.
      const int64_t input = firstIndex + j * reducedStrides[DimIndex];
      GenericDimReducer<DimIndex - 1, T>::reduce(gX, input, accum, reducedDims,
                                                 reducedStrides);
    }
  }
};
// Base case: accumulate the elements of the innermost reduced dimension
// directly into *accum.
template <typename T> struct GenericDimReducer<0, T> {
  static __aicore__ inline void reduce(GlobalTensor<T> gX, int64_t firstIndex,
                                       T *accum, int64_t *reducedDims,
                                       int64_t *reducedStrides) {
    for (int j = 0; j < reducedDims[0]; ++j) {
      const int64_t input = firstIndex + j * reducedStrides[0];
      SIMPLE_ADD(T, *accum, *accum, gX(input));
    }
  }
};
// Degenerate case (no reduced dimensions left): accumulate the single
// element at firstIndex.
template <typename T> struct GenericDimReducer<-1, T> {
  static __aicore__ inline void reduce(GlobalTensor<T> gX, int64_t firstIndex,
                                       T *accum, int64_t *reducedDims,
                                       int64_t *reducedStrides) {
    SIMPLE_ADD(T, *accum, *accum, gX(firstIndex));
  }
};

// Counterpart of Eigen's InnerMostDimPreserver: reduction where the
// innermost dimension is preserved, recursing through the reduced
// dimensions like GenericDimReducer above.
// NOTE(review): unlike GenericDimReducer, this template has no terminating
// specialization in view (the <0, T> version below is commented out), so
// instantiating it would recurse through negative DimIndex values at
// compile time - confirm it is unused until the specialization lands.
template <int DimIndex, typename T> struct InnerMostDimPreserver {
  static __aicore__ inline void reduce(GlobalTensor<T> gX, int64_t firstIndex,
                                       T *accum, int64_t *reducedDims,
                                       int64_t *reducedStrides) {
    for (auto j = 0; j < reducedDims[DimIndex]; ++j) {
      const auto input = firstIndex + j * reducedStrides[DimIndex];
      InnerMostDimPreserver<DimIndex - 1, T>::reduce(
          gX, input, accum, reducedDims, reducedStrides);
    }
  }
};

// template <typename T> struct InnerMostDimPreserver<0, T> {
//   static __aicore__ inline void reduce(GlobalTensor<T> gX, int64_t firstIndex,
//                                        T *accum, int64_t *reducedDims,
//                                        int64_t *reducedStrides) {
//     const auto stride = reducedStrides[0];
//     const auto size = reducedDims[0];
//     if (size >= 16) {
//       const auto unrolled_size4 = (size / 4) * 4;
//       T accum1 = T(0);
//       T accum2 = T(0);
//       T accum3 = T(0);
//       for (auto j = 0; j < unrolled_size4; j += 4) {
//         const auto input0 = firstIndex + j * stride;
//         reducer0.reducePacket(self.m_impl.template packet<Unaligned>(input0),
//                               accum0);
//         const auto input1 = firstIndex + (j + 1) * stride;
//         reducer0.reducePacket(self.m_impl.template packet<Unaligned>(input1),
//                               &accum1);
//         const auto input2 = firstIndex + (j + 2) * stride;
//         reducer0.reducePacket(self.m_impl.template packet<Unaligned>(input2),
//                               &accum2);
//         const auto input3 = firstIndex + (j + 3) * stride;
//         reducer0.reducePacket(self.m_impl.template packet<Unaligned>(input3),
//                               &accum3);
//       }
//       reducer0.reducePacket(accum1, accum0);
//       reducer0.reducePacket(accum2, accum0);
//       reducer0.reducePacket(accum3, accum0);
//       for (auto j = unrolled_size4; j < size; ++j) {
//         auto input = firstIndex + j * stride;
//         reducer0.reducePacket(self.m_impl.template packet<Unaligned>(input),
//                               accum0);
//       }
//     } else {
//       for (auto j = 0; j < size; ++j) {
//         auto input = firstIndex + j * stride;
//         reducer0.reducePacket(self.m_impl.template packet<Unaligned>(input),
//                               accum0);
//       }
//     }
//   }
// };

} // namespace AscendC
