//
// Created by 37417 on 2024/4/5.
//

#ifndef HELLOTEST_SEGMENT_REDUCTION_H
#define HELLOTEST_SEGMENT_REDUCTION_H

#include <valarray>

#include "other.h"
namespace segment {

// Global switch for the verbose trace output below. `inline` (C++17) gives it
// a single definition across translation units; a plain header-scope global
// here would violate the ODR as soon as this header is included twice.
inline bool debug = true;

// Debug helper: prints `size` elements of `vec` to stdout, labeled by `name`.
// `name` is taken by const reference to avoid copying the string on every
// call, and the loop index is size_t to match `size` (the old `int i` caused
// a signed/unsigned comparison).
template <typename T>
void DisPlay(const std::string& name, T* vec, size_t size) {
  std::cout << "[DEBUG] Display data : " << name << " : ";
  for (size_t i = 0; i < size; i++) {
    std::cout << vec[i] << " ";
  }
  std::cout << std::endl;
}
// Host-side stand-in for CUDA's built-in blockDim (threads per block).
// Dimensions of 1 model a single-thread "block".
struct BlockDim {
  int x = 1;
  int y = 1;
};
// `inline` (C++17) so this header-scope global has one definition program-wide.
inline BlockDim blockDim;

// Host-side stand-in for CUDA's built-in blockIdx. Valid block indices lie in
// [0, gridDim); with the simulated gridDim of 1 the only valid index is 0.
// The previous default of 1 made every grid-stride loop (e.g. the
// `for (seg = blockIdx.y; ...)` loop in SegmentReduceVectorKernel) start past
// the first block and silently skip element/segment 0.
struct BlockIdx {
  int x = 0;
  int y = 0;
};
// `inline` (C++17) so this header-scope global has one definition program-wide.
inline BlockIdx blockIdx;

// Host-side stand-in for CUDA's built-in gridDim (blocks per grid).
// A 1x1 grid models the whole computation running in one "block".
struct GridDim {
  int x = 1;
  int y = 1;
};
// `inline` (C++17) so this header-scope global has one definition program-wide.
inline GridDim gridDim;

// Host-side stand-in for CUDA's built-in threadIdx. Valid thread indices lie
// in [0, blockDim); with the simulated blockDim of 1 the only valid index is
// 0. The previous default of 1 meant `if (y == 0 && x_ok)` guards in the
// kernels never fired, so no output was ever written.
struct ThreadIdx {
  int x = 0;
  int y = 0;
};
// `inline` (C++17) so this header-scope global has one definition program-wide.
inline ThreadIdx threadIdx;

// Minimal stand-in for a status type: true == OK, false == error.
using Status = bool;

// Returns the success status. `inline` is required for a function defined in
// a header; the previous non-inline definition is a multiple-definition
// linker error once this header is included from more than one TU.
inline Status OkStatus() { return true; }

// Empty stand-in for the framework's OpKernelContext; it is threaded through
// the call chain below but never dereferenced in this host-side simulation.
class OpKernelContext {};

namespace functor {

// Initial-value functor: yields the additive identity of T, used to seed
// reductions.
template <typename T>
struct Zero {
  /*EIGEN_STRONG_INLINE*/ T operator()() const {
    return static_cast<T>(0);
  }
};

// Binary reduction functor implementing addition.
struct Sum {
  template <typename T>
  /*__host__ __device__*/ T operator()(const T& lhs, const T& rhs) const {
    return lhs + rhs;
  }
};

}  // namespace functor

// Diagnostic sink for failed checks (stand-in for a real logging facility).
// `inline` so the header can be included from multiple TUs without a
// multiple-definition linker error.
inline void check(const std::string& msg) { std::cout << msg << std::endl; }

// Poor-man's DCHECK_EQ: logs when the operands differ. Wrapped in
// do/while(0) so it acts as a single statement (the bare `if` version broke
// un-braced if/else at the call site via dangling-else), and the operands are
// parenthesized so low-precedence expressions expand correctly.
#define DCHECK_EQ(x, y)              \
  do {                               \
    if ((x) != (y)) check("no eq!"); \
  } while (0)

void SayHello();

// Radix-sort helper: converts `counts` in place from per-bucket counts to
// exclusive prefix sums, so each entry becomes the number of elements in all
// preceding buckets (i.e. the bucket's starting offset).
void prefixSum(std::vector<int>& counts) {
  int running = 0;
  for (size_t i = 0; i < counts.size(); ++i) {
    const int bucket = counts[i];
    counts[i] = running;
    running += bucket;
  }
}

// Radix-sort helper: scatters keys and their parallel indices into their
// destination slots for the current digit. `counts` must hold the exclusive
// prefix sums for the digit's buckets (see prefixSum) and is advanced in
// place as elements are placed; `base` selects the digit (1, 10, 100, ...).
// The loop index is now int to match `size` (the old `size_t i < int size`
// comparison was signed/unsigned-mismatched and looped "forever" for a
// negative size).
void redistributeKeys(std::vector<int>& keys, std::vector<int>& indices, std::vector<int>& counts,
                      int base, int size) {
  std::vector<int> tempKeys(size);
  std::vector<int> tempIndices(size);
  for (int i = 0; i < size; ++i) {
    // Bucket index of the current digit of keys[i].
    const size_t digit = keys[i] / base % counts.size();
    tempKeys[counts[digit]] = keys[i];
    tempIndices[counts[digit]] = indices[i];
    ++counts[digit];
  }
  keys = std::move(tempKeys);
  indices = std::move(tempIndices);
}

// Generic LSD radix sort (base 10) over non-negative integer keys, carrying a
// parallel index array. Sorted results go to keys_out / indices_out; the
// inputs are left untouched.
// NOTE(review): the helpers operate on std::vector<int>, so this effectively
// only instantiates for T = int, and keys are assumed non-negative — confirm
// against callers before widening T.
template <typename T>
void RadixSort(int size, const std::vector<T>& key_in, std::vector<T>& keys_out,
               const std::vector<int>& indices_in, std::vector<int>& indices_out) {
  // Start from a copy of the input; each pass reorders these in place.
  keys_out.resize(size);
  indices_out.resize(size);
  std::copy(key_in.begin(), key_in.end(), keys_out.begin());
  std::copy(indices_in.begin(), indices_in.end(), indices_out.begin());

  // Nothing to sort. This also guards the max_element call below:
  // dereferencing max_element of an empty range is undefined behavior.
  if (size <= 0 || key_in.empty()) return;

  // The number of decimal digits in the largest key sets the pass count.
  T max_key = *std::max_element(key_in.begin(), key_in.end());
  int max_digits = 0;
  while (max_key > 0) {
    max_key /= 10;
    ++max_digits;
  }

  // One counting-sort pass per decimal digit, least significant first.
  // `base` (10^digit) is kept in integer arithmetic instead of the previous
  // std::pow, which did the power in floating point and risked rounding for
  // large keys.
  T base = 1;
  for (int digit = 0; digit < max_digits; ++digit, base *= 10) {
    // Count how many keys fall into each of the 10 buckets for this digit.
    std::vector<int> counts(10, 0);
    for (int i = 0; i < size; ++i) {
      const int key_digit = keys_out[i] / base % 10;
      ++counts[key_digit];
    }
    // Turn counts into bucket start offsets, then scatter keys/indices.
    prefixSum(counts);
    redistributeKeys(keys_out, indices_out, counts, base, size);
  }
}

// Compile-time dispatcher: invokes Functor<V> for the largest power-of-two
// V <= VecSize that also satisfies V <= max_vec_size, halving VecSize per
// recursion step. The VecSize == 1 specialization below terminates the
// recursion.
template <int64_t VecSize, template <int vec_size> class Functor>
struct DispatchToVectorizedHelper {
  template <typename... Args>
  Status operator()(int64_t max_vec_size, Args&&... args) const {
    if (max_vec_size >= VecSize) {
      // The runtime-allowed width covers this instantiation's width; use it.
      return Functor<VecSize>()(std::forward<Args>(args)...);
    }
    // Otherwise try the next smaller power-of-two vector size.
    return DispatchToVectorizedHelper<VecSize / 2, Functor>()(max_vec_size,
                                                              std::forward<Args>(args)...);
  }
};

// Recursion base case: unconditionally fall back to the scalar
// (vec_size == 1) functor.
template <template <int vec_size> class Functor>
struct DispatchToVectorizedHelper<1, Functor> {
  template <typename... Args>
  Status operator()(int64_t max_vec_size, Args&&... args) const {
    return Functor<1>()(std::forward<Args>(args)...);
  }
};

// Entry point for vectorized dispatch: picks the widest Functor<vec_size>
// compatible with both the optimal load/store width for T and the
// runtime-provided `max_vec_size` (typically the pointer/stride alignment).
template <typename T, template <int vec_size> class Functor, typename... Args>
Status DispatchToVectorized(int64_t max_vec_size, Args&&... args) {
  static_assert((sizeof(T) & (sizeof(T) - 1)) == 0, "sizeof(T) must be a power of 2");
  if (max_vec_size <= 0) {
    // TODO: replace with errors::InvalidArgument once Status carries messages.
    //    return errors::InvalidArgument("DispatchToVectorized: max_vec_size (",
    //                                   max_vec_size,
    //                                   ") must be greater than zero.");
    std::cout << "ERROR :  "
              << "DispatchToVectorized: max_vec_size (" << max_vec_size
              << ") must be greater than zero." << std::endl;
    // Fail here: the previous code printed the error but then fell through
    // and dispatched anyway, masking the invalid argument.
    return false;
  }
  constexpr const int kOptimalVecSizeBytes = 16;
  // The optimal number of (aligned) elements of T to load/store in a
  // single instruction inside a kernel.
  constexpr const int optimal_vec_size = (kOptimalVecSizeBytes - 1) / sizeof(T) + 1;
  return DispatchToVectorizedHelper<optimal_vec_size, Functor>()(max_vec_size,
                                                                 std::forward<Args>(args)...);
}

// Returns the largest power-of-two alignment (counted in elements, not bytes)
// of a stride or pointer value.
inline int64_t alignment_of(int64_t element_stride) {
  constexpr int64_t kMaxAlignment = 512;
  if (element_stride != 0) {
    // n & -n isolates the lowest set bit: the largest power of two dividing n.
    return element_stride & -element_stride;
  }
  // A zero stride (or null pointer) is unused, so treat it as maximally
  // aligned.
  return kMaxAlignment;
}

template <typename T>
inline int64_t alignment_of(T* ptr) {
  const intptr_t ptr_val = reinterpret_cast<std::uintptr_t>(ptr);
  // Pointers should always be aligned to sizeof(T) bytes.
  DCHECK_EQ(ptr_val % sizeof(T), 0);
  // Note that we want the alignment in elements, not bytes.
  return alignment_of(ptr_val / sizeof(T));
}

// Returns the minimum alignment (in elements) across a mixed list of strides
// (int64_t) and pointers, dispatching each argument to the matching
// alignment_of overload above.
template <typename... Args>
int64_t MinAlignmentOf(Args... args) {
  return std::min({alignment_of(args)...});
}

// Forward declaration of SegmentReduceGPUImpl, which is defined later in this
// header but called below. Without it, the explicit SegmentReduceGPUImpl<...>
// call is a non-dependent name that two-phase lookup requires to be declared
// at template-definition time (Clang rejects the original ordering).
template <typename Treducevec, typename Tvec, typename Toffsets, typename Tindices,
          typename Tsegmentids, typename ReduceOp, typename Tinit, typename Tweights>
Status SegmentReduceGPUImpl(OpKernelContext* ctx, Toffsets nouter, Toffsets ninner_vec,
                            Tsegmentids nsegments, ReduceOp reduce_op, Tinit initial_value,
                            Tinit empty_segment_value, bool is_mean, bool is_sqrtn,
                            const Tvec* input_vec, const Tsegmentids* segment_ids,
                            const Tindices* indices, const Tweights* weights, Tvec* output_vec);

// Dispatch shim selected by DispatchToVectorized: Impl<vec_size> checks the
// vectorization preconditions and forwards to SegmentReduceGPUImpl with the
// inner dimension rescaled to vector units.
template <typename Treduce>
struct SegmentReduceGPUVectorized {
  template <int vec_size>
  struct Impl {
    template <typename T, typename Toffsets, typename Tindices, typename Tsegmentids,
              typename ReduceOp, typename Tweights>
    Status operator()(OpKernelContext* ctx, Toffsets nouter, Toffsets ninner, Tsegmentids nsegments,
                      ReduceOp reduce_op, T initial_value, T empty_segment_value, bool is_mean,
                      bool is_sqrtn, const T* input, const Tsegmentids* segment_ids,
                      const Tindices* indices, const Tweights* weights, T* output) {
      // The chosen vector width must divide the inner dimension evenly, and
      // the input/output pointers must be aligned to it.
      DCHECK_EQ(ninner % vec_size, 0);
      DCHECK_EQ(reinterpret_cast<std::uintptr_t>(input) % vec_size, 0);
      DCHECK_EQ(reinterpret_cast<std::uintptr_t>(output) % vec_size, 0);
      Toffsets ninner_vec = ninner / vec_size;
      // TODO: AlignedVector
      //      using Tvec = AlignedVector<T, vec_size>;
      //      using Treducevec = AlignedVector<Treduce, vec_size>;
      //      const Tvec* input_vec = reinterpret_cast<const Tvec*>(input);
      //      Tvec* output_vec = reinterpret_cast<Tvec*>(output);
      if (debug) {
        std::cout << "SegmentReduceGPUVectorized=====================" << std::endl;
        DisPlay("input", input, nouter * ninner);
        DisPlay("segment_ids", segment_ids, nouter);
        // `indices` is optional; do not dereference it when absent.
        if (indices) DisPlay("indices", indices, nouter);
      }

      //            return SegmentReduceGPUImpl<Treducevec>(ctx, nouter, ninner_vec, nsegments,
      //            reduce_op,
      //                                                    initial_value, empty_segment_value,
      //                                                    is_mean, is_sqrtn, input_vec,
      //                                                    segment_ids, indices, weights,
      //                                                    output_vec);
      return SegmentReduceGPUImpl<Treduce>(ctx, nouter, ninner_vec, nsegments, reduce_op,
                                           initial_value, empty_segment_value, is_mean, is_sqrtn,
                                           input, segment_ids, indices, weights, output);
    }
  };
};
// Serial stand-in for a GPU grid-stride loop: iterates i over [0, n).
// `n` is parenthesized so call sites like GPU_1D_KERNEL_LOOP(i, size + 1)
// expand with the intended precedence.
#define GPU_1D_KERNEL_LOOP(i, n) for (int i = 0; i < (n); i++)

// For each segment id, writes the offset of its first occurrence in the
// sorted `segment_ids` array into segment_offsets ([nsegments + 1] entries;
// the last entry receives the end offset of the final valid id). Missing ids
// get the offset of the next present id, i.e. an empty range.
template <typename Toffsets, typename Tsegmentids>
/*__global__*/ void SegmentOffsetsKernel(
    Toffsets size, Tsegmentids nsegments,
    const Tsegmentids* __restrict__ segment_ids,  // [size]
    Toffsets* __restrict__ segment_offsets) {     // [nsegments + 1]
  GPU_1D_KERNEL_LOOP(i, size + 1) {
    // IDs are clipped to [-1, nsegments] so that out-of-bounds IDs are ignored.
    // Note that we can't report invalid IDs from the GPU without incurring
    // additional overhead.
    auto clip = [&](Tsegmentids id) { return std::min(std::max(Tsegmentids(-1), id), nsegments); };
    const Tsegmentids cur_id = (i < size) ? clip(segment_ids[i]) : nsegments;
    const Tsegmentids prev_id = (i == 0) ? Tsegmentids(-1) : clip(segment_ids[i - 1]);
    // At segment boundaries, write the offset for this ID and any missing IDs
    // since the previous one.
    for (Tsegmentids id = prev_id + 1; id <= cur_id; ++id) {
      segment_offsets[id] = i;
    }
  }
}

// Finds the start offset of each segment in the given sorted segment_ids
// vector. Missing IDs are given the same offset as the next ID so that they
// represent empty ranges. Invalid IDs (those that are outside the range
// [0, nsegments)) are ignored. The value at segment_offsets[0] is set to the
// start index of the first valid ID (e.g., 0 if all IDs are valid), and the
// value at segment_offsets[nsegments] is set to the end index of the last valid
// ID (e.g., nsegments if all IDs are valid).
// Host-side "launch" wrapper: on a real GPU this would configure and launch
// SegmentOffsetsKernel (see the disabled code below); here it simply calls
// the kernel function directly on the current thread.
template <typename Toffsets, typename Tsegmentids>
Status LaunchSegmentOffsetsKernel(/*const GPUDevice& d,*/ Toffsets size, Tsegmentids nsegments,
                                  const Tsegmentids* segment_ids,  // [size]
                                  Toffsets* segment_offsets) {     // [nsegments + 1]
                                                                   //  GpuLaunchConfig config =
  //      GetGpuLaunchConfig(size + 1, d, &SegmentOffsetsKernel<Toffsets, Tsegmentids>,
  //                         /*dynamic_shared_memory_size=*/0, /*block_size_limit=*/0);
  //  return GpuLaunchKernel(SegmentOffsetsKernel<Toffsets, Tsegmentids>, config.block_count,
  //                         config.thread_per_block, 0, d.stream(), size, nsegments, segment_ids,
  //                         segment_offsets);

  // Serial simulation: invoke the "kernel" as a plain function call.
  SegmentOffsetsKernel(size, nsegments, segment_ids, segment_offsets);
  // Status is bool in this simulation; true means OK.
  return true;
}

// Reduces along columns of the thread block, returning the result in the first
// row of threads.
template <typename T, typename ReduceOp>
/*__device__*/ T ReduceBlockAlongCols(ReduceOp reduce_op, const T& value, bool is_valid) {
  // TODO
  //  GPU_DYNAMIC_SHARED_MEM_DECL(/*ALIGN=*/16, char, shared_memory_raw);
  // Host-side stand-in for dynamic shared memory, sized for the whole thread
  // block. The previous hard-coded size of 10 elements would be overrun as
  // soon as blockDim.x * blockDim.y exceeds 10.
  std::vector<T> shared_memory_raw(static_cast<size_t>(blockDim.x) * blockDim.y);
  T* shared_partial_reduction = shared_memory_raw.data();  // [blockDim.y, blockDim.x]
  const int x = threadIdx.x;
  const int y = threadIdx.y;
  T reduced = value;
  // Tree reduction over the y dimension of the block: each pass halves the
  // number of participating rows.
  for (unsigned k = blockDim.y / 2; k > 0; k /= 2) {
    if (is_valid && y < 2 * k) {
      shared_partial_reduction[y * blockDim.x + x] = reduced;
    }
    //    __syncthreads();
    if (is_valid && y < k) {
      reduced = reduce_op(reduced, shared_partial_reduction[(y + k) * blockDim.x + x]);
    }
    //    __syncthreads();
  }
  // Only row 0's value is meaningful to callers.
  return reduced;
}

// This kernel uses a 2D thread decomposition. The x dimension maps to the inner
// dimension of the input/output. The y grid dimension maps to segments, and y
// threads within a block cooperate to reduce over the block's segment.
// Note that Tinit is needed because Tvec and Treducevec may be vector types,
// but Tinit is always a scalar type.
// Note that the first dimension of input_vec is nouter if indices is not
// provided; otherwise it is indexed indirectly via indices and can have any
// size (as long as it spans at least the maximum value in indices). This also
// applies to the weights vector.
template <typename Treducevec, typename Tvec, typename Toffsets, typename Tindices,
          typename Tsegmentids, typename ReduceOp, typename Tinit, typename Tweights>
/*__global__ */ void SegmentReduceVectorKernel(
    Toffsets nouter, Toffsets ninner_vec, Tsegmentids nsegments, ReduceOp reduce_op,
    Tinit initial_value, Tinit empty_segment_value, bool is_mean, bool is_sqrtn,
    const Tvec* __restrict__ input_vec,            // [nouter or any, ninner_vec]
    const Toffsets* __restrict__ segment_offsets,  // [nsegments + 1]
    const Tindices* __restrict__ indices,          // [nouter] (optional)
    const Tweights* __restrict__ weights,          // [nouter or any] (optional)
    Tvec* __restrict__ output_vec) {               // [nsegments, ninner_vec]
  const int num_blocks_x = (ninner_vec - 1) / blockDim.x + 1;
  // Gate the trace output on `debug`, consistent with the other diagnostics
  // in this file (these two prints used to run unconditionally).
  if (debug) {
    std::cout << ninner_vec << std::endl;
    std::cout << "num_blocks_x : " << num_blocks_x << std::endl;
  }
  // Grid-stride loop over inner dimension blocks.
  for (Toffsets blk_x = blockIdx.x; blk_x < num_blocks_x; blk_x += gridDim.x) {
    const Toffsets x = threadIdx.x + blk_x * blockDim.x;
    const Toffsets y = threadIdx.y;
    const bool x_ok = x < ninner_vec;
    // Grid-stride loop over segment blocks, each processing one segment.
    for (Tsegmentids seg = blockIdx.y; seg < nsegments; seg += gridDim.y) {
      // Load segment range.
      const Toffsets begin = segment_offsets[seg];
      const Toffsets end = segment_offsets[seg + 1];
      // Reduce over the segment.
      Treducevec result = Treducevec(initial_value);
      if (debug) {
        std::cout << "begin : " << begin << " en : " << end << " result : " << result << std::endl;
      }
      // Loop over the segment, reducing blockDim.y elements at a time.
      for (Toffsets y_offset = begin; y_offset < end; y_offset += blockDim.y) {
        const bool y_ok = (y_offset + y) < end;
        // Perform indirect lookup if required.
        const Toffsets y_idx = indices && y_ok ? indices[y_offset + y] : y_offset + y;
        const int64_t input_idx = static_cast<int64_t>(y_idx) * ninner_vec + x;
        // Load the input row from global mem.
        Treducevec block_result = x_ok && y_ok ? input_vec[input_idx] : Tvec(initial_value);
        // Apply weights if provided.
        if (weights && y_ok) block_result = block_result * Tvec(weights[y_idx]);
        // Reduce along the columns of the block, returning result in first row.
        block_result = ReduceBlockAlongCols(reduce_op, block_result, x_ok);
        if (y == 0 && x_ok) {
          result = reduce_op(result, block_result);
        }
      }
      // First row of the block stores the result to global memory.
      if (y == 0 && x_ok) {
        if (begin == end) {
          // Empty segment.
          result = Treducevec(empty_segment_value);
        } else {
          // TODO not concern
          //          Tweights total_weight(end - begin);
          //          // Normalize the results if necessary.
          //          if (is_mean) {
          //            result = result / Treducevec(total_weight);
          //          } else if (is_sqrtn) {
          //            result = result / Treducevec(sqrt(static_cast<double>(total_weight)));
          //          }
        }
        // Cast from Treducevec to Tvec.
        const int64_t output_idx = static_cast<int64_t>(seg) * ninner_vec + x;
        output_vec[output_idx] = static_cast<Tvec>(result);
      }
    }
  }
}

// Returns floor(log2(n)) for positive n, and -1 when n == 0.
inline int Log2Floor(uint32_t n) {
  if (n == 0) return -1;
  // The position of the highest set bit equals the number of times n can be
  // halved before reaching 1.
  int log = 0;
  while (n >>= 1) {
    ++log;
  }
  return log;
}

// Returns floor(log2(n)) for positive n, and -1 when n == 0.
// 64-bit variant, expressed via the 32-bit Log2Floor: if any of the top 32
// bits are set, the answer is 32 plus the floor-log of those bits.
inline int Log2Floor64(uint64_t n) {
  const uint32_t high = static_cast<uint32_t>(n >> 32);
  return (high != 0) ? (32 + Log2Floor(high)) : Log2Floor(static_cast<uint32_t>(n));
}

// Returns ceil(log2(n)); -1 when n == 0.
inline int Log2Ceiling(uint32_t n) {
  const int floor_log = Log2Floor(n);
  // n & (n - 1) clears the lowest set bit; a zero result means n is 0 or a
  // power of two, in which case the ceiling equals the floor.
  return ((n & (n - 1)) == 0) ? floor_log : floor_log + 1;
}

// Returns ceil(log2(n)); -1 when n == 0 (64-bit variant).
inline int Log2Ceiling64(uint64_t n) {
  const int floor_log = Log2Floor64(n);
  // Zero or a power of two: ceiling equals floor; otherwise round up.
  return ((n & (n - 1)) == 0) ? floor_log : floor_log + 1;
}

// Reduces input matrix within segments over the outer dimension. Empty segments
// always output empty_segment_value.
// If is_mean or is_sqrtn is true, the results are normalized using the
// corresponding function.
// If indices is not nullptr, input rows are accessed indirectly as
// input[indices[i]], instead of input[i].
// Note: Treducevec is to allow reducing in higher precision than Tvec.
template <typename Treducevec, typename Tvec, typename Toffsets, typename Tindices,
          typename Tsegmentids, typename ReduceOp, typename Tinit, typename Tweights>
Status LaunchSegmentReduceVectorKernel(
    /*const GPUDevice& d, */ Toffsets nouter, Toffsets ninner_vec, Tsegmentids nsegments,
    ReduceOp reduce_op, Tinit initial_value, Tinit empty_segment_value, bool is_mean, bool is_sqrtn,
    const Tvec* input_vec,            // [nouter or any, ninner_vec]
    const Toffsets* segment_offsets,  // [nsegments + 1]
    const Tindices* indices,          // [nouter] (optional)
    const Tweights* weights,          // [nouter or any] (optional)
    Tvec* output_vec) {               // [nsegments, ninner_vec]
  // Grid limits and block-size heuristics from the GPU implementation; kept
  // for reference while the launch-configuration code below stays disabled.
  static constexpr const int kMaxGridX = (1u << 31) - 1;
  static constexpr const int kMaxGridY = (1u << 16) - 1;
  const int max_block_size = 1024;  // Can be tuned for perf (<= 1024)
  const int min_block_size = 64;    // Can be tuned for perf
  const Toffsets ninner_pow2 = Toffsets(1) << Log2Ceiling64(ninner_vec);
  // This is a heuristic that first allocates threads in the block to the inner
  // (x) dimension (which is most efficient) and then allocates the rest to the
  // reduction (y) dimension (which is less efficient but increases
  // parallelism).
  //  int block_x = std::min(ninner_pow2, static_cast<Toffsets>(max_block_size));
  //  const Toffsets avg_reduce_size = Eigen::divup(nouter, static_cast<Toffsets>(nsegments));
  //  const Toffsets avg_reduce_size_pow2 = Toffsets(1) << Log2Ceiling64(avg_reduce_size);
  //  dim3 block(block_x, std::min(static_cast<Toffsets>(Eigen::divup(min_block_size, block_x)),
  //                               avg_reduce_size_pow2));
  //  dim3 grid(std::min(Eigen::divup(ninner_vec, static_cast<Toffsets>(block.x)),
  //                     static_cast<Toffsets>(kMaxGridX)),
  //            std::min(nsegments, static_cast<Tsegmentids>(kMaxGridY)));
  //  unsigned shared_memory_bytes = block.x * block.y * sizeof(Treducevec);
  SegmentReduceVectorKernel<Treducevec, Tvec, Toffsets, Tindices, Tsegmentids, ReduceOp,
                                   Tinit, Tweights>(
      /*grid, block, shared_memory_bytes, d.stream(),*/ nouter, ninner_vec, nsegments, reduce_op,
      initial_value, empty_segment_value, is_mean, is_sqrtn, input_vec, segment_offsets, indices,
      weights, output_vec);
  // Bug fix: this previously returned the global `debug` flag as the Status,
  // which reported failure whenever debug output was turned off. The
  // simulated launch always succeeds.
  return true;
}

// Core of the segment reduction: computes per-segment start offsets from the
// sorted segment_ids, then launches the vector reduction kernel over
// [nsegments, ninner_vec]. Treducevec allows accumulating in a wider type
// than Tvec.
template <typename Treducevec, typename Tvec, typename Toffsets, typename Tindices,
          typename Tsegmentids, typename ReduceOp, typename Tinit, typename Tweights>
Status SegmentReduceGPUImpl(OpKernelContext* ctx, Toffsets nouter, Toffsets ninner_vec,
                            Tsegmentids nsegments, ReduceOp reduce_op, Tinit initial_value,
                            Tinit empty_segment_value, bool is_mean, bool is_sqrtn,
                            const Tvec* input_vec,           // [nouter or any, ninner_vec]
                            const Tsegmentids* segment_ids,  // [nouter]
                            const Tindices* indices,         // [nouter] (optional)
                            const Tweights* weights,         // [nouter or any] (optional)
                            Tvec* output_vec) {              // [nsegments, ninner_vec]
  //  const GPUDevice& device = ctx->eigen_gpu_device();

  // NOTE(review): the original GPU code returns early here after filling the
  // output with empty_segment_value; this simulation falls through instead
  // (the offsets kernel then marks every segment empty, which the vector
  // kernel fills with empty_segment_value — same net effect).
  if (nouter == 0) {
    // TODO
    //  Just set output to empty_segment_value.
    //    GPUDevice d = ctx->template eigen_device<GPUDevice>();
    //    int64_t output_size = static_cast<int64_t>(nsegments) * ninner_vec;
    //    GpuLaunchConfig config = GetGpuLaunchConfig(output_size, d);
    //    return GpuLaunchKernel(SetToValue<Tvec, Tinit>, config.block_count,
    //                           config.thread_per_block, 0, d.stream(), output_size,
    //                           output_vec, empty_segment_value);
  }

  // Allocate and compute segment_offsets.
  //  Tensor segment_offsets;
  //  TF_RETURN_IF_ERROR(ctx->allocate_temp(DataTypeToEnum<Toffsets>::value,
  //                                        TensorShape({nsegments + 1}), &segment_offsets));
  //  Toffsets* segment_offsets_ptr = segment_offsets.flat<Toffsets>().data();
  //  TF_RETURN_IF_ERROR(
  //      LaunchSegmentOffsetsKernel(/*device,*/ nouter, nsegments, segment_ids,
  //      segment_offsets_ptr));
  // Host-side replacement for the temp-tensor allocation above.
  std::vector<Toffsets> segment_offsets_ptr(nsegments + 1);
  LaunchSegmentOffsetsKernel<Toffsets, Tsegmentids>(/*device,*/ nouter, nsegments, segment_ids,
                                                    segment_offsets_ptr.data());
  if (debug) {
    std::cout << "SegmentReduceGPUImpl : =============" << std::endl;
    DisPlay("segment_offsets_ptr", segment_offsets_ptr.data(), nsegments + 1);
  }

  // inner case :TODO
  //  const Toffsets avg_reduce_size =
  //      Eigen::divup(nouter, static_cast<Toffsets>(nsegments));
  //  // This avg_reduce_size threshold is a performance heuristic.
  //  if (ninner_vec == 1 && avg_reduce_size >= 512) {
  //    // Here we use a gpuprim-based implementation that doesn't support an
  //    // inner dimension but can be significantly faster for large reductions.
  //    return SegmentReduceGPUImplNoInnerDim<Treducevec>(
  //        ctx, nouter, nsegments, reduce_op, initial_value, empty_segment_value,
  //        is_mean, is_sqrtn, input_vec, segment_offsets_ptr, indices, weights,
  //        output_vec);
  //  }
  // Here we use a custom kernel that is optimized for ninner_vec >= ~64 and
  // gives decent performance for smaller cases. It also handles indices,
  // casting to/from Treducevec, and normalizing the output.
  return LaunchSegmentReduceVectorKernel<Treducevec>(
      /*device, */ nouter, ninner_vec, nsegments, reduce_op, initial_value, empty_segment_value,
      is_mean, is_sqrtn, input_vec, segment_offsets_ptr.data(), indices, weights, output_vec);
}

// Public entry point: reduces rows of `input` within segments given by the
// sorted `segment_ids`, writing one row per segment to `output`. `indices`
// (optional) redirects row lookups; `weights` (optional) scales rows before
// reduction. Treduce selects the accumulation type.
template <typename Treduce, typename T, typename Toffsets, typename Tindices, typename Tsegmentids,
          typename ReduceOp, typename Tweights>
Status SegmentReduceGPU(OpKernelContext* ctx, Toffsets nouter, Toffsets ninner,
                        Tsegmentids nsegments, ReduceOp reduce_op, T initial_value,
                        T empty_segment_value, bool is_mean, bool is_sqrtn,
                        T* input,                  // [nouter or any, ninner]
                        Tsegmentids* segment_ids,  // [nouter]
                        Tindices* indices,         // [nouter] (optional)
                        Tweights* weights,         // [nouter or any] (optional)
                        T* output) {               // [nsegments, ninner]
  // Trivially empty problem: nothing to reduce and nothing to write.
  if (ninner == 0 || nsegments == 0) return OkStatus();
  if (debug) {
    DisPlay("input", input, nouter * ninner);
    DisPlay("segment_ids", segment_ids, nouter);
    // `indices` is optional; guard against dereferencing a null pointer.
    if (indices) DisPlay("indices", indices, nouter);
    std::cout << "MinAlignmentOf(input, output, ninner) : " << MinAlignmentOf(input, output, ninner)
              << std::endl;
  }

  // Pick the widest vectorization compatible with the pointer and stride
  // alignment, then forward to the vectorized implementation.
  return DispatchToVectorized<T, SegmentReduceGPUVectorized<Treduce>::template Impl>(
      MinAlignmentOf(input, output, ninner), ctx, nouter, ninner, nsegments, reduce_op,
      initial_value, empty_segment_value, is_mean, is_sqrtn, input, segment_ids, indices, weights,
      output);
}

}  // namespace segment

#endif  // HELLOTEST_SEGMENT_REDUCTION_H
