//===- dis_scatter_backward_impl.hpp --------------------------------------- C++ ---===//
//
// Copyright 2025 ByteDance Ltd. and/or its affiliates. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//===----------------------------------------------------------------------===//

#pragma once
#include "flux/flux.h"
#include <nvshmemx.h>
#include <nvshmem.h>
namespace bytedance {
namespace flux {

// Parameters for the multi-node build-index kernel of dis-scatter backward.
// Inputs describe the all-gathered expert routing; outputs are per-node send
// plans plus a per-token reduction index.
struct DisScatterBackwardBuildIndexParams {
  int32_t *ag_exp_indices;   // all-gathered expert indices per token
  int32_t *ag_scatter_idx;   // all-gathered scatter positions per token
  int32_t total_num_experts;
  int32_t rank;
  int32_t world_size;
  int32_t local_world_size;
  int32_t n_nodes;
  int32_t max_token_per_rank;
  int32_t topk;
  // rank / ep_ranks_per_node
  int32_t cur_node_id;
  // Host-filled fallback tables: for each node, the offsets of the first and
  // last token belonging to the rank on that node which shares this GPU's
  // local rank.
  int32_t global_token_start[kMaxNodes];
  int32_t global_token_end[kMaxNodes];
  int32_t ep_cum_sum[kMaxLocalWorldSize];

  // for forward_gpu, same as ep_cum_sum.
  int32_t *ep_cum_sum_gpu_ptr;
  int32_t *ep_token_cum_sum_gpu_ptr;

  // following are outputs
  //[n_node]
  int32_t *block_count_send;
  //[n_node, max_token_per_rank]
  int32_t *block_idx_send;
  //[n_node, max_token_per_rank]
  int32_t *block_n_tokens;
  //[n_node, max_token_per_rank]
  int32_t *token_idx_send;
  //[n_node, max_token_per_rank]
  int32_t *token_topk_count;
  //[n_node, max_token_per_rank, topk]
  int32_t *token_scatterd_pos;
  //[n_node, max_token_per_rank, topk]
  int32_t *token_scatterd_local_rank;
  //[max_token, topk+1]
  int32_t *reduce_token_idx;

  // First-token offset for the same-local-rank peer on `node_id`. Prefers the
  // device-resident cumulative sum when present, else the host-filled table.
  __device__ __forceinline__ int32_t
  get_global_token_start(int32_t node_id) const {
    return ep_token_cum_sum_gpu_ptr != nullptr
               ? ep_token_cum_sum_gpu_ptr[rank % local_world_size +
                                          node_id * local_world_size]
               : global_token_start[node_id];
  }

  // One-past-last token offset for the same-local-rank peer on `node_id`;
  // same device/host fallback rule as get_global_token_start.
  __device__ __forceinline__ int32_t
  get_global_token_end(int32_t node_id) const {
    return ep_token_cum_sum_gpu_ptr != nullptr
               ? ep_token_cum_sum_gpu_ptr[rank % local_world_size +
                                          node_id * local_world_size + 1]
               : global_token_end[node_id];
  }
};

// Parameters for the single-node build-index kernel of dis-scatter backward.
// Same role as DisScatterBackwardBuildIndexParams but indexed per local rank
// instead of per node.
struct DisScatterBackwardBuildIndexSingleNodeParams {
  int32_t *ag_exp_indices;   // all-gathered expert indices per token
  int32_t *ag_scatter_idx;   // all-gathered scatter positions per token
  int32_t total_num_experts;
  int32_t rank;
  int32_t world_size;
  int32_t local_world_size;
  int32_t max_token_per_rank;
  int32_t topk;
  // global_token_start/end store, per local rank, the offsets of the first
  // and last token of the corresponding rank that shares this GPU's local rank
  int32_t global_token_start[kMaxLocalWorldSize];
  int32_t global_token_end[kMaxLocalWorldSize];
  int32_t ep_cum_sum[kMaxLocalWorldSize];

  // for forward_gpu, same as ep_cum_sum.
  int32_t *ep_cum_sum_gpu_ptr;
  int32_t *ep_token_cum_sum_gpu_ptr;

  // following are outputs
  //[local_world_size]
  int32_t *token_count_send;
  //[local_world_size, max_token_per_rank]
  int32_t *token_idx_send;
  //[local_world_size, max_token_per_rank, topk+1] // for data dedup before send
  int32_t *token_local_reduce_pos;
  //[max_token, topk+1]
  int32_t *reduce_token_idx;

  // First-token offset for `target_rank`; reads the device-resident cumulative
  // sum when available, otherwise the host-filled table.
  __device__ __forceinline__ int32_t
  get_global_token_start(int32_t target_rank) const {
    return ep_token_cum_sum_gpu_ptr != nullptr ? ep_token_cum_sum_gpu_ptr[target_rank]
                                               : global_token_start[target_rank];
  }

  // One-past-last token offset for `target_rank` (cum-sum entry target_rank+1
  // on the device path); falls back to the host-filled table.
  __device__ __forceinline__ int32_t
  get_global_token_end(int32_t target_rank) const {
    return ep_token_cum_sum_gpu_ptr != nullptr ? ep_token_cum_sum_gpu_ptr[target_rank + 1]
                                               : global_token_end[target_rank];
  }
};

// Top-level launch parameters for the multi-node dis-scatter backward kernel.
struct DisScatterBackwardParams {
  void *internal_ptrs[kMaxLocalWorldSize];  // input_ptrs[rank]: (m * world_size, n)
  void *output_ptrs[kMaxLocalWorldSize];    // per-local-rank output buffers
  // NOTE(review): original comment here duplicated the internal_ptrs one —
  // presumably a scratch buffer for cross-node reduction; confirm at call site
  void *reduction_buffer;
  void *result_buffer;          // final reduced result
  int32_t hidden_dim;           // token feature width (n)
  int32_t n_threadblocks;       // grid size used by the launcher
  int32_t global_rank_offset;
  DisScatterBackwardBuildIndexParams index_args;  // routing/index plan (see above)
};

// Top-level launch parameters for the single-node dis-scatter backward kernel.
struct DisScatterBackwardSingleNodeParams {
  void *input_ptr;                          // local input buffer
  void *output_ptrs[kMaxLocalWorldSize];    // per-local-rank output buffers
  void *result_buffer;                      // final reduced result
  int32_t hidden_dim;           // token feature width
  int32_t n_threadblocks;       // grid size used by the launcher
  int32_t global_rank_offset;
  DisScatterBackwardBuildIndexSingleNodeParams index_args;  // routing/index plan
};

// Launches the multi-node dis-scatter backward kernels on `stream`.
void dis_scatter_backward_impl(const DisScatterBackwardParams &params, cudaStream_t stream);
// Builds the per-node send/reduce index plan consumed by dis_scatter_backward_impl.
void dis_scatter_backward_build_index_impl(
    const DisScatterBackwardBuildIndexParams &params, cudaStream_t stream);
// Single-node variant of dis_scatter_backward_impl (intra-node only).
void dis_scatter_backward_single_node_impl(
    const DisScatterBackwardSingleNodeParams &params, cudaStream_t stream);
// Builds the per-local-rank index plan for the single-node backward path.
void dis_scatter_backward_build_index_single_node_impl(
    const DisScatterBackwardBuildIndexSingleNodeParams &params, cudaStream_t stream);
// Reduces topk partial rows of `input` into `output` following `reduce_idx`
// (shape semantics defined by the .cu implementation — not visible here).
void topk_reduce_impl(
    void *input, void *output, int32_t *reduce_idx, int M, int N, int topk, cudaStream_t stream);
}  // namespace flux
}  // namespace bytedance
