// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <Python.h>

#include <algorithm>
#include <cstdint>
#include "paddle/fluid/eager/api/all.h"
#include "paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/scope_guard.h"
#include "paddle/fluid/imperative/amp_utils.h"
#include "paddle/fluid/pybind/tensor_py.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/common_infer_shape_functions.h"
#include "paddle/phi/kernels/funcs/slice_utils.h"
#include "paddle/phi/kernels/funcs/strided_slice.h"
#include "paddle/utils/pybind.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"

using egr::ConvertAllInputsToDistTensor;
using egr::InputsContainDistTensor;

namespace py = pybind11;

namespace paddle {
namespace pybind {
// Computes the broadcast shape of `a` and `b` following numpy broadcasting
// rules: dims are aligned from the right; a size-1 dim (or a missing dim)
// stretches to match the other tensor's size.
// Throws (Fatal) when a pair of sizes is neither equal nor 1.
static inline common::DDim infer_size_symdimvector(common::DDim a,
                                                   common::DDim b) {
  // Iterate with signed indices so `dimA`/`dimB` may go negative for the
  // shorter shape (treated as size 1).
  auto dimsA = a.size();
  auto dimsB = b.size();
  auto ndim = dimsA > dimsB ? dimsA : dimsB;
  common::DDim expandedSizes = common::make_ddim(std::vector<int64_t>(ndim, 0));

  for (int64_t i = ndim - 1; i >= 0; --i) {
    int64_t offset = ndim - 1 - i;
    int64_t dimA = dimsA - 1 - offset;
    int64_t dimB = dimsB - 1 - offset;
    auto sizeA = (dimA >= 0) ? a[dimA] : 1;
    auto sizeB = (dimB >= 0) ? b[dimB] : 1;

    // Use the printf-style formatting the error helpers expect (see the
    // other PADDLE_THROW sites in this file); the previous comma-separated
    // fragments were never interpolated into the message.
    PADDLE_ENFORCE_EQ(
        sizeA == sizeB || sizeA == 1 || sizeB == 1,
        true,
        common::errors::Fatal("The size of tensor a (%d) must match the size "
                              "of tensor b (%d) at non-singleton dimension %d.",
                              sizeA,
                              sizeB,
                              i));

    // 1s map to the other size (even 0).
    expandedSizes[i] = sizeA == 1 ? sizeB : sizeA;
  }

  return expandedSizes;
}

// Broadcasts `to_expand` so its shape matches `tensor`'s shape. If the
// leading dims already disagree, a trailing singleton dim is squeezed off
// first so expand can broadcast the remaining dims.
static inline paddle::Tensor expand_inplace(paddle::Tensor tensor,
                                            paddle::Tensor to_expand) {
  const auto& target_dims = tensor.dims();
  if (target_dims == to_expand.dims()) {
    return to_expand;
  }
  if (target_dims[0] != to_expand.dims()[0]) {
    // Leading dims differ: drop the trailing size-1 dim before expanding.
    to_expand = squeeze_ad_func(to_expand, {-1});
  }
  return expand_ad_func(to_expand, common::vectorize<int64_t>(target_dims));
}

// Converts every boolean index tensor into integer index tensors: a bool
// mask of rank R is replaced by R column slices of its nonzero-coordinate
// matrix. Non-bool tensors pass through unchanged.
static inline std::vector<paddle::Tensor> expandTensors(
    std::vector<paddle::Tensor> indices) {
  std::vector<paddle::Tensor> expanded;
  for (auto& idx : indices) {
    if (idx.dtype() != paddle::DataType::BOOL) {
      expanded.emplace_back(idx);
      continue;
    }
    auto coords = nonzero_ad_func(idx);  // (num_true, rank) coordinates
    const int rank = idx.dims().size();
    for (int d = 0; d < rank; ++d) {
      // One integer index tensor per dimension of the boolean mask.
      expanded.emplace_back(slice_ad_func(coords, {1}, {d}, {d + 1}, {1}, {1}));
    }
  }
  return expanded;
}

// Broadcasts every defined tensor in `to_expand` to their common broadcast
// shape; undefined (null) slots are carried through untouched.
static inline std::vector<paddle::Tensor> expand_outplace(
    std::vector<paddle::Tensor> to_expand) {
  // Pass 1: fold all defined shapes into one broadcast shape.
  common::DDim broadcast_shape;
  bool seen_any = false;
  for (auto& t : to_expand) {
    if (!t.defined()) {
      continue;
    }
    broadcast_shape = seen_any
                          ? infer_size_symdimvector(broadcast_shape, t.dims())
                          : t.dims();
    seen_any = true;
  }

  // Pass 2: expand whatever does not already match.
  std::vector<paddle::Tensor> expanded(to_expand.size());
  for (size_t pos = 0; pos < to_expand.size(); ++pos) {
    auto& t = to_expand[pos];
    if (!t.defined()) {
      continue;
    }
    expanded[pos] =
        (t.dims() == broadcast_shape)
            ? t
            : expand_ad_func(t, common::vectorize<int64_t>(broadcast_shape));
  }
  return expanded;
}

// Precomputed metadata for advanced (tensor) indexing: the restrided view of
// the source tensor plus the sizes/byte-strides of the indexed dimensions,
// consumed by index_elementwise_get/put kernels. See the constructor below
// for how each field is filled.
struct AdvancedIndex {
  AdvancedIndex(paddle::Tensor src, std::vector<paddle::Tensor> indices);

  paddle::Tensor src;                    // the tensor being indexed
  std::vector<paddle::Tensor> indices;   // defined index tensors (padded shape)
  std::vector<int64_t> indexed_sizes;    // indexed dims' sizes, then per-index
                                         // padded shapes separated by -1
  std::vector<int64_t> indexed_strides;  // indexed dims' strides in BYTES
  std::vector<int64_t> src_sizes;        // source shape after restride_src
  std::vector<int64_t> src_strides;      // source strides after restride_src
  int64_t dims_before;  // un-indexed dims preceding the indexed block
  int64_t dims_after;   // un-indexed dims following the indexed block
  // NOTE(review): bool_case is never assigned anywhere in this header —
  // confirm whether callers set it or it is dead.
  bool bool_case;
};

// Rewrites `shape`/`strides` in place: the `dims_indexed` dims starting at
// `dims_before` are removed and replaced by `replacement_shape` (the index
// tensors' broadcast shape), whose dims all get stride 0 so the source value
// is selected purely by the index tensors.
// `replacement_shape` is taken by const reference (it was previously copied
// by value on every call for no benefit).
inline static void restride_src(std::vector<int64_t>* shape,
                                std::vector<int64_t>* strides,
                                int64_t dims_before,
                                int64_t dims_indexed,
                                const std::vector<int64_t>& replacement_shape) {
  int64_t end = dims_before + dims_indexed;
  shape->erase(shape->begin() + dims_before, shape->begin() + end);
  strides->erase(strides->begin() + dims_before, strides->begin() + end);
  shape->insert(shape->begin() + dims_before,
                replacement_shape.begin(),
                replacement_shape.end());
  // Stride 0: every replacement dim re-reads the same source element.
  strides->insert(strides->begin() + dims_before, replacement_shape.size(), 0);
}

// move to cuda kernel
// Returns `index`'s shape padded with `dims_before` leading ones and
// `dims_after` trailing ones, aligning the index tensor with the restrided
// source produced by restride_src. The tensor itself is not modified.
inline static std::vector<int64_t> reshape_indexer(paddle::Tensor* index,
                                                   int64_t dims_before,
                                                   int64_t dims_after) {
  const auto index_shape = common::vectorize<int64_t>(index->dims());
  std::vector<int64_t> padded;
  padded.reserve(index_shape.size() +
                 static_cast<size_t>(dims_before + dims_after));
  padded.assign(static_cast<size_t>(dims_before), 1);
  padded.insert(padded.end(), index_shape.begin(), index_shape.end());
  padded.insert(padded.end(), static_cast<size_t>(dims_after), 1);
  return padded;
}

// Builds indexing metadata for `src` given one entry per source dimension in
// `indices_list`; undefined entries mark dims that are NOT advanced-indexed.
inline AdvancedIndex::AdvancedIndex(paddle::Tensor src,
                                    std::vector<paddle::Tensor> indices_list) {
  uint32_t element_size_bytes = phi::SizeOf(src.dtype());
  int64_t dims_before = 0, dims_after = 0, dims_indexed = 0;
  std::vector<int64_t> shape_vec = common::vectorize<int64_t>(src.dims());
  std::vector<int64_t> stride_vec = common::vectorize<int64_t>(src.strides());
  std::vector<int64_t> replacement_shape;
  std::vector<int64_t> idx_shape_vec = {};
  std::vector<int64_t> idx_stride_vec = {};

  // Classify each dim: count un-indexed dims before/after the indexed block
  // and record the size and BYTE stride of every indexed dim.
  for (size_t dim = 0; dim < indices_list.size(); dim++) {
    if (!indices_list[dim].defined()) {
      if (dims_indexed == 0) {
        dims_before++;
      } else {
        dims_after++;
      }
    } else {
      dims_indexed++;
      // The last defined index's shape wins here — presumably all index
      // tensors were broadcast to a common shape by the caller; confirm.
      replacement_shape = common::vectorize<int64_t>(indices_list[dim].dims());

      idx_shape_vec.push_back(shape_vec[dim]);
      idx_stride_vec.push_back(stride_vec[dim] * element_size_bytes);
    }
  }

  this->dims_before = dims_before;
  this->dims_after = dims_after;
  // Replace the indexed dims of the source view with the index tensors'
  // broadcast shape; those replacement dims get stride 0.
  restride_src(
      &shape_vec, &stride_vec, dims_before, dims_indexed, replacement_shape);
  this->src_sizes = shape_vec;
  this->src_strides = stride_vec;

  this->indexed_sizes = idx_shape_vec;
  this->indexed_strides = idx_stride_vec;

  // use dims_before and dims_after / move to cuda kernel
  for (auto& index : indices_list) {
    if (index.defined()) {
      std::vector<int64_t> vec_size =
          reshape_indexer(&index, dims_before, dims_after);
      this->indices.push_back(index);
      // NOTE(review): -1 appears to act as a separator before each index's
      // padded shape is appended onto indexed_sizes — confirm this encoding
      // against the consuming index_elementwise kernels.
      this->indexed_sizes.push_back(-1);
      this->indexed_sizes.insert(
          this->indexed_sizes.end(), vec_size.begin(), vec_size.end());
    }
  }
}

// Returns the first element of `x` as T, copying to host first when the
// tensor lives off-CPU. T must match the tensor's stored dtype — data<T>()
// performs no conversion.
template <typename T>
inline T GetDenseTensorValue(const phi::DenseTensor* x) {
  T value = static_cast<T>(0);
  if (!(x->place().GetType() == phi::AllocationType::CPU)) {
    phi::DenseTensor cpu_x;
    framework::TensorCopy(*x, phi::CPUPlace(), &cpu_x);
#if defined(PADDLE_WITH_CUSTOM_DEVICE)
    // On custom devices the copy may be asynchronous; wait on the source
    // device's context before reading host memory.
    phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance();
    const phi::DeviceContext* dev_ctx = pool.Get(x->place());
    dev_ctx->Wait();
#endif
    value = cpu_x.data<T>()[0];
  } else {
    value = x->data<T>()[0];
  }
  return value;
}
// Forward declaration (defined later): extracts an integer slice bound from
// a Python tensor object; used by _PySlice_GetIndices below.
static Py_ssize_t GetSliceIndexFromPyObject(PyObject* obj);
// Slice related methods
// True only for genuine Python ints: bool is a subtype of int in Python and
// must be excluded so it can be treated as a mask rather than an index.
static bool PyCheckInteger(PyObject* obj) {
  if (!PyLong_Check(obj)) {
    return false;
  }
  return !PyBool_Check(obj);
}

static bool IsNumpyType(PyObject* obj) {
  // Recognize numpy integer scalars by their type name. As noted before,
  // `PyArray_IsScalar` would be more robust, but it cannot be used when
  // building against pybind11 alone — it requires compiling with numpy.
  const std::string type_name(Py_TYPE(obj)->tp_name);
  static const char* kNumpyIntTypeNames[] = {
      "numpy.int64", "numpy.longlong", "numpy.int32", "numpy.int16"};
  for (const char* candidate : kNumpyIntTypeNames) {
    if (type_name == candidate) {
      return true;
    }
  }
  return false;
}

// Detects numpy.ndarray by type name, avoiding a hard numpy dependency.
static bool IsNumpyArray(PyObject* obj) {
  return std::string(Py_TYPE(obj)->tp_name) == "numpy.ndarray";
}

// Extracts a slice bound from a tensor: the tensor must hold exactly one
// element of type int32 or int64; anything else raises InvalidArgument.
static Py_ssize_t GetSliceIndexFromTensor(const phi::DenseTensor& tensor) {
  if (tensor.numel() != 1) {
    PADDLE_THROW(common::errors::InvalidArgument(
        "Currently, tensor in slice indices only allows 1 element, "
        "but received %d.",
        tensor.numel()));
  }
  const auto var_type = framework::TransToProtoVarType(tensor.type());
  if (var_type == framework::proto::VarType::INT32) {
    return static_cast<Py_ssize_t>(GetDenseTensorValue<int32_t>(&tensor));
  }
  if (var_type == framework::proto::VarType::INT64) {
    return static_cast<Py_ssize_t>(GetDenseTensorValue<int64_t>(&tensor));
  }
  PADDLE_THROW(common::errors::InvalidArgument(
      "Currently, the type of tensor in slice indices only allows "
      "int32 and int64, please check the type of index tensor."));
}

// NOTE(zhiqiu): Revised version of PySlice_GetIndices. From:
// https://github.com/python/cpython/blob/8d21aa21f2cbc6d50aab3f420bb23be1d081dac4/Objects/sliceobject.c#L103
// The original PySlice_GetIndices returns a wrong result when the
// slice_item contains a long int, such as arr[:180L]; the cause is unclear.
// Besides, PySlice_GetIndices cannot raise an error when a float appears in
// a slice item. So this is a revised version, named _PySlice_GetIndices.
// Consider switching to _PySlice_Unpack, which is more robust than
// PySlice_GetIndices, in the future.
static int _PySlice_GetIndices(PySliceObject* r,
                               Py_ssize_t length,
                               Py_ssize_t* start,
                               Py_ssize_t* stop,
                               Py_ssize_t* step) {
  /* XXX support long ints */
  // Converts one slice field (start/stop/step) to an integer. Accepts
  // Python ints, numpy integer scalars, and 1-element int tensors; anything
  // else raises InvalidArgument.
  auto to_index = [](PyObject* field) -> Py_ssize_t {
    if (PyCheckInteger(field) || IsNumpyType(field)) {
      return PyLong_AsLong(field);
    }
    if (PyCheckTensor(field)) {
      return GetSliceIndexFromPyObject(field);
    }
    PADDLE_THROW(common::errors::InvalidArgument(
        "Currently, slice indices only allows None, integers, "
        "tensor(int) and numpy(int) in slice item, but received %s.",
        std::string(Py_TYPE(field)->tp_name)));
  };

  // step must be resolved first: the defaults for start/stop depend on its
  // sign (negative steps iterate from the far end).
  *step = (r->step == Py_None) ? 1 : to_index(r->step);
  *start = (r->start == Py_None) ? (*step < 0 ? length - 1 : 0)
                                 : to_index(r->start);
  *stop = (r->stop == Py_None) ? (*step < 0 ? -length - 1 : length)
                               : to_index(r->stop);

  // normalize start and stop
  bool dummy_zero_dim_out = false;
  phi::funcs::normalize_interval(
      *start, *stop, *step, length, start, stop, &dummy_zero_dim_out);
  // return value below seems to be useless...
  if (*stop > length) return -1;
  if (*start >= length) return -1;
  if (*step == 0) return -1;
  return 0;
}

// Parses a Python indexing tuple `index` against `tensor`, splitting it into
// a basic-slicing plan and an advanced-indexing plan.
//
// Outputs:
//   slice_axes/starts/ends/strides: arguments for slice / strided_slice.
//   decrease_axis: dims indexed by a scalar, to be squeezed afterwards.
//   none_axes:     positions where None (or a Python bool) inserts a dim.
//   infer_flags:   one `1` pushed per tuple item (passed through to slice).
//   advanced_index / advanced_index_dim: index tensors and the estimated
//                  dims they apply to (advanced_index_dim must be pre-sized
//                  and filled with -1 by the caller).
//   has_advanced_index: true if any tensor/bool/array index was seen.
//   use_strided_slice:  true if any slice has step != 1.
static void ParseIndex(const paddle::Tensor& tensor,
                       PyObject* index,
                       std::vector<int64_t>* slice_axes,
                       std::vector<int64_t>* slice_starts,
                       std::vector<int64_t>* slice_ends,
                       std::vector<int64_t>* slice_strides,
                       std::vector<int64_t>* decrease_axis,
                       std::vector<int64_t>* none_axes,
                       std::vector<int64_t>* infer_flags,
                       std::vector<int>* advanced_index_dim,
                       std::vector<paddle::Tensor>* advanced_index,
                       bool* has_advanced_index,
                       bool* use_strided_slice) {
  // for case 0-size tensor in slice
  PADDLE_ENFORCE_EQ(
      tensor.defined(),
      true,
      common::errors::InvalidArgument("tensor has not been defined"));
  const auto& shape = tensor.dims();
  const int rank = shape.size();
  const int size = PyTuple_GET_SIZE(index);
  // Check Ellipsis is valid
  // First pass: count how many tuple items consume a real dim
  // (everything except Ellipsis, None and Python bools) so the Ellipsis
  // expansion below knows how many dims it stands for.
  int specified_dims = 0;
  int ell_count = 0;
  for (int dim = 0; dim < size; ++dim) {
    PyObject* slice_item = PyTuple_GetItem(index, dim);
    if (slice_item == Py_Ellipsis) {
      ell_count++;
    } else if (slice_item != Py_None && !PyBool_Check(slice_item)) {
      specified_dims++;
    }
  }
  PADDLE_ENFORCE_LE(ell_count,
                    1,
                    common::errors::InvalidArgument(
                        "An index can only have a single ellipsis ('...')"));

  // deal with indexing_item
  // current_dim:   dim of `tensor` the item applies to.
  // estimated_dim: dim in the (post-slicing) output, used to position
  //                advanced indices.
  int none_count = 0;
  for (int64_t i = 0, current_dim = 0, estimated_dim = 0; i < size; ++i) {
    PyObject* slice_item = PyTuple_GetItem(index, i);

    infer_flags->push_back(1);
    // NOTE(review): if current_dim can reach `rank` here (e.g. trailing
    // None items after all dims are consumed) this reads past the shape —
    // confirm upstream guarantees.
    int64_t dim_len = shape[current_dim];
    if (PyCheckInteger(slice_item) || IsNumpyType(slice_item)) {
      // integer, PyLong_AsLong supports both int and long
      int64_t start = static_cast<int64_t>(PyLong_AsLong(slice_item));
      auto s_t = start;
      start = start < 0 ? start + dim_len : start;

      PADDLE_ENFORCE(
          0 <= start && start < dim_len,
          common::errors::OutOfRange("The starting index %d of slice is out "
                                     "of bounds in tensor %d-th axis, it "
                                     "should be in the range of [%d, %d).",
                                     s_t,
                                     current_dim,
                                     -dim_len,
                                     dim_len));

      // Scalar index: take a length-1 slice and mark the dim for squeezing.
      slice_axes->push_back(current_dim);
      slice_starts->push_back(start);
      slice_ends->push_back(start + 1);
      slice_strides->push_back(1);
      decrease_axis->push_back(current_dim);
      current_dim++;
    } else if (PySlice_Check(slice_item)) {
      // slice item
      Py_ssize_t start, end, step;
      PySliceObject* p = reinterpret_cast<PySliceObject*>(slice_item);
      _PySlice_GetIndices(p, dim_len, &start, &end, &step);

      // :: or : or 0:dim_len:1
      if (start == 0 && end == dim_len && step == 1) {
        current_dim++;
        estimated_dim++;
        continue;
      }
      slice_axes->push_back(current_dim);
      slice_starts->push_back(start);
      slice_ends->push_back(end);
      slice_strides->push_back(step);
      estimated_dim++;
      current_dim++;

      if (step != 1) {
        *use_strided_slice = true;
      }
    } else if (slice_item == Py_Ellipsis) {
      // Ellipsis stands for all the dims not consumed by other items.
      current_dim += rank - specified_dims;
      estimated_dim += rank - specified_dims;
    } else if (slice_item == Py_None) {
      none_axes->push_back(current_dim + none_count);
      none_count++;
    } else if (PyBool_Check(slice_item)) {
      // A bare Python bool inserts a new dim AND acts as a 1-element
      // boolean mask (True keeps the data, False selects nothing).
      *has_advanced_index = true;
      none_axes->push_back(current_dim + none_count);
      none_count++;
      bool index_ele = (slice_item == Py_True);
      auto slice_tensor =
          full_ad_func({1}, index_ele, phi::DataType::BOOL, tensor.place());
      advanced_index->push_back(std::move(slice_tensor));
      (*advanced_index_dim)[estimated_dim] = estimated_dim;
      estimated_dim++;
    } else if (PyCheckTensor(slice_item) || IsNumpyArray(slice_item)) {
      paddle::Tensor slice_tensor;

      if (IsNumpyArray(slice_item)) {
        // Materialize the numpy array as a DenseTensor on `tensor`'s place.
        paddle::Tensor index_tensor_tmp(
            std::make_shared<phi::DenseTensor>(),
            egr::Controller::Instance().GenerateUniqueName());

        py::object index_obj_tmp =
            py::reinterpret_borrow<py::object>(slice_item);
        py::object index_tmp = index_obj_tmp;
        SetTensorFromPyArray(
            static_cast<phi::DenseTensor*>(index_tensor_tmp.impl().get()),
            index_tmp,
            tensor.place(),
            false);
        slice_tensor = index_tensor_tmp;

      } else {
        slice_tensor = CastPyArg2Tensor(slice_item, 0);
      }

      if (slice_tensor.shape().size() == 0) {
        if (slice_tensor.dtype() != phi::DataType::BOOL) {
          // 0-D int tensor is same with scalar
          PADDLE_ENFORCE_EQ(
              slice_tensor.is_dense_tensor(),
              true,
              common::errors::InvalidArgument(
                  "Now, Tensor in indexing only support DenseTensor."));
          Py_ssize_t s_t = GetSliceIndexFromTensor(
              (*static_cast<phi::DenseTensor*>(slice_tensor.impl().get())));
          auto start = s_t < 0 ? s_t + dim_len : s_t;

          PADDLE_ENFORCE(0 <= start && start < dim_len,
                         common::errors::OutOfRange(
                             "The starting index %d of slice is out "
                             "of bounds in tensor %d-th axis, it "
                             "should be in the range of [%d, %d).",
                             s_t,
                             current_dim,
                             -dim_len,
                             dim_len));

          slice_axes->push_back(current_dim);
          slice_starts->push_back(start);
          slice_ends->push_back(start + 1);
          slice_strides->push_back(1);
          decrease_axis->push_back(current_dim);
          current_dim++;
        } else {
          // 0-D bool Tensor, same as single PY-bool.
          *has_advanced_index = true;
          none_axes->push_back(current_dim + none_count);
          none_count++;
          slice_tensor = unsqueeze_ad_func(slice_tensor, {-1});
          advanced_index->push_back(std::move(slice_tensor));
          (*advanced_index_dim)[estimated_dim] = estimated_dim;
          estimated_dim++;
        }
      } else {
        *has_advanced_index = true;
        if (slice_tensor.dtype() == phi::DataType::BOOL) {
          // bool tensor consumes (rank of index tensor) dimensions of input
          // tensor
          for (size_t i = 0; i < slice_tensor.shape().size(); i++) {
            PADDLE_ENFORCE_EQ(slice_tensor.shape()[i],
                              dim_len,
                              common::errors::OutOfRange(
                                  "The shape of boolean index %d did not match "
                                  "indexed tensor %d along axis %d.",
                                  slice_tensor.shape()[0],
                                  dim_len,
                                  current_dim));
            (*advanced_index_dim)[estimated_dim] = estimated_dim;
            estimated_dim++;
            current_dim++;
            // NOTE(review): on the last iteration current_dim may equal
            // `rank`, making this an out-of-bounds read of `shape` — the
            // value is unused afterwards only if the loop ends; confirm.
            dim_len = shape[current_dim];
          }
        } else {
          // int tensor consumes only one dimension of input tensor
          (*advanced_index_dim)[estimated_dim] = estimated_dim;
          estimated_dim++;
          current_dim++;
        }
        advanced_index->push_back(std::move(slice_tensor));
      }

    } else {
      PADDLE_THROW(common::errors::InvalidArgument(
          "Currently, Tensor.__indices__() only allows indexing "
          "by Boolean, Integers, Slices, Ellipsis, None, Tuples of these types "
          "and List / Tensor of Bool and Integers, but received "
          "%s in %dth slice item",
          std::string(Py_TYPE(slice_item)->tp_name),
          i + 1));
    }
  }

  // valid_index is the number of dimensions exclude None index
  const int valid_indices = size - none_axes->size() - ell_count;
  PADDLE_ENFORCE_EQ(valid_indices <= rank,
                    true,
                    common::errors::InvalidArgument(
                        "Too many indices (%d) for tensor of dimension %d.",
                        valid_indices,
                        rank));
}

// Applies the basic-slicing plan produced by ParseIndex: slice (or
// strided_slice when any step != 1), then unsqueeze the None axes.
// `out_is_view` is set to true whenever the result may alias `tensor`'s
// storage (any slicing or unsqueezing happened).
static paddle::Tensor getTensorWithBasicIndexing(
    const paddle::Tensor& tensor,
    std::vector<int64_t>* slice_axes,
    std::vector<int64_t>* slice_starts,
    std::vector<int64_t>* slice_ends,
    std::vector<int64_t>* slice_strides,
    std::vector<int64_t>* decrease_axis,
    std::vector<int64_t>* none_axes,
    std::vector<int64_t>* infer_flags,
    bool* use_strided_slice,
    bool* out_is_view) {
  paddle::Tensor out;
  if (slice_axes->empty()) {
    out = tensor;
  } else {
    *out_is_view = true;
    if (!(*use_strided_slice)) {
      // slice handles decrease_axis itself.
      eager_gil_scoped_release guard;
      out = slice_ad_func(tensor,
                          *slice_axes,
                          *slice_starts,
                          *slice_ends,
                          *infer_flags,
                          *decrease_axis);
    } else {
      // strided_slice takes int axes and does not squeeze; squeeze manually.
      eager_gil_scoped_release guard;
      std::vector<int> slice_axes_int32(slice_axes->begin(), slice_axes->end());

      out = strided_slice_ad_func(
          tensor, slice_axes_int32, *slice_starts, *slice_ends, *slice_strides);
      if (!decrease_axis->empty()) {
        out = squeeze_ad_func(out, *decrease_axis);
      }
    }
  }
  if (!none_axes->empty()) {
    *out_is_view = true;
    eager_gil_scoped_release guard;
    // Deal with cases that decrease_axes is not empty
    // For example:
    // # x.shape: (2,3,4)
    // out = x[0, 0:2, None] # out.shape : (2, 1, 4)
    // Each None axis recorded by ParseIndex assumed no dims had been
    // squeezed yet; shift it left by the number of decreased axes before it.
    for (auto& axis : *(none_axes)) {
      int len = 0;
      for (int64_t da : *decrease_axis) {
        if (da < axis) {
          len++;
        }
      }
      axis -= len;
    }
    out = unsqueeze_ad_func(out, *none_axes);
  }
  return out;
}

// Decides whether a set-item can be lowered to masked_fill: that requires
// exactly one index, it must be boolean, and the value tensor must be either
// uninitialized or a single scalar. On success, *mask_tensor receives the
// mask padded with trailing singleton dims to match `tensor`'s rank.
inline static bool MaskedFillDispatching(
    const paddle::Tensor& tensor,
    const std::vector<paddle::Tensor>& indices,
    paddle::Tensor* mask_tensor,
    paddle::Tensor* value_tensor) {
  const bool value_is_scalar =
      !value_tensor->initialized() || value_tensor->numel() == 1;
  if (!value_is_scalar) {
    return false;
  }
  if (indices.size() != 1) {
    return false;
  }

  const paddle::Tensor& mask = indices[0];
  if (mask.dtype() != phi::DataType::BOOL) {
    return false;
  }

  // The mask covers the leading dims; unsqueeze until ranks match.
  *mask_tensor = mask;
  for (size_t d = mask.shape().size(); d < tensor.shape().size(); ++d) {
    *mask_tensor = unsqueeze_ad_func(*mask_tensor, {-1});
  }
  return true;
}

// Prepares a tensor for advanced indexing: computes the transpose order that
// moves all advanced-indexed dims to the front (`trans_dim`), transposes the
// tensor accordingly, collects the index tensors in that order
// (`transed_index`), and determines where the new broadcast dims land in the
// output (`pos_of_new_dim`, `rank_of_new_dim`). For setitem, also computes
// the inverse permutation (`trans_back_dim`).
static paddle::Tensor dealWithAdvancedIndex(
    const paddle::Tensor& tensor,
    std::vector<int>* advanced_index_dim,
    std::vector<paddle::Tensor>* advanced_index,
    bool is_for_setitem,
    std::vector<paddle::Tensor>* transed_index,
    std::vector<int>* trans_back_dim,
    int* pos_of_new_dim,
    int* rank_of_new_dim,
    std::vector<int>* trans_dim,
    bool* out_is_view) {
  *rank_of_new_dim = 0;
  int p = 0;
  for (size_t i = 0; i < advanced_index_dim->size(); ++i) {
    auto index_dim = (*advanced_index_dim)[i];
    if (index_dim != -1) {
      // sum of each advanced_index_tensor's rank equals to number of non -1
      // element in advanced_index_dim
      auto index = (*advanced_index)[p++];

      if (index_dim == 0) {
        // case 1: advanced indices at axis 0, the new dim will be at first.
        *pos_of_new_dim = 0;
      } else if (index_dim > 0 && trans_dim->size() > 0 &&
                 (*trans_dim)[trans_dim->size() - 1] != index_dim - 1) {
        // case 2: there are not adjacent advanced indices, the new dim will
        // be at first.
        *pos_of_new_dim = 0;
      } else {
        *pos_of_new_dim = std::min(index_dim, *pos_of_new_dim);
      }

      if (index.dtype() == phi::DataType::BOOL) {
        *rank_of_new_dim = std::max(*rank_of_new_dim, 1);
        // A bool index occupies one slot per dim of its own rank in
        // advanced_index_dim; rewind one step, then consume those slots.
        i--;
        for (size_t j = 0; j < index.shape().size(); j++) {
          i++;
          index_dim = (*advanced_index_dim)[i];
          trans_dim->push_back(index_dim);
        }
        transed_index->push_back(std::move(index));
      } else {
        *rank_of_new_dim =
            std::max(*rank_of_new_dim, static_cast<int>(index.shape().size()));

        trans_dim->push_back(index_dim);
        transed_index->push_back(std::move(index));
      }
    }
  }

  // Append the dims that are NOT advanced-indexed, preserving their order.
  for (size_t i = 0; i < tensor.shape().size(); ++i) {
    if ((*advanced_index_dim)[i] == -1) {
      trans_dim->push_back(i);
    }
  }

  paddle::Tensor transed_tensor;

  // skip transform if the `trans_dim` is original order.
  std::vector<int> original_dim_order(tensor.shape().size());
  std::iota(original_dim_order.begin(), original_dim_order.end(), 0);

  if (original_dim_order == *trans_dim) {
    transed_tensor = tensor;
  } else {
    *out_is_view = true;
    // NOTE(review): with the stride kernel and a non-front new dim, the
    // transpose is skipped entirely — presumably the stride-based gather
    // path handles permutation itself; confirm.
    if (FLAGS_use_stride_kernel && *pos_of_new_dim != 0) {
      transed_tensor = tensor;
    } else {
      transed_tensor = transpose_ad_func(tensor, *trans_dim);
    }
  }

  if (is_for_setitem) {
    // Inverse permutation of trans_dim, to restore the original layout
    // after the set operation.
    trans_back_dim->resize(trans_dim->size());
    std::iota(trans_back_dim->begin(), trans_back_dim->end(), 0);
    std::sort(trans_back_dim->begin(),
              trans_back_dim->end(),
              [&trans_dim](int left, int right) {
                return (*trans_dim)[left] < (*trans_dim)[right];
              });
  }
  return transed_tensor;
}

// Splits the (num_true, rank) coordinate matrix `bool_2_idx` (the output of
// nonzero on a boolean mask) into one index tensor per column.
// NOTE(review): `tensor` and `bool_index` are currently unused here.
static std::vector<paddle::Tensor> PrepareIndices(
    const paddle::Tensor& tensor,
    const paddle::Tensor& bool_2_idx,
    const paddle::Tensor& bool_index) {
  std::vector<paddle::Tensor> indices;
  for (int64_t j = 0; j < bool_2_idx.shape()[1]; ++j) {
    paddle::Tensor sliced_tensor =
        slice_ad_func(bool_2_idx, {1}, {j}, {j + 1}, {1}, {});
    paddle::Tensor sliced_tensor_c = sliced_tensor.contiguous();
    // NOTE(review): if Tensor::reshape returns a new tensor rather than
    // mutating in place, this call has no effect and the column keeps shape
    // (num_true, 1) — confirm against paddle::Tensor's API.
    sliced_tensor_c.reshape({sliced_tensor.dims()[0]});
    indices.emplace_back(sliced_tensor_c);
  }
  return indices;
}

// Gathers values from `tensor` selected by a boolean mask `bool_index`.
// Dispatch:
//   - mask rank == tensor rank: masked_select.
//   - stride kernel enabled and self_tensor contiguous: convert the mask to
//     per-dim integer indices and call index_elementwise_get (with AMP
//     casting when an AMP level is active).
//   - otherwise: gather (1-D mask) or gather_nd.
static paddle::Tensor getValueForBoolTensor(const paddle::Tensor& tensor,
                                            const paddle::Tensor& self_tensor,
                                            const paddle::Tensor& bool_index,
                                            const int64_t slice_offset,
                                            const int64_t pos_of_new_dim) {
  PADDLE_ENFORCE(bool_index.shape().size() <= tensor.shape().size(),
                 common::errors::InvalidArgument(
                     "The dims of bool index doesn't match indexed array, "
                     "the dims of bool index except to be equal or less "
                     "than %d, but received %d}.",
                     tensor.shape().size(),
                     bool_index.shape().size()));
  auto tensor_shape = tensor.shape();
  size_t i = 0;
  // Validate each mask dim against the corresponding tensor dim. Under the
  // stride kernel the mask aligns at offset pos_of_new_dim (the tensor was
  // not transposed); otherwise it aligns at dim 0.
  if (FLAGS_use_stride_kernel) {
    while (i < bool_index.shape().size()) {
      PADDLE_ENFORCE_EQ(
          bool_index.shape()[i],
          tensor_shape[i + pos_of_new_dim],
          common::errors::OutOfRange(
              "The dimension of bool index doesn't match indexed array along "
              "dimension %d, the target dimension is %d, but received %d",
              i,
              tensor_shape[i + pos_of_new_dim],
              bool_index.shape()[i]));
      i++;
    }
  } else {
    while (i < bool_index.shape().size()) {
      PADDLE_ENFORCE_EQ(
          bool_index.shape()[i],
          tensor_shape[i],
          common::errors::OutOfRange(
              "The dimension of bool index doesn't match indexed array along "
              "dimension %d, the target dimension is %d, but received %d",
              i,
              tensor_shape[i],
              bool_index.shape()[i]));
      i++;
    }
  }

  // Auto-parallel: if any input is a DistTensor, convert all of them.
  const phi::distributed::ProcessMesh* mesh = nullptr;
  if (InputsContainDistTensor(&mesh, tensor, self_tensor, bool_index)) {
    ConvertAllInputsToDistTensor(mesh, tensor, self_tensor, bool_index);
  }

  if (bool_index.shape().size() == tensor_shape.size()) {
    return masked_select_ad_func(tensor, bool_index);
  }

  auto bool_2_idx = nonzero_ad_func(bool_index);
  if (FLAGS_use_stride_kernel && self_tensor.is_contiguous()) {
    // Column-split the nonzero coordinates, then pad with undefined slots so
    // there is one entry per tensor dim (undefined = dim not indexed).
    std::vector<paddle::Tensor> indices =
        PrepareIndices(tensor, bool_2_idx, bool_index);
    for (int i = 0; i < pos_of_new_dim; ++i) {
      indices.insert(indices.begin(), paddle::Tensor());
    }
    while (indices.size() < static_cast<size_t>(tensor.dims().size())) {
      indices.emplace_back(paddle::Tensor());
    }

    std::vector<paddle::Tensor> indices_int64;
    for (auto& indice : indices) {
      if (indice.defined() && indice.dtype() == paddle::DataType::INT32) {
        indice = indice.cast(paddle::DataType::INT64);  // int32 -> int64
      }
      indices_int64.push_back(indice);
    }

    // AMP Logic
    if (egr::Controller::Instance().GetAMPLevel() !=
        paddle::imperative::AmpLevel::O0) {
      auto op_name = phi::TransToFluidOpName("index_elementwise_get");
      paddle::small_vector<std::vector<paddle::Tensor>,
                           egr::kSlotSmallVectorSize>
          amp_tensors_vector = {{self_tensor}};

      auto amp_dst_dtype =
          paddle::imperative::GetAmpDestDtype(op_name, amp_tensors_vector);

      auto new_self_tensor = paddle::imperative::AmpAutoCast(
          "self_tensor", self_tensor, amp_dst_dtype, op_name);
      auto new_tensor = paddle::imperative::AmpAutoCast(
          "tensor", tensor, amp_dst_dtype, op_name);

      {
        // Disable AMP inside the guard so nested calls don't re-cast.
        paddle::imperative::AutoCastGuard guard(
            egr::Controller::Instance().GetCurrentAmpAttrs(),
            paddle::imperative::AmpLevel::O0);

        AdvancedIndex ad = AdvancedIndex(new_tensor, indices_int64);
        const bool is_combined = false;
        const bool accumulate = false;

        return index_elementwise_get_ad_func(new_self_tensor,
                                             ad.indices,
                                             ad.src_sizes,
                                             ad.src_strides,
                                             ad.indexed_sizes,
                                             ad.indexed_strides,
                                             slice_offset,
                                             accumulate,
                                             is_combined);
      }
    }

    AdvancedIndex ad = AdvancedIndex(tensor, indices_int64);
    const bool is_combined = false;
    const bool accumulate = false;

    return index_elementwise_get_ad_func(self_tensor,
                                         ad.indices,
                                         ad.src_sizes,
                                         ad.src_strides,
                                         ad.indexed_sizes,
                                         ad.indexed_strides,
                                         slice_offset,
                                         accumulate,
                                         is_combined);
  } else {
    // Fallback path: 1-D masks gather rows; higher-rank partial masks
    // gather by coordinate tuples.
    if (bool_index.shape().size() == 1)
      return gather_ad_func(tensor, bool_2_idx);

    return gather_nd_ad_func(tensor, bool_2_idx);
  }
}

// Normalizes a list of advanced indices in place: boolean masks are turned
// into integer coordinate tensors, and when several index tensors are present
// they are all broadcast to one common shape.
static void ParseBoolAndBroadcastIndices(
    std::vector<paddle::Tensor>* advanced_index) {
  // Convert every boolean mask into integer indices: nonzero() yields the
  // coordinates, then slicing keeps column 0 along axis 1.
  for (auto& index : *advanced_index) {
    if (index.dtype() == phi::DataType::BOOL) {
      paddle::Tensor coords = nonzero_ad_func(index);
      index = slice_ad_func(coords, {1}, {0}, {1}, {1}, {1});
    }
  }

  if (advanced_index->size() <= 1) {
    return;
  }

  // Fold all index shapes into a single broadcast shape, remembering
  // whether any of them actually disagreed.
  bool shapes_differ = false;
  common::DDim broadcast_shape =
      common::make_ddim((*advanced_index)[0].shape());
  for (size_t i = 1; i < advanced_index->size(); ++i) {
    common::DDim shape_i = common::make_ddim((*advanced_index)[i].shape());
    if (shape_i != broadcast_shape) {
      shapes_differ = true;
      broadcast_shape =
          phi::funcs::BroadcastTwoDims(shape_i, broadcast_shape, -1);
    }
  }

  if (!shapes_differ) {
    return;
  }

  // Here advanced_index has been checked ContainDistTensor
  // and transed in dealWithAdvancedIndex
  auto target_shape = common::vectorize<int64_t>(broadcast_shape);
  for (auto& index : *advanced_index) {
    if (index.shape() != target_shape) {
      index = expand_ad_func(index, target_shape);
    }
  }
}

// Normalizes the value side of a tensor assignment (__setitem__).
//
// Depending on the Python object `value_obj`, the value is returned as
// `value_tensor` (for paddle.Tensor / numpy.ndarray inputs, or when
// `trans_to_tensor` asks for a tensor-valued scalar) and/or appended to
// `*values` as a phi::Scalar (for Python int/float/bool/complex inputs).
// An uninitialized tensor is returned when only a scalar was produced.
static paddle::Tensor dealWithValues(const paddle::Tensor& tensor,
                                     PyObject* value_obj,
                                     std::vector<phi::Scalar>* values,
                                     const bool trans_to_tensor) {
  paddle::Tensor value_tensor;
  if (PyCheckTensor(value_obj)) {
    // Already a paddle.Tensor: use it directly.
    value_tensor = reinterpret_cast<TensorObject*>(value_obj)->tensor;
  } else if (py::isinstance<py::array>(value_obj)) {
    // numpy.ndarray: cast the array to the destination tensor's dtype first,
    // then copy it into a freshly created DenseTensor on `tensor`'s place.
    paddle::Tensor value_tensor_tmp(
        std::make_shared<phi::DenseTensor>(),
        egr::Controller::Instance().GenerateUniqueName());
    py::object value_obj_tmp = py::reinterpret_borrow<py::object>(value_obj);
    py::object value = value_obj_tmp;
    if (tensor.dtype() == phi::DataType::FLOAT32) {
      if (!py::isinstance<py::array_t<float>>(value_obj_tmp)) {
        value = pybind11::detail::CastNumpyArray<float>(value_obj_tmp);
      }
    } else if (tensor.dtype() == phi::DataType::FLOAT64) {
      if (!py::isinstance<py::array_t<double>>(value_obj_tmp)) {
        value = pybind11::detail::CastNumpyArray<double>(value_obj_tmp);
      }
    } else if (tensor.dtype() == phi::DataType::INT32) {
      if (!py::isinstance<py::array_t<int32_t>>(value_obj_tmp)) {
        value = pybind11::detail::CastNumpyArray<int32_t>(value_obj_tmp);
      }
    } else if (tensor.dtype() == phi::DataType::INT64) {
      if (!py::isinstance<py::array_t<int64_t>>(value_obj_tmp)) {
        value = pybind11::detail::CastNumpyArray<int64_t>(value_obj_tmp);
      }
    } else if (tensor.dtype() == phi::DataType::BOOL) {
      if (!py::isinstance<py::array_t<bool>>(value_obj_tmp)) {
        value = pybind11::detail::CastNumpyArray<bool>(value_obj_tmp);
      }
    } else if (tensor.dtype() == phi::DataType::COMPLEX64) {
      if (!py::isinstance<py::array_t<std::complex<float>>>(value_obj_tmp)) {
        value = pybind11::detail::CastNumpyArray<std::complex<float>>(
            value_obj_tmp);
      }
    } else if (tensor.dtype() == phi::DataType::COMPLEX128) {
      if (!py::isinstance<py::array_t<std::complex<double>>>(value_obj_tmp)) {
        value = pybind11::detail::CastNumpyArray<std::complex<double>>(
            value_obj_tmp);
      }
    } else {
      PADDLE_THROW(common::errors::InvalidArgument(
          "When assign a numpy.np value to a paddle.Tensor, "
          "the data type of the paddle.Tensor must be bool, "
          "float32, float64, complex64, complex128, int32 or int64, "
          "please check the type of tensor."));
    }
    SetTensorFromPyArray(
        static_cast<phi::DenseTensor*>(value_tensor_tmp.impl().get()),
        value,
        tensor.place(),
        false);
    value_tensor = value_tensor_tmp;
  } else {
    py::object value_obj_tmp = py::reinterpret_borrow<py::object>(value_obj);
    // convert the value to self data type
    if (py::isinstance<py::float_>(value_obj_tmp) ||
        py::isinstance<py::int_>(value_obj_tmp) ||
        py::isinstance<py::bool_>(value_obj_tmp) ||
        PyComplex_Check(value_obj)) {
      if (tensor.dtype() == phi::DataType::FLOAT32 ||
          tensor.dtype() == phi::DataType::FLOAT16 ||
          tensor.dtype() == phi::DataType::BFLOAT16) {
        values->push_back(value_obj_tmp.cast<float>());
      } else if (tensor.dtype() == phi::DataType::FLOAT64) {
        values->push_back(value_obj_tmp.cast<double>());
      } else if (tensor.dtype() == phi::DataType::INT32 ||
                 tensor.dtype() == phi::DataType::INT16 ||
                 tensor.dtype() == phi::DataType::INT8 ||
                 tensor.dtype() == phi::DataType::UINT8) {
        // NOTE(review): integral dtypes are parsed through float/double so a
        // Python float RHS is accepted too; narrowing happens later in the
        // kernel. Presumed intentional — confirm against set_value semantics.
        values->push_back(value_obj_tmp.cast<float>());
      } else if (tensor.dtype() == phi::DataType::INT64) {
        values->push_back(value_obj_tmp.cast<double>());
      } else if (tensor.dtype() == phi::DataType::BOOL) {
        values->push_back(value_obj_tmp.cast<bool>());
      } else if (tensor.dtype() == phi::DataType::COMPLEX64) {
        values->push_back(value_obj_tmp.cast<std::complex<float>>());
      } else if (tensor.dtype() == phi::DataType::COMPLEX128) {
        values->push_back(value_obj_tmp.cast<std::complex<double>>());
      }
    } else {
      PADDLE_THROW(common::errors::InvalidArgument(
          "Value type error. The assign value allows "
          "Tensor, numpy.ndarray, integer, float, complex or bool, "
          "but received %s.",
          Py_TYPE(value_obj)));
    }

    // The caller asked for a tensor-valued RHS: materialize the parsed
    // scalar as a 1-element tensor. Guard on non-empty `values` so that
    // (*values)[0] is never read when no scalar was parsed (e.g. a dtype
    // not covered by the chain above). The old guard `size() > 1` could
    // never hold after pushing a single scalar into an empty vector, so
    // `trans_to_tensor` silently had no effect.
    if (trans_to_tensor && !values->empty()) {
      value_tensor =
          full_ad_func({1}, (*values)[0], tensor.dtype(), tensor.place());
    }
  }
  return value_tensor;
}

// Pads `transed_index` with undefined placeholder tensors so that it has one
// entry per dimension of `transed_sub_tensor`, computes the byte offset of
// `sub_tensor` inside `tensor`'s allocation, and collects the indices into
// `transed_index_int64` with int32 indices promoted to int64.
static void DealWithIndex(const int pos_of_new_dim,
                          int64_t* slice_offset,
                          std::vector<paddle::Tensor>* transed_index,
                          paddle::Tensor* tensor,
                          paddle::Tensor* sub_tensor,
                          paddle::Tensor* transed_sub_tensor,
                          std::vector<paddle::Tensor>* transed_index_int64) {
  // `pos_of_new_dim` placeholders go in front; the tail is filled until the
  // index list matches the rank of the transposed sub-tensor.
  if (pos_of_new_dim > 0) {
    transed_index->insert(transed_index->begin(),
                          static_cast<size_t>(pos_of_new_dim),
                          paddle::Tensor());
  }
  const size_t target_rank =
      static_cast<size_t>(transed_sub_tensor->dims().size());
  if (transed_index->size() < target_rank) {
    transed_index->resize(target_rank);  // appends default (undefined) tensors
  }

  // Byte distance between the viewed sub-tensor and the base allocation.
  char* base_ptr = reinterpret_cast<char*>(tensor->data());
  char* sub_ptr = reinterpret_cast<char*>(sub_tensor->data());
  *slice_offset = static_cast<int64_t>(sub_ptr - base_ptr);

  // Promote int32 indices in place, then mirror the list into the output.
  for (auto& index : *transed_index) {
    if (index.defined() && index.dtype() == paddle::DataType::INT32) {
      index = index.cast(paddle::DataType::INT64);  // int32 -> int64
    }
    transed_index_int64->push_back(index);
  }
}

// Broadcasts `*to_expand` to the shape of `*tensor`. If the leading
// dimensions disagree, a trailing size-1 axis is squeezed off first (this
// mutates `*to_expand`, hence the name). Returns the reshaped tensor.
static inline paddle::Tensor expand_inplace(paddle::Tensor* tensor,
                                            paddle::Tensor* to_expand) {
  const auto target_dims = tensor->dims();
  if (target_dims == to_expand->dims()) {
    // Shapes already match; nothing to do.
    return *to_expand;
  }
  if (target_dims[0] != to_expand->dims()[0]) {
    // Leading dims differ: drop the last axis before expanding.
    *to_expand = squeeze_ad_func(*to_expand, {-1});
  }
  return expand_ad_func(*to_expand, common::vectorize<int64_t>(target_dims));
}

// Routes a set-item assignment to a concrete in-place kernel.
//
// Three paths, tried in order:
//   1. masked-fill fast path when MaskedFillDispatching recognizes the index
//      pattern as a boolean mask;
//   2. stride-kernel path (FLAGS_use_stride_kernel) via
//      index_elementwise_put[_with_tensor]__ad_func;
//   3. legacy index_put__ad_func fallback.
// `*out_is_view` tells whether `transed_sub_tensor` is a view of `tensor`;
// paths that write through the base tensor directly reset it to false.
static void DispatchSetitemKernel(const int pos_of_new_dim,
                                  bool* out_is_view,
                                  std::vector<paddle::Tensor>* transed_index,
                                  paddle::Tensor* tensor,
                                  paddle::Tensor* sub_tensor,
                                  paddle::Tensor* transed_sub_tensor,
                                  paddle::Tensor* value_tensor,
                                  std::vector<phi::Scalar>* values) {
  paddle::Tensor mask_tensor;
  if (MaskedFillDispatching(
          *transed_sub_tensor, *transed_index, &mask_tensor, value_tensor)) {
    if (value_tensor->initialized()) {
      // Tensor-valued RHS on a non-view target: masked_fill_ in place.
      // NOTE: when the target IS a view, this falls through to the general
      // dispatch below.
      if (!*out_is_view) {
        *transed_sub_tensor = masked_fill__ad_func(
            *transed_sub_tensor, mask_tensor, *value_tensor);
        return;
      }
    } else {
      if (*out_is_view) {
        // Scalar RHS into a view: write through the base tensor with the
        // mask's dims/strides plus the view's byte offset, so the base data
        // is updated directly.
        mask_tensor = expand_inplace(transed_sub_tensor, &mask_tensor);
        int64_t slice_offset = static_cast<int64_t>(
            reinterpret_cast<char*>(transed_sub_tensor->data()) -
            reinterpret_cast<char*>(tensor->data()));
        *transed_sub_tensor = index_elementwise_put__ad_func(
            *tensor,
            {mask_tensor},
            (*values)[0],
            common::vectorize<int64_t>(transed_sub_tensor->dims()),
            common::vectorize<int64_t>(transed_sub_tensor->strides()),
            common::vectorize<int64_t>(mask_tensor.dims()),
            common::vectorize<int64_t>(mask_tensor.strides()),
            slice_offset);
        // The base tensor was written directly; no view write-back needed.
        *out_is_view = false;
        return;
      } else {
        // Scalar RHS on a non-view target: materialize the scalar as a
        // 1-element tensor and masked_fill_ in place.
        paddle::Tensor value_tmp_tensor =
            full_ad_func({1}, (*values)[0], tensor->dtype(), tensor->place());
        *transed_sub_tensor = masked_fill__ad_func(
            *transed_sub_tensor, mask_tensor, value_tmp_tensor);
        return;
      }
    }
  }
  if (FLAGS_use_stride_kernel) {
    if (value_tensor->initialized()) {
      // Tensor-valued RHS: normalize indices (expand bool masks, broadcast),
      // pad them to the sub-tensor's rank, then scatter the value through
      // the base tensor with explicit sizes/strides.
      *transed_index = expandTensors(*transed_index);
      *transed_index = expand_outplace(*transed_index);

      std::vector<paddle::Tensor> transed_index_int64;
      int64_t slice_offset;

      DealWithIndex(pos_of_new_dim,
                    &slice_offset,
                    transed_index,
                    tensor,
                    sub_tensor,
                    transed_sub_tensor,
                    &transed_index_int64);

      AdvancedIndex ad =
          AdvancedIndex(*transed_sub_tensor, transed_index_int64);
      // Reject value shapes that cannot broadcast to the indexing result.
      PADDLE_ENFORCE_EQ(
          phi::funcs::CheckIsDimsMatchBool(common::make_ddim(ad.src_sizes),
                                           value_tensor->dims()),
          true,
          common::errors::InvalidArgument(
              "shape mismatch: value tensor of shape %s cannot be "
              "broadcast to indexing result of shape %s.",
              value_tensor->dims().to_str(),
              common::make_ddim(ad.src_sizes).to_str()));
      *transed_sub_tensor =
          index_elementwise_put_with_tensor__ad_func(*tensor,
                                                     ad.indices,
                                                     *value_tensor,
                                                     ad.src_sizes,
                                                     ad.src_strides,
                                                     ad.indexed_sizes,
                                                     ad.indexed_strides,
                                                     slice_offset);
      // New kernel does not need to transpose back, so set out_is_view to
      // false. Remove when all cases use this branch.
      *out_is_view = false;
    } else {
      // Scalar RHS: same index normalization, scalar variant of the kernel.
      *transed_index = expandTensors(*transed_index);
      *transed_index = expand_outplace(*transed_index);

      std::vector<paddle::Tensor> transed_index_int64;
      int64_t slice_offset;

      DealWithIndex(pos_of_new_dim,
                    &slice_offset,
                    transed_index,
                    tensor,
                    sub_tensor,
                    transed_sub_tensor,
                    &transed_index_int64);

      AdvancedIndex ad =
          AdvancedIndex(*transed_sub_tensor, transed_index_int64);
      *transed_sub_tensor = index_elementwise_put__ad_func(*tensor,
                                                           ad.indices,
                                                           (*values)[0],
                                                           ad.src_sizes,
                                                           ad.src_strides,
                                                           ad.indexed_sizes,
                                                           ad.indexed_strides,
                                                           slice_offset);
      // New kernel does not need to transpose back, so set out_is_view to
      // false. Remove when all cases use this branch.
      *out_is_view = false;
    }
  } else {
    // TODO(czy): remove in the future
    // Legacy fallback: index_put_ on the (possibly transposed) sub-tensor.
    if (value_tensor->initialized()) {
      *transed_sub_tensor = index_put__ad_func(
          *transed_sub_tensor, *transed_index, *value_tensor);
    } else {
      paddle::Tensor value_tmp_tensor =
          full_ad_func({1}, (*values)[0], tensor->dtype(), tensor->place());
      *transed_sub_tensor = index_put__ad_func(
          *transed_sub_tensor, *transed_index, value_tmp_tensor);
    }
  }
}

// Prepares the inputs of a set-item assignment — dtype alignment, AMP
// casting, layout transpose, dist-tensor conversion — and then forwards to
// DispatchSetitemKernel, which selects the concrete kernel.
//
// The RHS arrives either as `value_tensor` (initialized) or as a parsed
// scalar in `*values`; if neither is present there is nothing to assign.
static void ApplySetitem(const std::vector<int> trans_dim,
                         const int pos_of_new_dim,
                         bool* out_is_view,
                         std::vector<paddle::Tensor>* transed_index,
                         paddle::Tensor* tensor,
                         paddle::Tensor* self_tensor,
                         paddle::Tensor* sub_tensor,
                         paddle::Tensor* transed_sub_tensor,
                         paddle::Tensor* value_tensor,
                         std::vector<phi::Scalar>* values) {
  if (!value_tensor->initialized() && values->empty()) return;

  if (value_tensor->initialized()) {
    if (self_tensor->dtype() != value_tensor->dtype()) {
      // Under AMP, let the AMP machinery pick a common destination dtype
      // first; if the dtypes still differ afterwards, fall back to a plain
      // cast of the value.
      if (egr::Controller::Instance().GetAMPLevel() !=
          paddle::imperative::AmpLevel::O0) {
        paddle::small_vector<std::vector<paddle::Tensor>,
                             egr::kSlotSmallVectorSize>
            tmps = {{*self_tensor}, {*value_tensor}};
        auto amp_dtype = paddle::imperative::GetAmpDestDtype("index_put", tmps);
        *self_tensor = paddle::imperative::AmpAutoCast(
            self_tensor->name(), *self_tensor, amp_dtype, "index_put");
        *value_tensor = paddle::imperative::AmpAutoCast(
            value_tensor->name(), *value_tensor, amp_dtype, "index_put");
      }
      if (self_tensor->dtype() != value_tensor->dtype()) {
        *value_tensor = cast_ad_func(*value_tensor, self_tensor->dtype());
      }
    }

    // The legacy (non-stride) path transposes the destination view, so the
    // value must be transposed the same way to line up with it.
    if (value_tensor->dims().size() > 1 && pos_of_new_dim != 0) {
      if (!FLAGS_use_stride_kernel) {
        *value_tensor = transpose_ad_func(*value_tensor, trans_dim);
      }
    }

    // Auto-parallel: if any input is a dist tensor, convert them all onto a
    // common process mesh (the value tensor participates here).
    const phi::distributed::ProcessMesh* mesh = nullptr;
    if (InputsContainDistTensor(
            &mesh, *self_tensor, *transed_sub_tensor, *value_tensor)) {
      ConvertAllInputsToDistTensor(
          mesh, *self_tensor, *transed_sub_tensor, *value_tensor);
    }
  } else {
    // Scalar-valued assignment: only the tensors need dist conversion.
    const phi::distributed::ProcessMesh* mesh = nullptr;
    if (InputsContainDistTensor(&mesh, *self_tensor, *transed_sub_tensor)) {
      ConvertAllInputsToDistTensor(mesh, *self_tensor, *transed_sub_tensor);
    }
  }

  // Both branches dispatch with identical arguments, so the call is hoisted
  // here instead of being duplicated in each branch.
  DispatchSetitemKernel(pos_of_new_dim,
                        out_is_view,
                        transed_index,
                        tensor,
                        sub_tensor,
                        transed_sub_tensor,
                        value_tensor,
                        values);
}

// Executes the advanced-indexing part of __getitem__ and writes the result
// to `*out`.
//
// Paths, in order:
//   1. single boolean index -> getValueForBoolTensor;
//   2. stride-kernel path (FLAGS_use_stride_kernel, all indices defined,
//      contiguous source) -> index_elementwise_get_ad_func, with an AMP
//      sub-path that casts inputs before dispatch;
//   3. fallback -> stack/unsqueeze the indices and gather_nd.
// `pos_of_new_dim` / `rank_of_new_dim` describe where the dimensions
// produced by the advanced indices land, and drive the transpose that
// restores the expected output layout on the legacy paths.
static void ApplyGetitem(const int index_size,
                         const int pos_of_new_dim,
                         const int rank_of_new_dim,
                         std::vector<paddle::Tensor>* transed_index,
                         paddle::Tensor* tensor,
                         paddle::Tensor* self_tensor,
                         paddle::Tensor* sub_tensor,
                         paddle::Tensor* transed_tensor,
                         paddle::Tensor* out) {
  // Moves the `rank_of_new_dim` dimensions inserted at `pos_of_new_dim`
  // back to the front of `out` via a single transpose. No-op when the new
  // dims are already leading.
  auto handle_transpose = [&](Tensor& out) {
    if (pos_of_new_dim != 0) {
      std::vector<int> perm(out.shape().size(), 0);
      // tmp1 walks the leading dims, tmp2 the new dims, tmp3 the tail.
      int tmp1 = rank_of_new_dim, tmp2 = 0,
          tmp3 = pos_of_new_dim + rank_of_new_dim;
      for (int i = 0; i < static_cast<int>(out.shape().size()); ++i) {
        if (i < pos_of_new_dim) {
          perm[i] = tmp1++;
        } else if (i >= pos_of_new_dim &&
                   i < pos_of_new_dim + rank_of_new_dim) {
          perm[i] = tmp2++;
        } else {
          perm[i] = tmp3++;
        }
      }
      out = transpose_ad_func(out, perm);
    }
  };

  if (transed_index->size() == 1 &&
      (*transed_index)[0].dtype() == phi::DataType::BOOL) {
    // get value for bool tensor
    // Byte offset of the transposed view inside the source allocation.
    const int64_t slice_offset =
        reinterpret_cast<const char*>(transed_tensor->data()) -
        reinterpret_cast<const char*>(self_tensor->data());
    *out = getValueForBoolTensor(*transed_tensor,
                                 (*self_tensor),
                                 (*transed_index)[0],
                                 slice_offset,
                                 pos_of_new_dim);
    // The stride kernel already produces the final layout; only the legacy
    // path needs the transpose back.
    if (!FLAGS_use_stride_kernel) {
      handle_transpose(*out);
    }
    return;
  } else {
    // get value for int tensor
    ParseBoolAndBroadcastIndices(transed_index);
    // An uninitialized entry means a dimension with no advanced index; the
    // stride kernel below requires every index to be defined.
    bool has_empty_index = false;
    for (const auto& tmp_tensor : *transed_index) {
      if (!tmp_tensor.initialized()) {
        has_empty_index = true;
        break;
      }
    }

    if (FLAGS_use_stride_kernel && !has_empty_index &&
        self_tensor->is_contiguous()) {
      // Auto-parallel: move everything onto a common process mesh first.
      const phi::distributed::ProcessMesh* mesh = nullptr;
      if (InputsContainDistTensor(
              &mesh, *self_tensor, *transed_tensor, *transed_index)) {
        ConvertAllInputsToDistTensor(
            mesh, *self_tensor, *transed_tensor, *transed_index);
      }

      *transed_index = expandTensors(*transed_index);
      *transed_index = expand_outplace(*transed_index);

      std::vector<paddle::Tensor> transed_index_int64;
      int64_t slice_offset;

      DealWithIndex(pos_of_new_dim,
                    &slice_offset,
                    transed_index,
                    tensor,
                    sub_tensor,
                    transed_tensor,
                    &transed_index_int64);

      // AMP Logic
      if (egr::Controller::Instance().GetAMPLevel() !=
          paddle::imperative::AmpLevel::O0) {
        auto op_name = phi::TransToFluidOpName("index_elementwise_get");
        paddle::small_vector<std::vector<paddle::Tensor>,
                             egr::kSlotSmallVectorSize>
            amp_tensors_vector = {{*self_tensor}};

        auto amp_dst_dtype =
            paddle::imperative::GetAmpDestDtype(op_name, amp_tensors_vector);

        auto new_self_tensor = paddle::imperative::AmpAutoCast(
            "self_tensor", *self_tensor, amp_dst_dtype, op_name);
        auto new_transed_tensor = paddle::imperative::AmpAutoCast(
            "transed_tensor", *transed_tensor, amp_dst_dtype, op_name);

        {
          // Inputs are already cast; disable AMP so the kernel call below
          // does not re-cast them.
          paddle::imperative::AutoCastGuard guard(
              egr::Controller::Instance().GetCurrentAmpAttrs(),
              paddle::imperative::AmpLevel::O0);

          AdvancedIndex ad =
              AdvancedIndex(new_transed_tensor, transed_index_int64);

          const bool is_combined = (index_size == 1) ? false : true;
          const bool accumulate = true;
          *out = index_elementwise_get_ad_func(new_self_tensor,
                                               ad.indices,
                                               ad.src_sizes,
                                               ad.src_strides,
                                               ad.indexed_sizes,
                                               ad.indexed_strides,
                                               slice_offset,
                                               accumulate,
                                               is_combined);
        }
        return;
      }

      AdvancedIndex ad = AdvancedIndex(*transed_tensor, transed_index_int64);
      // is_combined:
      //   Distinguishes between regular indexing (single index) and combined
      //   indexing (multiple indices). When false (single index case), enables
      //   optimized backward pass using IndexPutWithSortKernel for better
      //   performance.
      const bool is_combined = (index_size == 1) ? false : true;
      const bool accumulate = true;
      *out = index_elementwise_get_ad_func(*self_tensor,
                                           ad.indices,
                                           ad.src_sizes,
                                           ad.src_strides,
                                           ad.indexed_sizes,
                                           ad.indexed_strides,
                                           slice_offset,
                                           accumulate,
                                           is_combined);
      return;
    } else {
      // Legacy path: combine the index tensors into one coordinate tensor
      // and gather_nd.
      paddle::Tensor transed_advanced_index_tensor;
      if (transed_index->size() > 1) {
        transed_advanced_index_tensor = stack_ad_func(*transed_index, -1);
      } else {
        // fast path for single index tensor, since stack is much slower than
        // unsqueeze
        transed_advanced_index_tensor =
            unsqueeze_ad_func((*transed_index)[0], {-1});
      }

      const phi::distributed::ProcessMesh* mesh = nullptr;
      if (InputsContainDistTensor(
              &mesh, *transed_tensor, transed_advanced_index_tensor)) {
        ConvertAllInputsToDistTensor(
            mesh, *transed_tensor, transed_advanced_index_tensor);
      }
      *out = gather_nd_ad_func(*transed_tensor, transed_advanced_index_tensor);
      handle_transpose(*out);
      return;
    }
  }
  // NOTE(review): unreachable — every branch above returns. Candidate for
  // removal in a follow-up.
  handle_transpose(*out);
}

}  // namespace pybind
}  // namespace paddle
