// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/common/macros.h"
#include "paddle/fluid/eager/grad_node_info.h"

namespace egr {

/**
 * The input buffer is designed for backward gradient accumulation.
 * Since a single output may be consumed by multiple ops in the forward pass,
 * the backward pass must accumulate multiple incoming grads into one.
 *
 * GradTensorHolder should have the same format as the forward output. **/
class GradTensorHolder {
 public:
  // Builds one buffer slot per entry in `metas`, each sized to that slot's
  // rank. When `record_input_dtypes` is true, the dtype of every meta that
  // carries tensor info is remembered for later gradient validation;
  // everything else falls back to phi::DataType::UNDEFINED.
  explicit GradTensorHolder(
      const paddle::small_vector<std::vector<GradSlotMeta>,
                                 kSlotSmallVectorSize>& metas,
      bool record_input_dtypes = true) {
    VLOG(7) << "Init GradTensorHolder with meta size: " << metas.size();
    const size_t num_slots = metas.size();
    buffer_.resize(num_slots);
    input_dtypes_.resize(num_slots);
    for (size_t slot = 0; slot < num_slots; ++slot) {
      const auto& slot_metas = metas[slot];
      VLOG(7) << "Init GradTensorHolder with meta rank: " << slot_metas.size();
      const size_t slot_rank = slot_metas.size();
      buffer_[slot].resize(slot_rank);
      input_dtypes_[slot].resize(slot_rank);

      // Only the dtype is pulled out of each GradSlotMeta here; other meta
      // fields (place, layout, ...) are intentionally not stored.
      for (size_t rank = 0; rank < slot_rank; ++rank) {
        const auto& meta = slot_metas[rank];
        if (meta.HasTensorMeta() && record_input_dtypes) {
          const auto& tensor_meta = meta.GetTensorMeta();
          input_dtypes_[slot][rank] = tensor_meta.dtype;
          VLOG(7) << "Init GradTensorHolder with dtype: "
                  << phi::DataTypeToString(tensor_meta.dtype);
        } else {
          VLOG(7) << "Init GradTensorHolder with UNDEFINED";
          input_dtypes_[slot][rank] = phi::DataType::UNDEFINED;
        }
      }
    }
  }

  GradTensorHolder(const GradTensorHolder& other) = default;

  GradTensorHolder& operator=(const GradTensorHolder& other) = default;

  // Accumulates `t` into buffer_[slot_id][rank]; creates a new tensor and
  // copies tensor->impl.
  PADDLE_API void add(size_t slot_id,
                      size_t rank,
                      const paddle::Tensor& t,
                      bool create_graph = false);

  // Overwrites buffer_[slot_id][rank] with a copy of `t`
  // (or with ones when `fill_one` is set).
  PADDLE_API void CopyValueFromTensor(size_t slot_id,
                                      size_t rank,
                                      const paddle::Tensor& t,
                                      bool fill_one = false);

  // Read access to one slot's accumulated gradients.
  const std::vector<paddle::Tensor>& operator[](const size_t& pos) {
    return buffer_[pos];
  }

  // Mutable access to the whole accumulation buffer.
  paddle::small_vector<std::vector<paddle::Tensor>, kSlotSmallVectorSize>&
  Buffers() {
    return buffer_;
  }

  PADDLE_API void SetBufferSlotRankZeros(size_t slot_id, size_t rank);

  // Validate and convert gradient tensor to match target meta
  PADDLE_API paddle::Tensor ValidateGradient(size_t slot_id,
                                             size_t rank,
                                             const paddle::Tensor& grad_tensor);

  // Set shared buffer
  PADDLE_API void SetBuffers(
      paddle::small_vector<std::vector<paddle::Tensor>, kSlotSmallVectorSize>&&
          new_buffer);

 private:
  // Accumulated grads, indexed as buffer_[slot][rank].
  paddle::small_vector<std::vector<paddle::Tensor>, kSlotSmallVectorSize>
      buffer_;
  // Store input dtypes for gradient validation
  // If GradSlotMeta is needed (e.g. place, layout, etc.), we can store
  // input_metas_ instead
  paddle::small_vector<std::vector<phi::DataType>, kSlotSmallVectorSize>
      input_dtypes_;
};

}  // namespace egr
