// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/compute/util_internal.h>

#include <nebula/compute/util.h>
#include <nebula/core/memory_pool.h>

#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif

namespace nebula {
namespace util {

TempVectorStack::~TempVectorStack() {
#ifdef ADDRESS_SANITIZER
  // The whole backing buffer is kept poisoned while unused; unpoison it here
  // so the memory goes back to the pool in a clean state (otherwise later
  // allocations served from the pool could trigger false ASAN reports).
  if (buffer_ != nullptr) {
    ASAN_UNPOISON_MEMORY_REGION(buffer_->mutable_data(), buffer_size_);
  }
#endif
}

/// Allocate (or re-allocate) the backing buffer for the stack.
///
/// \param pool memory pool the backing buffer is allocated from
/// \param size requested usable stack size in bytes; the actual buffer is
///        padded via EstimatedAllocationSize to leave room for alignment,
///        SIMD tail padding and the debug guard words.
/// \return OkStatus on success, or the allocation error from the pool.
turbo::Status TempVectorStack::init(MemoryPool* pool, int64_t size) {
  num_vectors_ = 0;
  top_ = 0;
#ifdef ADDRESS_SANITIZER
  // If we are re-initializing, the previous buffer is still fully poisoned
  // (see the ASAN_POISON below).  Unpoison it before it is released back to
  // the pool — mirroring the destructor — otherwise the pool could later
  // hand out still-poisoned memory and cause false ASAN reports.
  if (buffer_) {
    ASAN_UNPOISON_MEMORY_REGION(buffer_->mutable_data(), buffer_size_);
  }
#endif
  buffer_size_ = EstimatedAllocationSize(size);
  TURBO_MOVE_OR_RAISE(auto buffer, allocate_resizable_buffer(buffer_size_, pool));
#ifdef ADDRESS_SANITIZER
  // Poison the entire (empty) stack; alloc() unpoisons exactly the ranges
  // handed out, so any out-of-range access is caught by ASAN.
  ASAN_POISON_MEMORY_REGION(buffer->mutable_data(), buffer_size_);
#endif
  buffer_ = std::move(buffer);
  return turbo::OkStatus();
}

int64_t TempVectorStack::PaddedAllocationSize(int64_t num_bytes) {
  // Align each allocation to a multiple of 8 bytes so temp vectors are never
  // handed out at an unaligned address.
  //
  // Additionally append kPadding bytes of tail room so SIMD loads/stores may
  // safely run past the logical end when the element count is not a multiple
  // of the SIMD lane count.
  const int64_t aligned = ::nebula::bit_util::RoundUp(num_bytes, sizeof(int64_t));
  return aligned + kPadding;
}

// Push a new temp vector of num_bytes usable bytes onto the stack.
// On return, *data points at the usable region and *id is the vector's
// index (needed by release(), which must be called in LIFO order —
// see the DKCHECK in release()).
void TempVectorStack::alloc(uint32_t num_bytes, uint8_t** data, int* id) {
  // Padded size: alignment + SIMD tail padding + room for the guard words.
  int64_t estimated_alloc_size = EstimatedAllocationSize(num_bytes);
  int64_t new_top = top_ + estimated_alloc_size;
  // Stack overflow check (see GH-39582).
  // XXX cannot return a regular turbo::Status because most consumers do not either.
  KCHECK_LE(new_top, buffer_size_)
      << "TempVectorStack::alloc overflow: allocating " << estimated_alloc_size
      << " on top of " << top_ << " in stack of size " << buffer_size_;
#ifdef ADDRESS_SANITIZER
  // Unpoison exactly the range handed out (including the guard words so the
  // debug writes below are legal); the rest of the buffer stays poisoned.
  ASAN_UNPOISON_MEMORY_REGION(buffer_->mutable_data() + top_, estimated_alloc_size);
#endif
  // Usable region starts one 8-byte guard word past the old top.
  *data = buffer_->mutable_data() + top_ + /*one guard*/ sizeof(uint64_t);
#ifndef NDEBUG
  // We set 8 bytes before the beginning of the allocated range and
  // 8 bytes after the end to check for stack overflow (which would
  // result in those known bytes being corrupted).
  reinterpret_cast<uint64_t*>(buffer_->mutable_data() + top_)[0] = kGuard1;
  reinterpret_cast<uint64_t*>(buffer_->mutable_data() + new_top)[-1] = kGuard2;
#endif
  *id = num_vectors_++;
  top_ = new_top;
}

// Pop the temp vector identified by id off the stack. num_bytes must match
// the size passed to the corresponding alloc() so the same padded size is
// recomputed here.
void TempVectorStack::release(int id, uint32_t num_bytes) {
  // Releases must happen in strict LIFO order: only the most recent vector
  // may be released.
  DKCHECK(num_vectors_ == id + 1);
  int64_t size = EstimatedAllocationSize(num_bytes);
  // Verify the trailing guard word (just below the current top) is intact;
  // corruption here indicates the allocation was overrun (debug builds only).
  DKCHECK(reinterpret_cast<const uint64_t*>(buffer_->mutable_data() + top_)[-1] ==
               kGuard2);
  DKCHECK(top_ >= size);
  top_ -= size;
  // After popping, the leading guard word sits exactly at the new top;
  // verify it too (catches underrun of the released allocation).
  DKCHECK(reinterpret_cast<const uint64_t*>(buffer_->mutable_data() + top_)[0] ==
               kGuard1);
#ifdef ADDRESS_SANITIZER
  // Re-poison the freed range so any further use of the released vector is
  // flagged by ASAN.
  ASAN_POISON_MEMORY_REGION(buffer_->mutable_data() + top_, size);
#endif
  --num_vectors_;
}

}  // namespace util
}  // namespace nebula
