// Copyright 2019 The TCMalloc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef TCMALLOC_GUARDED_PAGE_ALLOCATOR_H_
#define TCMALLOC_GUARDED_PAGE_ALLOCATOR_H_

#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>

#include <utility>

#include "absl/base/attributes.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/thread_annotations.h"
#include "tcmalloc/common.h"
#include "tcmalloc/internal/logging.h"

namespace tcmalloc {

extern absl::base_internal::SpinLock guarded_page_lock;

// An allocator that gives each allocation a new region, with guard pages on
// either side of the allocated region.  If a buffer is overflowed to the next
// guard page or underflowed to the previous guard page, a segfault occurs.
// After an allocation is freed, the underlying page is marked as inaccessible,
// and any future accesses to it will also cause segfaults until the page is
// reallocated.
//
// It is safe to use with static storage duration and is thread-safe, with
// the exception of calls to Init() and Destroy() (see the corresponding
// function comments).
//
// SYNCHRONIZATION
//   Requires the SpinLock guarded_page_lock to be defined externally.  This is
//   required so that this class may be instantiated with static storage
//   duration.  The lock is held by this class during initialization and when
//   accessing the internal free page map.
//
// Example:
//   absl::base_internal::SpinLock
//   guarded_page_lock(absl::base_internal::kLinkerInitialized);
//   GuardedPageAllocator gpa;
//
//   void foo() {
//     char *buf = reinterpret_cast<char *>(gpa.Allocate(8000, 1));
//     buf[0] = 'A';            // OK. No segfault occurs.
//     memset(buf, 'A', 8000);  // OK. No segfault occurs.
//     buf[-300] = 'A';         // Segfault!
//     buf[9000] = 'A';         // Segfault!
//     gpa.Deallocate(buf);
//     buf[0] = 'B';            // Segfault!
//   }
//
//   int main() {
//     // Call Init() only once.
//     gpa.Init(64, GuardedPageAllocator::kGpaMaxPages);
//     gpa.AllowAllocations();
//     for (int i = 0; i < 1000; i++) foo();
//     return 0;
//   }
class GuardedPageAllocator {
 public:
  // Stack trace captured when a guarded page is allocated or deallocated.
  struct GpaStackTrace {
    void *stack[kMaxStackDepth];  // Program counters of the captured frames.
    size_t depth;                 // Number of valid entries in stack.
    pid_t tid;                    // Thread that performed the (de)allocation.
  };

  // Maximum number of pages this class can allocate.
  static constexpr size_t kGpaMaxPages = 512;

  // Likely cause of a memory error, as inferred by GetStackTraces() from the
  // faulting address and the allocation state of the nearest slot.
  enum class ErrorType {
    kUseAfterFree,
    kBufferUnderflow,
    kBufferOverflow,
    kDoubleFree,
    kBufferOverflowOnDealloc,  // Magic-byte overwrite found at Deallocate().
    kUnknown,
  };

  // Constexpr so that instances with static storage duration undergo constant
  // initialization only (avoiding static initialization order issues); all
  // runtime setup is deferred to Init().
  constexpr GuardedPageAllocator()
      : free_pages_{},
        num_alloced_pages_(0),
        num_alloced_pages_max_(0),
        num_allocation_requests_(0),
        num_failed_allocations_(0),
        data_(nullptr),
        pages_base_addr_(0),
        pages_end_addr_(0),
        first_page_addr_(0),
        max_alloced_pages_(0),
        total_pages_(0),
        page_size_(0),
        rand_(0),
        initialized_(false),
        allow_allocations_(false),
        double_free_detected_(false),
        write_overflow_detected_(false) {}

  GuardedPageAllocator(const GuardedPageAllocator &) = delete;
  GuardedPageAllocator &operator=(const GuardedPageAllocator &) = delete;

  // Trivial on purpose; see Destroy() for teardown of the mapped region.
  ~GuardedPageAllocator() = default;

  // Configures this allocator to allocate up to max_alloced_pages pages at a
  // time from a pool of total_pages pages, where:
  //   1 <= max_alloced_pages <= total_pages <= kGpaMaxPages
  //
  // This method should be called non-concurrently and only once to complete
  // initialization.  Dynamic initialization is deliberately done here and not
  // in the constructor, thereby allowing the constructor to be constexpr and
  // avoiding static initialization order issues.
  void Init(size_t max_alloced_pages, size_t total_pages)
      EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);

  // Unmaps memory allocated by this class.
  //
  // This method should be called non-concurrently and only once to complete
  // destruction.  Destruction is deliberately done here and not in the
  // destructor, thereby allowing the destructor to be trivial (i.e. a no-op)
  // and avoiding use-after-destruction issues for static/global instances.
  void Destroy();

  // On success, returns a pointer to size bytes of page-guarded memory, aligned
  // to alignment.  On failure, returns nullptr.  The returned pointer is
  // guaranteed to be tagged.  Failure can occur if memory could not be mapped
  // or protected, if all guarded pages are already allocated, or if size is 0.
  //
  // Precondition:  size and alignment <= page_size_
  // Precondition:  alignment is 0 or a power of 2
  void *Allocate(size_t size, size_t alignment)
      LOCKS_EXCLUDED(guarded_page_lock);

  // Deallocates memory pointed to by ptr.  ptr must have been previously
  // returned by a call to Allocate.
  void Deallocate(void *ptr) LOCKS_EXCLUDED(guarded_page_lock);

  // Returns the size requested when ptr was allocated.  ptr must have been
  // previously returned by a call to Allocate.
  size_t GetRequestedSize(const void *ptr) const;

  // Returns ptr's offset from the beginning of its allocation along with the
  // allocation's size.
  std::pair<off_t, size_t> GetAllocationOffsetAndSize(const void *ptr) const;

  // Records stack traces in alloc_trace and dealloc_trace for the page nearest
  // to ptr.  alloc_trace is the trace at the time the page was allocated.  If
  // the page is still allocated, dealloc_trace->depth will be 0. If the page
  // has been deallocated, dealloc_trace is the trace at the time the page was
  // deallocated.
  //
  // Returns the likely error type for an access at ptr.
  //
  // Requires that ptr points to memory mapped by this class.
  ErrorType GetStackTraces(const void *ptr, GpaStackTrace *alloc_trace,
                           GpaStackTrace *dealloc_trace) const;

  // Writes a human-readable summary of GuardedPageAllocator's internal state to
  // *out.
  void Print(TCMalloc_Printer *out) LOCKS_EXCLUDED(guarded_page_lock);
  void PrintInPbtxt(PbtxtRegion *gwp_asan) const
      LOCKS_EXCLUDED(guarded_page_lock);

  // Returns true if ptr points to memory managed by this class.
  inline bool ABSL_ATTRIBUTE_ALWAYS_INLINE
  PointerIsMine(const void *ptr) const {
    uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
    // The mapped region is one contiguous address range, so a single bounds
    // check suffices.
    return pages_base_addr_ <= addr && addr < pages_end_addr_;
  }

  // Allows Allocate() to start returning allocations.
  void AllowAllocations() LOCKS_EXCLUDED(guarded_page_lock) {
    absl::base_internal::SpinLockHolder h(&guarded_page_lock);
    allow_allocations_ = true;
  }

 private:
  // Structure for storing data about a slot.
  struct SlotMetadata {
    GpaStackTrace alloc_trace;    // Trace taken when the slot was allocated.
    GpaStackTrace dealloc_trace;  // Trace taken when the slot was freed.
    size_t requested_size;        // Size passed to Allocate().
    uintptr_t allocation_start;   // Where the allocation begins in the slot.
  };

  // Max number of magic bytes we use to detect write-overflows at deallocation.
  static constexpr size_t kMagicSize = 32;

  // Maps pages into memory.
  void MapPages() LOCKS_EXCLUDED(guarded_page_lock)
      EXCLUSIVE_LOCKS_REQUIRED(pageheap_lock);

  // Reserves and returns a slot randomly selected from the free slots in
  // free_pages_.  Returns -1 if no slots available, or if AllowAllocations()
  // hasn't been called yet.
  ssize_t ReserveFreeSlot() LOCKS_EXCLUDED(guarded_page_lock);

  // Returns the i-th free slot of free_pages_.  i must be in the range [0,
  // total_pages_ - num_alloced_pages_).
  size_t GetIthFreeSlot(size_t i) EXCLUSIVE_LOCKS_REQUIRED(guarded_page_lock);

  // Marks the specified slot as unreserved.
  void FreeSlot(size_t slot) EXCLUSIVE_LOCKS_REQUIRED(guarded_page_lock);

  // Returns the address of the page that addr resides on.
  uintptr_t GetPageAddr(uintptr_t addr) const;

  // Returns an address somewhere on the valid page nearest to addr.
  uintptr_t GetNearestValidPage(uintptr_t addr) const;

  // Returns the slot number for the page nearest to addr.
  size_t GetNearestSlot(uintptr_t addr) const;

  // Returns true if the specified slot has already been freed.
  bool IsFreed(size_t slot) const EXCLUSIVE_LOCKS_REQUIRED(guarded_page_lock);

  // Returns true if magic bytes for slot were overwritten.
  bool WriteOverflowOccurred(size_t slot) const;

  // Returns the likely error type for the given trace depths and access
  // address.
  ErrorType GetErrorType(uintptr_t addr, uintptr_t alloc_trace_depth,
                         uintptr_t dealloc_trace_depth) const;

  // Magic constant used for detecting write-overflows at deallocation time.
  static uint8_t GetWriteOverflowMagic(size_t slot) {
    // Only even slots get magic bytes, so use slot / 2 for more unique magics.
    return uint8_t{0xcd} * static_cast<uint8_t>(slot / 2);
  }

  // Returns true if slot should be right aligned.
  static bool ShouldRightAlign(size_t slot) { return slot % 2 == 0; }

  // If slot is marked for right alignment, moves the allocation in *ptr to the
  // right end of the slot, maintaining the specified size and alignment.  Magic
  // bytes are written in any alignment padding.
  void MaybeRightAlign(size_t slot, size_t size, size_t alignment, void **ptr);

  // Conversions between a slot index and the address of its page.
  uintptr_t SlotToAddr(size_t slot) const;
  size_t AddrToSlot(uintptr_t addr) const;

  // Maps each bool to one page.
  // true: Free.  false: Reserved.
  bool free_pages_[kGpaMaxPages] GUARDED_BY(guarded_page_lock);

  // Number of currently-allocated pages.
  size_t num_alloced_pages_ GUARDED_BY(guarded_page_lock);

  // The high-water mark for num_alloced_pages_.
  size_t num_alloced_pages_max_ GUARDED_BY(guarded_page_lock);

  // Number of calls to Allocate.
  size_t num_allocation_requests_ GUARDED_BY(guarded_page_lock);

  // Number of times Allocate has failed.
  size_t num_failed_allocations_ GUARDED_BY(guarded_page_lock);

  // A dynamically-allocated array of stack trace data captured when each page
  // is allocated/deallocated.  Printed by the SEGV handler when a memory error
  // is detected.
  SlotMetadata *data_;

  uintptr_t pages_base_addr_;  // Points to start of mapped region.
  uintptr_t pages_end_addr_;   // Points to the end of mapped region.
  uintptr_t first_page_addr_;  // Points to first page returnable by Allocate.
  size_t max_alloced_pages_;   // Max number of pages to allocate at once.
  size_t total_pages_;         // Size of the page pool to allocate from.
  size_t page_size_;           // Size of pages we allocate.
  uint64_t rand_;              // RNG seed.

  // True if this object has been fully initialized.
  bool initialized_ GUARDED_BY(guarded_page_lock);

  // Flag to control whether we can return allocations or not.
  bool allow_allocations_ GUARDED_BY(guarded_page_lock);

  // Set to true if a double free has occurred.
  bool double_free_detected_;

  // Set to true if a write overflow was detected on deallocation.
  bool write_overflow_detected_;

  // ConstexprCheck reads rand_ inside a static_assert (below) to verify that
  // the constructor is usable in constant expressions.
  friend struct ConstexprCheck;
};

// Compile-time guard: constructing a GuardedPageAllocator inside a
// static_assert forces constant evaluation, so this fails to build if the
// constructor ever stops being constexpr.
struct ConstexprCheck {
  static_assert(GuardedPageAllocator().rand_ == 0 || true,
                "GuardedPageAllocator must have a constexpr constructor");
};

}  // namespace tcmalloc

#endif  // TCMALLOC_GUARDED_PAGE_ALLOCATOR_H_
