// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifdef PADDLE_WITH_CUDA
#include <cuda.h>
#include <cuda_runtime.h>
#endif

#include <string>

#include "glog/logging.h"

#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/memory/allocation/cuda_virtual_mem_allocator.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/phi/backends/dynload/cuda_driver.h"
#include "paddle/phi/core/platform/cuda_device_guard.h"
#include "paddle/phi/core/platform/device/gpu/gpu_info.h"

namespace paddle::memory::allocation {

// Process-wide registry mapping each mapped virtual base address to the
// physical CUmemGenericAllocationHandle backing it. Shared by all
// CUDAVirtualMemAllocator instances and guarded by base_ptr_handle_mu_
// (see RegisterHandle / UnregisterHandle / GetHandleFromBasePtr below).
std::mutex CUDAVirtualMemAllocator::base_ptr_handle_mu_;
std::unordered_map<void*, CUmemGenericAllocationHandle>
    CUDAVirtualMemAllocator::base_ptr_handle_map_;

CUDAVirtualMemAllocator::CUDAVirtualMemAllocator(const phi::GPUPlace& place)
    : place_(place), virtual_mem_base_(0), prop_{} {
  // Describe the physical backing shared by every chunk this allocator
  // creates via cuMemCreate: pinned device memory local to `place.device`.
  // prop_ was zero-initialized in the init list, so only the relevant fields
  // need to be filled in.
  prop_.type = CU_MEM_ALLOCATION_TYPE_PINNED;
  prop_.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
  prop_.location.id = place.device;  // NOLINT
#if defined(_WIN32)
  prop_.requestedHandleTypes = CU_MEM_HANDLE_TYPE_NONE;
#else
  prop_.requestedHandleTypes = CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR;
#endif

  // Access descriptor array applied after each cuMemMap: the owning device
  // gets read/write access to every mapped range.
  CUmemAccessDesc rw_access = {};
  rw_access.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
  rw_access.location.id = place.device;
  rw_access.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
  access_desc_.assign(1, rw_access);
}

void CUDAVirtualMemAllocator::InitOnce() {
  std::call_once(init_flag_, [this] {
    // The RAII guard records the caller's current device, switches to
    // place_.device, and restores the caller's device when it goes out of
    // scope. Do NOT call SetDeviceId before constructing the guard: that
    // would make the guard record place_.device as the "previous" device
    // and permanently leak the device switch to the calling thread.
    paddle::platform::CUDADeviceGuard guard(place_.device);

    // Query the minimum granularity; both physical chunk sizes and the VA
    // reservation must be aligned to it.
    size_t granularity = 0;
    PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cuMemGetAllocationGranularity(
        &granularity, &prop_, CU_MEM_ALLOC_GRANULARITY_MINIMUM));
    granularity_ = granularity;

    // Current free/total device memory, used to size the VA reservation.
    size_t actual_avail, actual_total;
    PADDLE_ENFORCE_GPU_SUCCESS(cudaMemGetInfo(&actual_avail, &actual_total));
    VLOG(1) << "VMM InitOnce dev " << place_.device << " actual_avail: "
            << static_cast<double>(actual_avail) / (1 << 20) << " MB, "
            << "actual_total: " << static_cast<double>(actual_total) / (1 << 20)
            << " MB";

    virtual_mem_size_ = AlignedSize(actual_total, granularity_);

    // Reserve the required contiguous virtual address space for the allocations
    // The maximum video memory size we can apply for is the video memory size
    // of GPU, so the virtual address space size we reserve is equal to the GPU
    // video memory size
    PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cuMemAddressReserve(
        &virtual_mem_base_, virtual_mem_size_, 0, 0, 0));

    // Bump-pointer offset into the reserved range; nothing mapped yet.
    virtual_mem_alloced_offset_ = 0;
  });
}

bool CUDAVirtualMemAllocator::IsAllocThreadSafe() const { return false; }

void CUDAVirtualMemAllocator::FreeImpl(phi::Allocation* allocation) {
  // Only allocations created by this allocator (same device) may be freed
  // here.
  PADDLE_ENFORCE_EQ(
      allocation->place(),
      place_,
      common::errors::PermissionDenied(
          "GPU memory is freed in incorrect device. This may be a bug"));

  auto iter = virtual_2_physical_map_.find(
      reinterpret_cast<CUdeviceptr>(allocation->ptr()));
  if (iter == virtual_2_physical_map_.end()) {
    PADDLE_THROW(common::errors::InvalidArgument(
        "Can not find virtual memory address at %s", allocation->ptr()));
  }

  {
    // RAII guard instead of a manual cudaGetDevice/cudaSetDevice pair: the
    // caller's device is restored even when an enforce below throws (the
    // manual restore was skipped on the exception path).
    paddle::platform::CUDADeviceGuard guard(place_.device);

    // Unmap the VA range and release the physical chunk backing it.
    // CUDA_ERROR_DEINITIALIZED is tolerated: during process teardown the
    // driver may already have shut down, and neither call can succeed.
    auto result = phi::dynload::cuMemUnmap(iter->first, iter->second.second);
    if (result != CUDA_ERROR_DEINITIALIZED) {
      PADDLE_ENFORCE_GPU_SUCCESS(result);
      PADDLE_ENFORCE_GPU_SUCCESS(platform::RecordedGpuMemRelease(
          iter->second.first, iter->second.second, place_.device));
    }
  }

  // Drop the bookkeeping for this range. The reserved virtual address space
  // itself is never returned to the driver here.
  UnregisterHandle(allocation->ptr());
  virtual_2_physical_map_.erase(iter);

  delete allocation;
}

phi::Allocation* CUDAVirtualMemAllocator::AllocateImpl(size_t size) {
  // Lazily reserve the VA range and query granularity on first use.
  InitOnce();
  // cuMemCreate and cuMemMap both require granularity-aligned sizes.
  size = AlignedSize(size, granularity_);

  // Allocations are carved out of the reserved VA range bump-pointer style.
  CUdeviceptr ptr = virtual_mem_base_ + virtual_mem_alloced_offset_;

  if (ptr + size > virtual_mem_base_ + virtual_mem_size_) {
    // Five %-placeholders, five arguments (a stray sixth trailing
    // place_.device argument with no matching placeholder was removed).
    PADDLE_THROW_BAD_ALLOC(common::errors::ResourceExhausted(
        "\n\nOut of memory error on GPU Virtual Memory %d. "
        "Cannot allocate %s memory on GPU Virtual Memory %d, %s memory has "
        "been allocated and "
        "available memory is only %s.\n\n"
        "Please decrease the batch size of your model.\n\n",
        place_.device,
        string::HumanReadableSize(size),
        place_.device,
        string::HumanReadableSize(virtual_mem_alloced_offset_),
        string::HumanReadableSize(virtual_mem_size_ -
                                  virtual_mem_alloced_offset_)));
    return nullptr;
  }

  CUmemGenericAllocationHandle handle;

  // Switch to the owning device for the driver calls; restored on return.
  paddle::platform::CUDADeviceGuard guard(place_.device);

  // Create physical memory backing allocation.
  auto result =
      platform::RecordedGpuMemCreate(&handle, size, &prop_, 0, place_.device);

  if (result != CUDA_SUCCESS) {
    if (result == CUDA_ERROR_OUT_OF_MEMORY) {
      size_t actual_avail, actual_total;
      PADDLE_ENFORCE_GPU_SUCCESS(cudaMemGetInfo(&actual_avail, &actual_total));
      size_t actual_allocated = actual_total - actual_avail;

      PADDLE_THROW_BAD_ALLOC(common::errors::ResourceExhausted(
          "\n\nOut of memory error on GPU %d. "
          "Cannot allocate %s memory on GPU %d, %s memory has been allocated "
          "and "
          "available memory is only %s.\n\n"
          "Please check whether there is any other process using GPU %d.\n"
          "1. If yes, please stop them, or start PaddlePaddle on another GPU.\n"
          "2. If no, please decrease the batch size of your model.\n\n",
          place_.device,
          string::HumanReadableSize(size),
          place_.device,
          string::HumanReadableSize(actual_allocated),
          string::HumanReadableSize(actual_avail),
          place_.device));
    } else {
      PADDLE_ENFORCE_GPU_SUCCESS(result);
    }
    return nullptr;
  }

  // Assign the chunk to the appropriate VA range and release the handle.
  // After mapping the memory, it can be referenced by virtual address.
  // The allocation will be kept live until it is unmapped.
  result = phi::dynload::cuMemMap(ptr, size, 0, handle, 0);

  if (result != CUDA_SUCCESS) {
    // Mapping failed: release the physical chunk before reporting the error.
    platform::RecordedGpuMemRelease(handle, size, place_.device);
    PADDLE_ENFORCE_GPU_SUCCESS(result);
    return nullptr;
  }

  // Apply the access descriptors to the whole VA range.
  result = phi::dynload::cuMemSetAccess(
      ptr, size, access_desc_.data(), access_desc_.size());

  if (result != CUDA_SUCCESS) {
    // Access setup failed: unmap and release before reporting the error.
    phi::dynload::cuMemUnmap(ptr, size);
    platform::RecordedGpuMemRelease(handle, size, place_.device);
    PADDLE_ENFORCE_GPU_SUCCESS(result);
    return nullptr;
  }

  // Record the mapping so FreeImpl can unmap/release it later.
  virtual_2_physical_map_.emplace(ptr, std::make_pair(handle, size));

  virtual_mem_alloced_offset_ += size;
  VLOG(10) << "AllocateImpl chunk handle: " << static_cast<int64_t>(handle)
           << ", size=" << size
           << ", device=" << static_cast<int>(place_.device);

  RegisterHandle(reinterpret_cast<void*>(ptr), handle);

  return new Allocation(
      reinterpret_cast<void*>(ptr), size, phi::Place(place_));  // NOLINT
}

CUmemGenericAllocationHandle CUDAVirtualMemAllocator::GetHandleFromBasePtr(
    void* base_ptr) {
  // Look up the physical-memory handle registered for this base pointer;
  // returns 0 when the pointer was never registered or already removed.
  std::lock_guard<std::mutex> guard(base_ptr_handle_mu_);
  const auto it = base_ptr_handle_map_.find(base_ptr);
  return it == base_ptr_handle_map_.end() ? 0 : it->second;
}

void CUDAVirtualMemAllocator::RegisterHandle(
    void* base_ptr, CUmemGenericAllocationHandle handle) {
  // Record the handle under its virtual base address. emplace leaves an
  // existing entry untouched (first-writer-wins), same as before.
  std::lock_guard<std::mutex> guard(base_ptr_handle_mu_);
  base_ptr_handle_map_.emplace(std::make_pair(base_ptr, handle));
}

void CUDAVirtualMemAllocator::UnregisterHandle(void* base_ptr) {
  // Drop the registry entry; erasing a missing key is a harmless no-op.
  const std::lock_guard<std::mutex> guard(base_ptr_handle_mu_);
  base_ptr_handle_map_.erase(base_ptr);
}

}  // namespace paddle::memory::allocation

#endif
