/**
 * @file base.cu
 * @brief Occupy GPU memory on selected devices by repeatedly allocating
 *        half of the remaining free memory (a GPU memory "pump").
 * @author WeixiongLin (wx_lin@outlook.com)
 * @version 1.0
 * @date 2022-11-09
 * 
 * @copyright Copyright (c) 2022  WeixiongLin
 * 
 * @par Changelog:
 * <table>
 * <tr><th>Date       <th>Version <th>Author  <th>Description
 * <tr><td>2022-11-09 <td>1.0     <td>WeixiongLin     <td>Initial version
 * </table>
 * Build: nvcc base.cu -o base
 */
#include <iostream>
#include <vector>
#include <string>
#include <algorithm>
#include <sstream>
#include <chrono>
#include <thread>

// Millisecond sleep built on std::this_thread. NOTE: this macro shadows the
// POSIX sleep() (which takes seconds) for the rest of this translation unit.
#define sleep(t) std::this_thread::sleep_for(std::chrono::milliseconds(t))

// Bytes per GiB; kept as float so byte counts divide into fractional GB for printing.
const float bytes_per_gb = (1 << 30);
// Capacity of the fixed-size per-device pointer/flag tables; device ids must be < this.
const int max_gpu_num = 32;


/**
 * @brief Allocate a fixed amount of device memory on each target GPU,
 *        retrying every 5 seconds until every GPU has been served.
 *
 * Allocations are intentionally never freed: the purpose of this tool is
 * to hold the memory. Leaves the current device set to the last id tried.
 *
 * @param  array        per-device pointer table with at least max_gpu_num
 *                      slots; array[id] receives the allocation for GPU id
 * @param  occupy_size  number of bytes to allocate on each GPU
 * @param  gpu_ids      target device ordinals; ids outside
 *                      [0, max_gpu_num) are reported and skipped
 */
void allocate_mem(char** array, size_t occupy_size, std::vector<int>& gpu_ids) {
  std::vector<bool> allocated(max_gpu_num, false);
  int cnt = 0;
  while (true) {
    printf("Try allocate GPU memory %d times >>>>>>>>>>>>>>>>>>>>\n", ++cnt);
    bool all_allocated = true;
    for (int id : gpu_ids) {
      // Guard against out-of-range ids: `allocated` and `array` only have
      // max_gpu_num slots, so an unchecked id would write out of bounds.
      if (id < 0 || id >= max_gpu_num) {
        printf("GPU-%d: invalid device id (max %d), skipped\n", id,
               max_gpu_num - 1);
        continue;
      }
      if (allocated[id]) continue;
      cudaSetDevice(id);
      cudaError_t status = cudaMalloc(&array[id], occupy_size);
      size_t total_size, avail_size;
      cudaMemGetInfo(&avail_size, &total_size);
      if (status != cudaSuccess) {
        printf(
          "GPU-%d: Failed to allocate %.2f GB GPU memory (%.2f GB "
          "available)\n",
          id, occupy_size / bytes_per_gb, avail_size / bytes_per_gb);
        all_allocated = false;
      } else {
        allocated[id] = true;
        printf(
          "GPU-%d: Successfully allocate %.2f GB GPU memory (%.2f GB "
          "available)\n",
          id, occupy_size / bytes_per_gb, avail_size / bytes_per_gb);
      }
    }
    if (all_allocated) break;
    sleep(5000);  // wait 5 s, then retry only the GPUs that failed
  }
  printf("Successfully allocate memory on all GPUs!\n");
}

/**
 * @brief Drain the target GPUs by repeated bisection: each round grabs half
 *        of the memory currently free on the first target device, so the
 *        held amount converges toward all available memory. Never returns
 *        (and never frees) unless gpu_ids is empty.
 *
 * @param  gpu_ids  device ordinals to drain; empty list is a no-op
 */
void pump_out_mem(std::vector<int>& gpu_ids) {
  if (gpu_ids.empty()) {
    printf("pump_out_mem: no GPU ids given, nothing to do\n");
    return;
  }
  char *array[max_gpu_num];
  size_t total_size, avail_size;
  while (true) {
    // Query the first *target* device rather than a hard-coded device 1
    // (which failed on single-GPU machines), and re-select it every round
    // because allocate_mem changes the current device.
    cudaSetDevice(gpu_ids.front());
    cudaMemGetInfo(&avail_size, &total_size);
    allocate_mem(array, avail_size / 2, gpu_ids);
  }
}


/**
 * @brief Entry point: report the device count and current device's memory,
 *        then pump all available memory out of the target GPUs (blocks forever).
 */
int main()
{
  int gpu_num = 0;
  cudaGetDeviceCount(&gpu_num);
  printf("%d\n", gpu_num);

  // Print free and total memory in the same unit (GB) so the two figures
  // are directly comparable; the previous code mixed raw bytes and GB.
  size_t total_size, avail_size;
  cudaMemGetInfo(&avail_size, &total_size);
  printf("avail_size: %.2f GB\n", avail_size / bytes_per_gb);
  printf("total_size: %.2f GB\n", total_size / bytes_per_gb);

  // NOTE(review): the target device is hard-coded to ordinal 1; on a
  // single-GPU machine this should be {0} — confirm the intended targets.
  std::vector<int> gpu_ids = {1};
  pump_out_mem(gpu_ids);

  return 0;
}
