#pragma once
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <utility>

namespace shermit {
	/// Singleton handle to the CUDA device.
	///
	/// Caches the device properties of the currently selected device at
	/// construction and offers helpers to pick launch configurations via the
	/// occupancy API and to launch kernels with them. All launches go to the
	/// default stream.
	class MCudaDevice {
	private:
		/// Find the next power of 2 that is >= v (classic bit-smearing trick).
		/// Valid for v in [1, 2^31]; dtNextPow2(0) yields 0.
		inline unsigned int dtNextPow2(unsigned int v) {
			v--;
			v |= v >> 1;
			v |= v >> 2;
			v |= v >> 4;
			v |= v >> 8;
			v |= v >> 16;
			v++;
			return v;
		}

		/// Thread count assumed when estimating dynamic shared memory per block
		/// in getOptLaunchParams (previously a magic 512 inline).
		static constexpr unsigned int kSmemEstimateThreads = 512;
	public:
		~MCudaDevice() {}
		MCudaDevice(const MCudaDevice&) = delete;
		MCudaDevice& operator=(const MCudaDevice&) = delete;

		/// Meyers-singleton accessor; initialization is thread-safe since C++11.
		static MCudaDevice& getInstance() {
			static MCudaDevice instance;
			return instance;
		}

		/// Choose whether kernels registered via registerKernel(kernel) prefer
		/// in-block shared memory, L1 cache, or neither.
		void setCacheMemPreference(cudaFuncCache pref) { blockCacheMemPreference = pref; }

		/// Apply the stored cache/shared-memory preference to a kernel.
		/// Returns the CUDA status instead of silently dropping it.
		cudaError_t registerKernel(const void* kernel) { return cudaFuncSetCacheConfig(kernel, blockCacheMemPreference); }

		/// Apply an explicit cache/shared-memory preference to a kernel.
		cudaError_t registerKernel(const void* kernel, cudaFuncCache cacheMemPref) { return cudaFuncSetCacheConfig(kernel, cacheMemPref); }

		/// Launch func with grid size gs and block size bs on the default stream.
		/// Returns the launch status: configuration errors are reported here,
		/// while in-kernel execution errors surface at the next synchronizing call.
		template<typename... Args>
		cudaError_t run(int gs, int bs, void(*func)(Args...), Args... args) {
			func<<<gs, bs>>>(args...);
			return cudaGetLastError();
		}

		/// Run a kernel with an occupancy-derived launch configuration.
		/// @param numThread     total number of logical threads to cover
		/// @param smemItemSize  bytes of dynamic shared memory needed per thread
		///                      (0 when the kernel uses no dynamic shared memory)
		/// @return the (gridSize, blockSize) pair actually used
		template<typename... Args>
		std::pair<int, int> launchKernel(int numThread, unsigned int smemItemSize, void(*func)(Args...), Args... args) {
			auto p = getOptLaunchParams(numThread, smemItemSize, func);
			run(p.first, p.second, func, args...);
			return p;
		}

		/// Compute a (gridSize, blockSize) pair for launching func over
		/// numThread logical threads.
		///
		/// When smemItemSize > 0 the block size is rounded up to a power of two
		/// (presumably for reduction-style kernels — TODO confirm against
		/// callers) and then shrunk until bs * smemItemSize fits into the
		/// per-block shared-memory budget.
		/// NOTE(review): numThread is assumed to be > 0; numThread == 0 yields
		/// a gridSize of 0 — confirm callers never pass it.
		std::pair<int, int> getOptLaunchParams(int numThread, unsigned int smemItemSize, void(*func)) {
			int gridSize, bs;
			if (smemItemSize == 0) {
				int minGridSize, optBlockSize;
				cudaOccupancyMaxPotentialBlockSize(&minGridSize, &optBlockSize, func, 0, numThread);
				bs = optBlockSize;
				gridSize = (numThread + bs - 1) / bs;     // ceil-div over numThread
			}
			else {
				// Block size is constrained by available shared memory. Estimate
				// the dynamic smem per block assuming kSmemEstimateThreads
				// threads, clamped to the hardware budget, and let the occupancy
				// API pick a block size under that assumption.
				unsigned int smemPerBlock = kSmemEstimateThreads * smemItemSize;
				if (smemPerBlock > prop.sharedMemPerBlock) smemPerBlock = (unsigned int)prop.sharedMemPerBlock;
				int minGridSize, optBlockSize;
				cudaOccupancyMaxPotentialBlockSize(&minGridSize, &optBlockSize, func, smemPerBlock, numThread);
				bs = dtNextPow2(optBlockSize);
				gridSize = (numThread + bs - 1) / bs;
				// Shrink while the rounded-up size either over-commits occupancy
				// (bs > optBlockSize with more than one block) or exceeds the
				// shared-memory budget. A single halving (as before) could still
				// leave bs * smemItemSize over the per-block limit; the size_t
				// cast avoids int overflow / signed-unsigned mixing in the check.
				while (bs > 1 &&
					((bs > optBlockSize && gridSize != 1) ||
					 (size_t)bs * smemItemSize > prop.sharedMemPerBlock)) {
					bs >>= 1;
					gridSize = (numThread + bs - 1) / bs;
				}
			}
			return std::pair<int, int>(gridSize, bs);
		}

	private:
		MCudaDevice() {
			blockCacheMemPreference = cudaFuncCachePreferL1;
			// Query the currently selected device rather than hard-coding 0;
			// fall back to device 0 if no device has been set.
			int dev = 0;
			if (cudaGetDevice(&dev) != cudaSuccess) dev = 0;
			cudaGetDeviceProperties(&prop, dev);
		}
		cudaDeviceProp prop;                   // cached properties of the device used for occupancy math
		cudaFuncCache blockCacheMemPreference; // default cache / shared-memory preference for registerKernel
	};
}