#pragma once
#include "base.h"
#include <Utility/cuda_device.cuh>
#include "build_kernels_cuda.cuh"
#include "traverse_cuda.cuh"

#include "lbvh.h"

namespace shermit {
namespace lbvh {
	template<class BVSOA_t>
	// GPU (CUDA) builder/traverser for a linear BVH over a structure-of-arrays
	// bounding-volume type BVSOA_t. Owns device-side leaf and interior arrays;
	// leaf_size == 0 means "no tree built".
	class LBVH_CUDA :public LBVH<BVSOA_t> {
	public:
		LBVH_CUDA() : leaf_size(0) {
			// register build kernels
			/*
			MCudaDevice& CudaDevice = MCudaDevice::getInstance();
			CudaDevice.registerKernel(calcSceneBV_1<BVSOA_t>, cudaFuncCachePreferShared);
			CudaDevice.registerKernel(calcSceneBV_2<BVSOA_t>, cudaFuncCachePreferShared);
			CudaDevice.registerKernel(calcMortonCodes_32b<BVSOA_t>);
			CudaDevice.registerKernel(reorderLeaves<BVSOA_t>);
			CudaDevice.registerKernel(calcBuildMetrics<BVSOA_t>);
			CudaDevice.registerKernel(buildInteriors<BVSOA_t>);
			CudaDevice.registerKernel(calcInteriorNewIdx<BVSOA_t>);
			CudaDevice.registerKernel(reorderInteriors<BVSOA_t>);
			*/
		}
		~LBVH_CUDA() {
			this->free();
		}

		// True once init() has built a tree (leaf_size > 0).
		bool isInitialized() const {
			return leaf_size != 0;
		}

		// Build the LBVH from the primitives exposed by object_manager.
		// Pipeline: per-primitive BVs -> scene BV (parallel reduce) -> 32-bit
		// morton codes -> sort leaves by morton code -> build interior nodes
		// -> reorder interiors into their final layout.
		// NOTE(review): assumes at least 2 primitives; with exactly 1 there are
		// no interior nodes and the (leaf_size - 1)-sized launches are
		// degenerate -- confirm callers never pass a single primitive.
		template<class BuildManager_t>
		void init(const BuildManager_t& object_manager) {
			// Rebuilding over an existing tree would leak the previous device
			// allocations, so release them first.
			this->free();

			leaf_size = object_manager.primSize();
			if (leaf_size == 0)
				return;	// nothing to build; isInitialized() stays false

			leaves.allocateDevice(leaf_size);
			interiors.allocateDevice(leaf_size - 1);

			MCudaDevice& CudaDevice = MCudaDevice::getInstance();
			// calculate the BVs of all primitives
			CudaDevice.launchKernel(leaf_size, 0,  GPU::buildBVs, object_manager, leaves.bv);

			// using parallel reduce algorithm to calculate scene BV
			unsigned int size_bv_struct_t = sizeof(typename BVSOA_t::bv_struct_t);
			auto p = CudaDevice.getOptLaunchParams(leaf_size, size_bv_struct_t, GPU::calcSceneBV_1<BVSOA_t>);
			int gs = p.first, bs = p.second;
			thrust::device_vector<typename BVSOA_t::bv_struct_t> d_bv_out(gs);
			// phase 1: each block reduces its slice into one partial BV
			// (shared-memory scratch: one bv_struct_t per thread)
			GPU::calcSceneBV_1 << <gs, bs, size_bv_struct_t* bs >> > (leaf_size, leaves.bv, getRawPtr(d_bv_out));
			cudaDeviceSynchronize();
			getLastCudaError("calcSceneBV_1 failure");

			// recursively do phase 2 (in place over d_bv_out) until the final
			// result is computed (gridSize == 1)
			while (gs != 1) {
				int size = gs;
				p = CudaDevice.getOptLaunchParams(size, size_bv_struct_t, GPU::calcSceneBV_2<typename BVSOA_t::bv_struct_t>);
				gs = p.first, bs = p.second;
				GPU::calcSceneBV_2 << <gs, bs, size_bv_struct_t* bs >> > (size, getRawPtr(d_bv_out), getRawPtr(d_bv_out));
				cudaDeviceSynchronize();
				getLastCudaError("calcSceneBV_2 failure");
			}

			// calculate float32 morton code for each BV
			// (d_bv_out[0] now holds the scene BV)
			CudaDevice.launchKernel(leaf_size, 0, GPU::calcMortonCodes_32b, leaf_size, leaves, getRawPtr(d_bv_out));

			// reorder leaves using morton code; mapidx records the permutation
			thrust::device_vector<int> mapidx(leaf_size);
			checkThrustErrors(thrust::sequence(mapidx.begin(), mapidx.end()));
			checkThrustErrors(thrust::sort_by_key(getDevicePtr(leaves.morton), getDevicePtr(leaves.morton + leaf_size), mapidx.begin()));

			// allocate memory for sorted leaves
			LBvhLeaves<BVSOA_t> leaves_sorted;
			leaves_sorted.allocateDevice(leaf_size);

			CudaDevice.launchKernel(leaf_size, 0, GPU::reorderLeaves, leaf_size, leaves, leaves_sorted, getRawPtr(mapidx));
			cudaDeviceSynchronize();
			getLastCudaError("reorderLeaves kernel failure");
			// free the memory of old leaves; the member then takes over the
			// sorted buffers (shallow handle copy -- presumably LBvhLeaves does
			// not free on destruction; verify against its definition)
			leaves.deallocateDevice();
			this->leaves = leaves_sorted;

			// calculate metrics between adjacent sorted leaves
			thrust::device_vector<unsigned int> metrics(leaf_size - 1);
			CudaDevice.launchKernel(leaf_size - 1, 0, GPU::calcBuildMetrics, leaf_size - 1, leaves, getRawPtr(metrics));

			// build interior nodes; visitCount/leftLeafCount are zero-initialized
			// per-node counters consumed by the build kernel
			thrust::device_vector<int> visitCount(leaf_size - 1, 0);
			thrust::device_vector<int> leftLeafCount(leaf_size - 1, 0);
			InteriorBuildAid aid{ getRawPtr(metrics),getRawPtr(visitCount),getRawPtr(leftLeafCount) };
			CudaDevice.launchKernel(leaf_size, 0, GPU::buildInteriors, leaf_size, leaves, interiors, aid);

			// reorder interiors
			// allocate memory for sorted interiors
			LBvhInteriors<BVSOA_t> interiors_sorted;
			interiors_sorted.allocateDevice(leaf_size - 1);

			checkThrustErrors(thrust::exclusive_scan(getDevicePtr(leaves.segLen), getDevicePtr(leaves.segLen + leaf_size), getDevicePtr(leaves.segOffset)));

			// mapidx is reused here as scratch for the interiors' new indices
			CudaDevice.launchKernel(leaf_size - 1, 0, GPU::calcInteriorNewIdx, leaf_size - 1, leaves, interiors, getRawPtr(leftLeafCount), getRawPtr(mapidx));
			CudaDevice.launchKernel(leaf_size - 1, 0, GPU::reorderInteriors, leaf_size - 1, leaves, interiors, interiors_sorted, getRawPtr(mapidx));
			cudaDeviceSynchronize();
			getLastCudaError("reorderInteriors kernel failure");
			// free the memory of old interiors
			interiors.deallocateDevice();

			this->interiors = interiors_sorted;
		}

		// Release all device-side tree storage; safe to call repeatedly.
		void free() override {
			if (leaf_size) {
				leaves.deallocateDevice();
				interiors.deallocateDevice();
				leaf_size = 0;
			}
		}

		// Test every query primitive in object_manager against the built tree
		// (one thread per query primitive). Requires a prior successful init().
		template<class QueryManager_t>
		void testIntersection(const QueryManager_t& object_manager) {
			MCudaDevice& CudaDevice = MCudaDevice::getInstance();
			CudaDevice.launchKernel(object_manager.primSize(), 0, GPU::traverseLbvhCustom, object_manager, leaf_size, leaves, interiors);
			cudaDeviceSynchronize();
			getLastCudaError("traverseLbvhCustom kernel failure");
		}

		// Self-intersection test over the tree's own primitives (one thread per
		// interior node). Requires a prior successful init().
		template<class QueryManager_t>
		void testIntraIntersection(const QueryManager_t& object_manager) {
			MCudaDevice& CudaDevice = MCudaDevice::getInstance();
			CudaDevice.launchKernel(leaf_size - 1, 0, GPU::traverseLbvhIntra, object_manager, leaf_size, leaves, interiors);
			cudaDeviceSynchronize();
			getLastCudaError("traverseLbvhIntra kernel failure");
		}
	protected:
		LBvhLeaves<BVSOA_t> leaves;		// device SOA of sorted leaf nodes
		LBvhInteriors<BVSOA_t> interiors;	// device SOA of leaf_size - 1 interior nodes
		int leaf_size;				// number of leaves; 0 == not initialized
	};
}
}