#include <cuda_fp16.h>

#include <cmath>
#include <cstdlib>
#include <iostream>

// input data:  1 2 3 4 5 6 7 8
// output data: 0.000576613 0.0015674 0.00426062 0.0115816 0.031482 0.0855769 0.232622 0.632333

// Warp-level max reduction via a shuffle-down tree.
// After the loop, lane 0 holds the maximum over all 32 lanes (other lanes
// hold partial results). Precondition: all 32 lanes of the warp are active,
// since the full 0xFFFFFFFF participation mask is used.
// Uses operator< instead of max() so the template also instantiates for
// __half (comparison operators exist on SM53+; max() has no device
// overload for __half).
template <typename T>
__device__ __forceinline__ T warpReduceMax(T val) {
	#pragma unroll
	for (int offset = 16; offset > 0; offset >>= 1) {
		T tmp = __shfl_down_sync(0xFFFFFFFF, val, offset);
		val = (val < tmp) ? tmp : val;
	}
	return val;
}

// Warp-level sum reduction via a shuffle-down tree (FP16/FP32).
// After the final step, lane 0 holds the sum over all 32 lanes; the other
// lanes hold partial sums. All 32 lanes must participate (full mask).
template <typename T>
__device__ __forceinline__ T warpReduceSum(T val) {
	#pragma unroll
	for (int delta = 16; delta >= 1; delta /= 2) {
		val = val + __shfl_down_sync(0xFFFFFFFF, val, delta);
	}
	return val;
}

// Numerically stable softmax over one vector of length N, computed by a
// single block: out[i] = exp(in[i] - max) / sum_j exp(in[j] - max).
//
// Launch: <<<1, threads>>> with any blockDim.x up to 1024. Partial results
// are combined across warps through static shared memory, so the dynamic
// shared-memory launch argument is unused (passing one is harmless).
// Precondition: blockDim.x is a multiple of 32, because the warp reductions
// use a full 0xFFFFFFFF shuffle mask.
template <typename T>
__global__ void softmax_kernel(T* output, const T* input, int N) {
	// Per-warp partials (a block holds at most 1024/32 = 32 warps) plus one
	// slot used to broadcast the block-wide max, then the block-wide sum.
	__shared__ T s_warp[32];
	__shared__ T s_bcast;

	const int lane    = threadIdx.x & 31;  // lane index within the warp
	const int warp_id = threadIdx.x >> 5;  // warp index within the block
	const int n_warps = (blockDim.x + 31) >> 5;

	// Phase 1: block-wide max (subtracted later for numerical stability).
	T max_val = static_cast<T>(-INFINITY);
	for (int i = threadIdx.x; i < N; i += blockDim.x) {
		max_val = (max_val < input[i]) ? input[i] : max_val;
	}
	max_val = warpReduceMax(max_val);
	if (lane == 0) s_warp[warp_id] = max_val;
	__syncthreads();
	if (warp_id == 0) {
		// First warp combines the per-warp partial maxima.
		T v = (lane < n_warps) ? s_warp[lane] : static_cast<T>(-INFINITY);
		v = warpReduceMax(v);
		if (lane == 0) s_bcast = v;
	}
	__syncthreads();
	const T block_max = s_bcast;  // read once; s_bcast is reused below

	// Phase 2: exponentiate (shifted by the max) and accumulate the sum.
	T sum = 0;
	for (int i = threadIdx.x; i < N; i += blockDim.x) {
		T val = exp(input[i] - block_max);
		output[i] = val;
		sum += val;
	}
	sum = warpReduceSum(sum);
	if (lane == 0) s_warp[warp_id] = sum;
	__syncthreads();
	if (warp_id == 0) {
		// First warp combines the per-warp partial sums.
		T v = (lane < n_warps) ? s_warp[lane] : static_cast<T>(0);
		v = warpReduceSum(v);
		if (lane == 0) s_bcast = v;
	}
	__syncthreads();
	const T block_sum = s_bcast;

	// Phase 3: normalize so the outputs sum to 1.
	for (int i = threadIdx.x; i < N; i += blockDim.x) {
		output[i] /= block_sum;
	}
}

int main() {
	const int N = 8;  // Fixed input size so the output can be verified by eye.
	float h_input[N] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f};
	float h_output[N];

	// Minimal error handling: abort with a message on any CUDA failure.
	// Kernel launches report config errors via cudaGetLastError() and
	// execution errors at the next synchronizing call, so both are checked.
	auto check = [](cudaError_t err, const char* what) {
		if (err != cudaSuccess) {
			std::cerr << what << " failed: " << cudaGetErrorString(err) << std::endl;
			std::exit(EXIT_FAILURE);
		}
	};

	float *d_input = nullptr, *d_output = nullptr;
	check(cudaMalloc(&d_input, N * sizeof(float)), "cudaMalloc(d_input)");
	check(cudaMalloc(&d_output, N * sizeof(float)), "cudaMalloc(d_output)");
	check(cudaMemcpy(d_input, h_input, N * sizeof(float), cudaMemcpyHostToDevice),
	      "cudaMemcpy H2D");

	// Launch one full warp: the kernel's __shfl_*_sync calls use a full
	// 32-lane mask, so every lane of the warp must actually be launched.
	dim3 block(32);
	dim3 grid(1);
	size_t shared_size = 2 * sizeof(float);  // slots for max and sum

	softmax_kernel<float><<<grid, block, shared_size>>>(d_output, d_input, N);
	check(cudaGetLastError(), "kernel launch");
	check(cudaDeviceSynchronize(), "kernel execution");

	check(cudaMemcpy(h_output, d_output, N * sizeof(float), cudaMemcpyDeviceToHost),
	      "cudaMemcpy D2H");

	std::cout << "input data:  ";
	for (int i = 0; i < N; ++i) std::cout << h_input[i] << " ";
	std::cout << std::endl;

	std::cout << "output data: ";
	for (int i = 0; i < N; ++i) std::cout << h_output[i] << " ";
	std::cout << std::endl;

	check(cudaFree(d_input), "cudaFree(d_input)");
	check(cudaFree(d_output), "cudaFree(d_output)");
	return 0;
}
