#include <cstdio>
#include <cuda_runtime.h>

// Element type alias used by this translation unit's test configuration.
typedef float ft;
// Default problem dimensions.
// NOTE(review): presumably these mirror the kernel's L/M/N runtime
// parameters (128^3 test case), but they are not referenced anywhere in
// this chunk — confirm against the callers before relying on them.
const int my_L = 128;
const int my_M = 128;
const int my_N = 128;

// Block-wide sum reduction.
// Each thread contributes one value `v`; the total is returned to EVERY
// thread.  Correct for any blockDim.x (power of two not required): each
// step folds the top `s - half` elements onto the bottom `half`, so odd
// partition sizes lose nothing.  The trailing barrier guarantees `smem`
// is safe to reuse immediately after this returns.  Requires
// blockDim.x * sizeof(T) bytes of (dynamic) shared memory in `smem`.
template <typename T>
__device__ T block_reduce_sum(T* smem, T v, const int idx)
{
    smem[idx] = v;
    __syncthreads();
    for (int s = blockDim.x; s > 1; ) {
        const int half = (s + 1) >> 1;      // ceil(s/2) handles odd s
        if (idx < s - half) {
            smem[idx] += smem[idx + half];
        }
        __syncthreads();
        s = half;
    }
    const T total = smem[0];
    __syncthreads();   // all threads have read smem[0]; smem reusable now
    return total;
}

// One block per output index k (launch with gridDim.x == N).
// Dynamic shared memory requirement: blockDim.x * sizeof(T) bytes
// (passed as the 3rd launch argument).
//
// Fixes relative to the previous version:
//  - The volatile warp-reduction tail assumed implicit warp synchrony,
//    which does not hold under Volta+ independent thread scheduling, and
//    it read smem[idx+32] out of bounds whenever blockDim.x < 64.
//  - Reading smem[0] after phase 1 and immediately overwriting smem[idx]
//    in phase 2 raced (no barrier between the read and the write).
//  - The tree reduction silently dropped elements for non-power-of-two
//    block sizes.
//  - Phase 2 read exactly one matrix element per thread, dropping
//    columns >= blockDim.x when L > blockDim.x; it now strides over all
//    L columns.
template <typename T>
__global__ __launch_bounds__(512) void gpu_version2(
    const T* __restrict__ input,
    T* __restrict__ output,
    const T* __restrict__ matrix,
    const int L,
    const int M,
    const int N)
{
    extern __shared__ T smem[];
    const int idx = threadIdx.x;
    const int k = blockIdx.x;   // which output slice this block produces

    // Phase 1: per-thread strided partial sums, then a block reduction,
    // averaged over M.
    // NOTE(review): the `idx*M + i` addressing (kept byte-identical to
    // the original) makes each thread read from row `idx` of the k-th
    // L-by-M slab starting at column `idx` — it is neither a plain row
    // sum nor coalesced.  Confirm this access pattern is intended.
    T partial = 0;
    for (int i = idx; i < M; i += blockDim.x) {
        partial += input[k*M*L + idx*M + i];
    }
    // block_reduce_sum ends with a barrier, so reusing smem below is
    // race-free.
    const T v1 = block_reduce_sum(smem, partial, idx) / (T)M;

    // Phase 2: output[k + i*N] = v1 * sum over matrix row i.
    // The strided inner loop covers all L columns even when
    // blockDim.x < L.
    for (int i = 0; i < L; ++i) {
        T rowsum = 0;
        for (int j = idx; j < L; j += blockDim.x) {
            rowsum += v1 * matrix[i*L + j];
        }
        rowsum = block_reduce_sum(smem, rowsum, idx);
        if (idx == 0) {
            output[k + i*N] = rowsum;
        }
        // No extra barrier needed: block_reduce_sum already synchronized
        // before returning.
    }
}

// Launches gpu_version2<float> on `stream` with one block per output
// index k (grid = N blocks).
// Preconditions: d_input, d_output, d_matrix are DEVICE pointers;
// d_output must hold at least N*L floats (the kernel writes
// output[k + i*N] for i in [0, L)).
void test_kernel_launch(
    float *d_input, float *d_output, float *d_matrix,
    int L, int M, int N, cudaStream_t stream)
{
    // One thread per matrix column, capped at the kernel's
    // __launch_bounds__ limit of 512 threads per block (this is a cap,
    // not a guarantee of 512 threads).
    int block_size = min(512, L);
    int grid_size = N;   // one block per k

    // Dynamic shared memory: one float per thread for the block-wide
    // reductions.  Rounding the byte count up to a 32-element multiple is
    // harmless slack — note it does NOT affect bank conflicts, contrary
    // to the old comment.
    size_t shmem_size = ((block_size + 31)/32)*32 * sizeof(float);

    gpu_version2<float><<<grid_size, block_size, shmem_size, stream>>>(
        d_input, d_output, d_matrix, L, M, N);

    // Kernel launches return no status directly; configuration errors
    // (invalid block size, too much shared memory, ...) only surface via
    // cudaGetLastError().  Check immediately so they are not silently
    // attributed to a later, unrelated call.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "gpu_version2 launch failed: %s\n",
                cudaGetErrorString(err));
    }
}
