#include <hip/amd_detail/amd_hip_bf16.h>
#include <torch/torch.h>
#include <torch/extension.h>

// 16 packed floats: matches one lane's f32 accumulator fragment of the
// 32x32x8 bf16 MFMA instruction used below.
using f32x16 = __attribute__( (__vector_size__(16 * sizeof(float)) )) float;
// 4 packed 16-bit lanes (64 bits total); used throughout as a carrier for
// 4 bf16 values (declared as short because it is raw bit storage, not arithmetic).
using f16x4 = __attribute__( (__vector_size__(4 * sizeof(short)) )) short;

// Pack 4 floats into 4 bf16 values (one 64-bit f16x4) by keeping only the
// high 16 bits of each float — i.e. truncation toward zero, not
// round-to-nearest. Assumes little-endian layout (the high half of float i
// is the short at index 2*i + 1).
// NOTE(review): truncation loses up to 1 ulp of bf16 precision vs the
// rounding __hip_bfloat16 conversion used in mlp_up_kernel — confirm intended.
__device__ static f16x4 pack_f32x4_to_u64(const float* src) {
	const short* halves = (const short*)src;
	f16x4 packed;
	for (int i = 0; i < 4; ++i)
		packed[i] = halves[2 * i + 1];
	return packed;
}

// Gather one 32-row x 8-column bf16 tile into the packed layout the MFMA
// kernels consume. Launch: grid = (k0, n0), block = 64 threads; each thread
// writes one f16x4 (4 consecutive column elements of one row).
// Strides are in bf16 elements. Rows/columns must be in range (aligned case);
// the unaligned-row case is handled by permute_unalign_kernel.
__global__ void permute_kernel(const short* input, f16x4* output, size_t dstStride, size_t nStride, size_t kStride) {
	// lanes 0..31 cover the 32 rows; the upper half-wave takes columns 4..7.
	const size_t row = blockIdx.y * 32 + (threadIdx.x & 31);
	const size_t colGroup = blockIdx.x * 8 + (threadIdx.x >> 5) * 4;
	const short* src = input + row * nStride + colGroup * kStride;
	f16x4* dst = output + blockIdx.y * dstStride + blockIdx.x * 64;
	dst[threadIdx.x] = { src[0], src[kStride], src[kStride * 2], src[kStride * 3] };
}

// Same tile gather as permute_kernel, but for inputs whose row count n is not
// a multiple of 32: rows past n are zero-filled in the output so the padded
// tiles are well-defined. Launch: grid = (k0, n0), block = 64 threads.
// Strides are in bf16 elements.
__global__ void permute_unalign_kernel(const short* input, f16x4* output, size_t dstStride, size_t nStride, size_t kStride, size_t n) {
	output += blockIdx.y * dstStride + blockIdx.x * 64;
	size_t row = blockIdx.y * 32 | (threadIdx.x & 31);
	size_t col = blockIdx.x << 3 | (threadIdx.x >> 5 << 2);
	if (row >= n) {
		// Padded row: emit zeros without ever forming an out-of-bounds
		// source pointer (the original computed input + row*nStride even
		// for rows far past the allocation, which is UB).
		output[threadIdx.x] = { 0, 0, 0, 0 };
	} else {
		const short* src = input + row * nStride + col * kStride;
		output[threadIdx.x] = { src[0], src[kStride], src[kStride * 2], src[kStride * 3] };
	}
}

// Repack a 2-D bf16 matrix into the tiled layout consumed by the MFMA
// kernels below. Returns a flat bf16 CUDA tensor of n0*k0*256 elements,
// where n0 = ceil(rows/32) and k0 = ceil(cols/8); rows beyond the input are
// zero-filled.
// NOTE(review): when cols % 8 != 0 but rows % 32 == 0, the aligned kernel is
// taken and reads past the last column — confirm callers pre-pad k.
torch::Tensor permute(torch::Tensor input) {
    TORCH_CHECK(input.dim() == 2, "permute: expected a 2-D tensor, got dim=", input.dim());
    TORCH_CHECK(input.scalar_type() == torch::kBFloat16, "permute: expected a bfloat16 tensor");
    TORCH_CHECK(input.is_cuda(), "permute: expected a GPU tensor");
    size_t n = input.size(0);
    size_t k = input.size(1);
    size_t n0 = (n + 31) >> 5;  // 32-row tile count
    size_t k0 = (k + 7) >> 3;   // 8-column tile count
    auto options = torch::TensorOptions().dtype(torch::kBFloat16).device(torch::kCUDA);
    torch::Tensor output = torch::empty(n0 * k0 * 256, options);
    if (n < (n0 << 5))  // row count not a multiple of 32: zero-fill padded rows
        permute_unalign_kernel<<<dim3(k0, n0), dim3(64)>>>((short*)input.data_ptr(), (f16x4*)output.data_ptr(), k0 * 64, input.stride(0), input.stride(1), n);
    else
        permute_kernel<<<dim3(k0, n0), dim3(64)>>>((short*)input.data_ptr(), (f16x4*)output.data_ptr(), k0 * 64, input.stride(0), input.stride(1));
    return output;
}

// Fused gate/up projection: one 64-thread wavefront computes a 32x32 tile of
//   up(x) * silu(gate(x))  where silu(g) = g / (1 + exp(-g)),
// accumulating both matmuls in f32 via MFMA, then converting to bf16.
// Operands are in the tiled f16x4 layout produced by permute(); n is the
// output column count, k the number of 8-deep K steps.
// Requires CDNA MFMA support (__builtin_amdgcn_mfma_f32_32x32x8bf16_1k).
__global__ void mlp_up_kernel(const f16x4* input, const f16x4* gate, const f16x4* up, f16x4* output, int n, int k) {
	const int col = k * 64;  // f16x4 elements per 32-row operand strip
	input += blockIdx.y * col | threadIdx.x;
	gate += blockIdx.x * col | threadIdx.x;
	up += blockIdx.x * col | threadIdx.x;
	f32x16 result0 = { };  // accumulator for gate(x)
	f32x16 result1 = { };  // accumulator for up(x)
	for(int i = 0; i < k; ++i) {
		result0 = __builtin_amdgcn_mfma_f32_32x32x8bf16_1k(gate[i * 64], input[i * 64], result0, 0, 0, 0);
		result1 = __builtin_amdgcn_mfma_f32_32x32x8bf16_1k(up[i * 64], input[i * 64], result1, 0, 0, 0);
	}
	result1 *= result0;      // up(x) * gate(x)
	result0 *= -1.442695f;   // -log2(e): exp(-g) == exp2(-g * log2(e))
	__hip_bfloat16 src[16];
#pragma unroll
    for (int i = 0; i < 16; ++i)
		// 1.0f (not 1.0): a double literal here forced every add/divide
		// through f64, which is needlessly slow on GPU hardware.
		src[i] = (__hip_bfloat16)(result1[i] / (1.0f + __builtin_amdgcn_exp2f(result0[i])));
	output += (blockIdx.y * n + blockIdx.x * 32) * 8 | threadIdx.x;
	output[0] = ((f16x4*)src)[0];
	output[0x40] = ((f16x4*)src)[1];
	output[0x80] = ((f16x4*)src)[2];
	output[0xc0] = ((f16x4*)src)[3];
}

// bf16 matmul: one 64-thread wavefront computes a 32x32 output tile by
// accumulating k MFMA steps (each consuming 8 elements of the reduction dim)
// in f32, then truncating to bf16 via pack_f32x4_to_u64.
// Operands are in the tiled f16x4 layout produced by permute(); the output is
// written as a plain row-major m x n bf16 matrix (n assumed divisible by 4 —
// guaranteed when n is a multiple of 32 as the host wrapper launches it).
// NOTE(review): the dst offsets (ty, +2, +4, +6 in f16x4 units) mirror the
// MFMA 32x32 accumulator register layout — verify against the ISA docs
// before touching this indexing.
__global__ void matmul_kernel(const f16x4* left, const f16x4* right, f16x4* output, int m, int n, int k) {
	const int col = k * 64;            // f16x4 elements per 32-row operand strip
	int tx = threadIdx.x & 31;         // lane within the half-wave
	int ty = threadIdx.x >> 5 & 1;     // which half-wave (selects 4-col group)
	int cx = 32 * blockIdx.y;          // output row base of this tile
	int cy = 32 * blockIdx.x;          // output column base of this tile
	left += blockIdx.y * col | threadIdx.x;
	right += blockIdx.x * col | threadIdx.x;
	f32x16 result = { };
	for(int i = 0; i < k; ++i) {
		result = __builtin_amdgcn_mfma_f32_32x32x8bf16_1k(right[i * 64], left[i * 64], result, 0, 0, 0);
	}
	// View the 16-float accumulator as 4 groups of 4 consecutive columns.
	const auto* src = reinterpret_cast<const float(*)[4]>(&result);
	f16x4* dst = output + ((tx + cx) * n + cy) / 4 + ty;
	dst[0] = pack_f32x4_to_u64(src[0]);
	dst[2] = pack_f32x4_to_u64(src[1]);
	dst[4] = pack_f32x4_to_u64(src[2]);
	dst[6] = pack_f32x4_to_u64(src[3]);
}

// Host wrapper for matmul_kernel: C(m x n) = left x right^T over reduction
// dim k, with both operands already in the tiled layout from permute().
// Returns a row-major bf16 CUDA tensor of shape {m, n}.
torch::Tensor matmul(torch::Tensor left, torch::Tensor right, int m, int n, int k) {
	// The grid/step divisions below truncate; reject sizes that would
	// silently drop rows/columns instead of computing a wrong result.
	TORCH_CHECK(m % 32 == 0 && n % 32 == 0, "matmul: m and n must be multiples of 32, got m=", m, " n=", n);
	TORCH_CHECK(k % 8 == 0, "matmul: k must be a multiple of 8, got k=", k);
	auto options = torch::TensorOptions().dtype(torch::kBFloat16).device(torch::kCUDA);
	torch::Tensor output = torch::empty({m, n}, options);
	matmul_kernel<<<dim3(n / 32, m / 32), dim3(64), 0, 0>>> ((f16x4*)left.data_ptr(), (f16x4*)right.data_ptr(), (f16x4*)output.data_ptr(), m, n, k / 8);
	return output;
}

// Full MLP block: output = down_proj( up(x) * silu(gate(x)) ).
// m, n, k are rounded up to the kernels' tile sizes (32/32/8); the operand
// tensors are assumed to already hold the padded, permuted layouts sized for
// the rounded dimensions — presumably produced via permute(); TODO confirm
// with the Python-side caller.
torch::Tensor mlp(torch::Tensor input, torch::Tensor gate_up, torch::Tensor down, int m, int n, int k) {
	m = (m + 31) >> 5 << 5;  // round rows up to a multiple of 32
	n = (n + 31) >> 5 << 5;  // round hidden dim up to a multiple of 32
	k = (k + 7) >> 3 << 3;   // round reduction dim up to a multiple of 8
    auto options = torch::TensorOptions().dtype(torch::kBFloat16).device(torch::kCUDA);
    // Intermediate activation (m x n) in the tiled layout mlp_up_kernel emits.
    torch::Tensor temp = torch::empty(m * n, options);
	torch::Tensor output = torch::empty({m, k}, options);
	// gate_up packs gate weights first, then up weights, back to back
	// (offset n*k bf16 elements = n*k/4 f16x4 elements).
	f16x4* gate_ptr = (f16x4*)gate_up.data_ptr();
	f16x4* up_ptr = gate_ptr + n * k / 4;
    mlp_up_kernel<<<dim3(n / 32, m / 32), dim3(64), 0, 0>>> ((f16x4*)input.data_ptr(), gate_ptr, up_ptr, (f16x4*)temp.data_ptr(), n, k / 8);
	// Second GEMM reduces over n: note the reduction-step argument is n / 8.
	matmul_kernel<<<dim3(k / 32, m / 32), dim3(64), 0, 0>>> ((f16x4*)temp.data_ptr(), (f16x4*)down.data_ptr(), (f16x4*)output.data_ptr(), m, k, n / 8);
	return output;
}

// Python bindings for the bf16 MFMA extension.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) 
{
  m.def("permute", &permute, "Permute");
  m.def("mlp", &mlp, "MLP");
  // Fix copy-pasted docstring: this binding is matmul, not MLP.
  m.def("matmul", &matmul, "Matmul");
}