#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#include <cuda.h>
#include <cuda_runtime.h>

#define BLOCK_SIZE 16  // 每个线程块的大小

// 核函数：矩阵乘法
// Naive GEMM kernel: C = A * B, with A (MxK), B (KxN), C (MxN), all row-major.
// Launch with a 2D grid: x covers the N columns, y covers the M rows; one
// thread computes one element of C. A and B are read-only, so they are marked
// const __restrict__ to enable the read-only data cache.
// B[i * N + col] is coalesced across a warp (col varies with threadIdx.x);
// A[row * K + i] is the same address for all lanes of a warp row (broadcast).
__global__ void matrixMul(float* __restrict__ C, const float* __restrict__ A,
                          const float* __restrict__ B, int M, int N, int K) {
	// Global 2D thread coordinates: y indexes rows, x indexes columns of C.
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;

	// Bounds guard: the grid may overhang the matrix when M or N is not a
	// multiple of the block size.
	if (row < M && col < N) {
		// Float literal (0.0f) avoids an accidental double-precision accumulator.
		float sum = 0.0f;
		for (int i = 0; i < K; ++i) {
			sum += A[row * K + i] * B[i * N + col];
		}
		C[row * N + col] = sum;
	}
}

// CPU 上的矩阵乘法实现
// Host-side reference GEMM: C = A * B, with A (MxK), B (KxN), C (MxN),
// all row-major. Used to validate the GPU result. Inputs are const-correct;
// the accumulator is a float literal (0.0f) so the summation matches the
// kernel's single-precision arithmetic as closely as possible.
void matrixMulCPU(float* C, const float* A, const float* B, int M, int N, int K) {
	for (int row = 0; row < M; ++row) {
		for (int col = 0; col < N; ++col) {
			float sum = 0.0f;
			for (int i = 0; i < K; ++i) {
				sum += A[row * K + i] * B[i * N + col];
			}
			C[row * N + col] = sum;
		}
	}
}

// 检查矩阵乘法结果是否一致
// Compare two MxN float matrices element-wise.
// Uses a mixed absolute/relative tolerance: a pure absolute 1e-5 bound is far
// too strict for single-precision sums over K ~ 4096 terms (entries here reach
// ~3e5, where float rounding error alone exceeds 1e-5), which made correct
// GPU results fail the check. fabsf (from <math.h>) is the float overload.
bool checkResult(float* C1, float* C2, int M, int N) {
	const float atol = 1e-5f;  // floor for values near zero
	const float rtol = 1e-5f;  // scales with the magnitude of the reference
	for (int i = 0; i < M * N; ++i) {
		if (fabsf(C1[i] - C2[i]) > atol + rtol * fabsf(C2[i])) {
			return false;
		}
	}
	return true;
}

// Check a CUDA runtime call; print a readable diagnostic and exit on failure.
// Kernel launches report configuration errors via cudaGetLastError() and
// execution errors at the next synchronizing call, so both are checked below.
#define CUDA_CHECK(call)                                                      \
	do {                                                                      \
		cudaError_t err_ = (call);                                            \
		if (err_ != cudaSuccess) {                                            \
			fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
			        cudaGetErrorString(err_));                                \
			exit(EXIT_FAILURE);                                               \
		}                                                                     \
	} while (0)

int main() {
	// Problem size: C (MxN) = A (MxK) * B (KxN)
	int M = 1024;  // rows of A / rows of C
	int N = 4096;  // columns of B / columns of C
	int K = 4096;  // columns of A / rows of B
	// Cast before multiplying so the byte counts are computed in size_t.
	size_t size_A = (size_t)M * K * sizeof(float);
	size_t size_B = (size_t)K * N * sizeof(float);
	size_t size_C = (size_t)M * N * sizeof(float);

	// Allocate host memory; a NULL here would otherwise crash in the init loops.
	float* h_A = (float*)malloc(size_A);
	float* h_B = (float*)malloc(size_B);
	float* h_C_GPU = (float*)malloc(size_C);  // result computed on the GPU
	float* h_C_CPU = (float*)malloc(size_C);  // reference result from the CPU
	if (h_A == NULL || h_B == NULL || h_C_GPU == NULL || h_C_CPU == NULL) {
		fprintf(stderr, "Host memory allocation failed\n");
		return 1;
	}

	// Fill A and B with small integers; rand() is unseeded, so runs are
	// deterministic (intentional for reproducible validation).
	for (int i = 0; i < M * K; ++i) {
		h_A[i] = (float)(rand() % 10);
	}
	for (int i = 0; i < K * N; ++i) {
		h_B[i] = (float)(rand() % 10);
	}

	// Allocate device memory.
	float* d_A, * d_B, * d_C;
	CUDA_CHECK(cudaMalloc((void**)&d_A, size_A));
	CUDA_CHECK(cudaMalloc((void**)&d_B, size_B));
	CUDA_CHECK(cudaMalloc((void**)&d_C, size_C));

	// Copy inputs host -> device.
	CUDA_CHECK(cudaMemcpy(d_A, h_A, size_A, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_B, h_B, size_B, cudaMemcpyHostToDevice));

	// 2D launch: x covers the N columns, y covers the M rows. Ceil-division
	// ensures the grid covers sizes that are not multiples of BLOCK_SIZE.
	dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
	dim3 dimGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (M + BLOCK_SIZE - 1) / BLOCK_SIZE);

	// CUDA events for GPU-side timing.
	cudaEvent_t start, stop;
	CUDA_CHECK(cudaEventCreate(&start));
	CUDA_CHECK(cudaEventCreate(&stop));

	// Time the kernel. The launch error is checked immediately (before any
	// timing output), and cudaEventSynchronize surfaces async execution errors.
	CUDA_CHECK(cudaEventRecord(start));
	matrixMul<<<dimGrid, dimBlock>>>(d_C, d_A, d_B, M, N, K);
	CUDA_CHECK(cudaGetLastError());  // launch-configuration errors
	CUDA_CHECK(cudaEventRecord(stop));
	CUDA_CHECK(cudaEventSynchronize(stop));  // blocks until the kernel finishes

	float naiveTime;  // milliseconds between the two events
	CUDA_CHECK(cudaEventElapsedTime(&naiveTime, start, stop));
	printf("GPU 执行时间: %.3f ms\n", naiveTime);
	// 2*M*N*K flops; time is in ms, so flops/ms * 1e-9 yields TFLOPS.
	double throughput = (double)((double)M*N*K*2.0) / naiveTime * 1e-9;
	printf("Throughput: %f TFLOPS\n", throughput);

	// Copy the result back (a blocking cudaMemcpy also synchronizes).
	CUDA_CHECK(cudaMemcpy(h_C_GPU, d_C, size_C, cudaMemcpyDeviceToHost));

	// Compute the reference result on the CPU.
	matrixMulCPU(h_C_CPU, h_A, h_B, M, N, K);

	// Compare GPU and CPU results.
	bool result = checkResult(h_C_GPU, h_C_CPU, M, N);
	if (result) {
		printf("验证通过：GPU 和 CPU 的计算结果一致。\n");
	} else {
		printf("验证失败：GPU 和 CPU 的计算结果不一致。\n");
	}

	// Release timing events (previously leaked) and device memory.
	CUDA_CHECK(cudaEventDestroy(start));
	CUDA_CHECK(cudaEventDestroy(stop));
	CUDA_CHECK(cudaFree(d_A));
	CUDA_CHECK(cudaFree(d_B));
	CUDA_CHECK(cudaFree(d_C));

	// Release host memory.
	free(h_A);
	free(h_B);
	free(h_C_GPU);
	free(h_C_CPU);

	return result ? 0 : 1;  // nonzero exit on validation failure
}
