#include <torch/torch.h>
#include <iostream>

// 手写矩阵乘法（仅限连续内存的FP32 2D矩阵）
/// Hand-rolled matrix multiplication (FP32, 2D, contiguous tensors only).
///
/// Computes result = a @ b with a naive triple loop over raw data pointers.
///
/// @param a  Left operand: 2D, contiguous, kFloat tensor of shape (M, K).
/// @param b  Right operand: 2D, contiguous, kFloat tensor of shape (K, N).
/// @return   New tensor of shape (M, N) with the same options as `a`.
/// @throws   c10::Error (via TORCH_CHECK) if any precondition is violated.
at::Tensor manual_matmul(const at::Tensor& a, const at::Tensor& b) {
	// Validate preconditions up front so the raw-pointer loops below are safe.
	TORCH_CHECK(a.dim() == 2 && b.dim() == 2, "Inputs must be 2D tensors");
	TORCH_CHECK(a.size(1) == b.size(0), "Dimension mismatch: a.shape[1]  != b.shape[0]");
	TORCH_CHECK(a.is_contiguous() && b.is_contiguous(), "Requires contiguous memory");
	TORCH_CHECK(a.scalar_type() == at::kFloat && b.scalar_type() == at::kFloat, "Only FP32 supported");

	// Hoist loop-invariant dimensions: Tensor::size() is an out-of-line call
	// into libtorch, so calling it inside the inner loop is needless overhead.
	const int64_t m = a.size(0);
	const int64_t k_dim = a.size(1);
	const int64_t n = b.size(1);

	// Result tensor, zero-initialized, same dtype/device as `a`.
	auto result = torch::zeros({m, n}, a.options());

	// Raw pointer access — valid because contiguity was checked above.
	const float* a_data = a.data_ptr<float>();
	const float* b_data = b.data_ptr<float>();
	float* r_data = result.data_ptr<float>();

	// Naive O(M*N*K) triple loop; row-major indexing matches contiguous layout.
	for (int64_t i = 0; i < m; ++i) {
		for (int64_t j = 0; j < n; ++j) {
			float sum = 0.0f;
			for (int64_t k = 0; k < k_dim; ++k) {
				sum += a_data[i * k_dim + k] * b_data[k * n + j];
			}
			r_data[i * n + j] = sum;
		}
	}
	return result;
}

int main() {
	// Test inputs. NOTE(review): both tensors are created contiguous here —
	// despite what one might expect, no transpose/non-contiguous case is
	// exercised; the .contiguous() calls below are therefore no-ops.
	const auto a = torch::randn({3, 4});
	const auto b = torch::randn({4, 5});

	// Reference result from the official LibTorch implementation.
	const auto expected = at::matmul(a, b);
	std::cout << "LibTorch matmul result:\n" << expected << std::endl;

	// Hand-written implementation (requires contiguous FP32 inputs).
	const auto actual = manual_matmul(a.contiguous(), b.contiguous());
	std::cout << "Manual matmul result:\n" << actual << std::endl;

	// Compare: report the largest element-wise absolute difference.
	const auto diff = torch::abs(expected - actual);
	std::cout << "Max difference: " << diff.max().item<float>() << std::endl;

	return 0;
}
