#include <torch/torch.h> 
#include <cmath> 
#include <iostream> 
#include <iomanip> 
 
// 原生C++实现RMSNorm 
#if 0
at::Tensor rms_norm_cpu(const at::Tensor& input, const at::Tensor& weight, double epsilon) {
    // Step 1: 获取原始数据指针
    float* input_data = reinterpret_cast<float*>(input.storage().data()); 
    float* output_data = reinterpret_cast<float*>(output.storage().data()); 
    float* weight_data = reinterpret_cast<float*>(weight.storage().data()); 

    at::Tensor output;
 
    // Step 2: 获取张量维度 
    int64_t batch_size = input.storage().sizes()[0]; 
    int64_t feature_dim = input.storage().sizes()[1]; 
 
    // Step 3: 计算 RMSNorm
    for (int b = 0; b < batch_size; ++b) {
        float sum_sq = 0.0f;
        for (int f = 0; f < feature_dim; ++f) {
            float val = input_data[b * feature_dim + f];
            sum_sq += val * val;
        }
        float inv_rms = 1.0f / std::sqrt(sum_sq / feature_dim + epsilon);
        for (int f = 0; f < feature_dim; ++f) {
            output_data[b * feature_dim + f] =
                input_data[b * feature_dim + f] * inv_rms * weight_data[f];
        }
    }
    return output;
}
#endif

// RMSNorm built from elementwise tensor ops: y = x / sqrt(mean(x^2) + eps) * w.
//
// @param input  tensor normalized over its last dimension
// @param weight per-feature scale, shape [input.size(-1)] (broadcast)
// @param eps    numerical-stability term inside the sqrt
// @return tensor of the same shape as `input`
at::Tensor rms_norm_native(const at::Tensor& input, const at::Tensor& weight, double eps = 1e-5) {
    // Root-mean-square along the last dim; keepdim so it broadcasts back.
    const at::Tensor rms = (input.square().mean(-1, /*keepdim=*/true) + eps).sqrt();
    // Normalize, then apply the learned per-feature gain.
    return (input / rms) * weight;
}
 
// RMSNorm via ATen's built-in at::rms_norm kernel.
//
// @param input  tensor normalized over its last dimension
// @param weight per-feature scale, shape [input.size(-1)]
// @param eps    numerical-stability term
// @return tensor of the same shape as `input`
at::Tensor rms_norm_aten(const at::Tensor& input, const at::Tensor& weight, double eps = 1e-5) {
    // at::rms_norm normalizes over the trailing dims listed in normalized_shape;
    // here that is just the last (feature) dimension.
    const int64_t last_dim = input.size(-1);
    return at::rms_norm(input, {last_dim}, weight, eps);
}
 
// Compare two results elementwise and report the max absolute difference.
//
// @param aten_result   output of the ATen implementation
// @param native_result output of the tensor-op implementation
// @param tolerance     pass threshold on the max absolute difference
// @return true when max |a - b| < tolerance
bool verify_precision(const at::Tensor& aten_result, const at::Tensor& native_result, double tolerance = 1e-6) {
    const double max_diff = (aten_result - native_result).abs().max().item<double>();
    std::cout << "最大绝对误差: " << std::setprecision(10) << max_diff << std::endl;
    return max_diff < tolerance;
}
 
// Driver: build random test data, run both RMSNorm implementations,
// and check that they agree within tolerance.
int main() {
    // Fixed seed so every run produces the same tensors.
    torch::manual_seed(42);

    // Test shapes: (batch_size=2, seq_len=3, hidden_dim=4).
    const double eps = 1e-5;
    at::Tensor input = torch::randn({2, 3, 4});
    at::Tensor weight = torch::randn({4});  // must match input's last dim

    std::cout << "input: " << std::endl << input << std::endl;
    std::cout << "weight: " << std::endl << weight << std::endl;

    // Forward pass through both implementations.
    at::Tensor aten_out = rms_norm_aten(input, weight, eps);
    at::Tensor native_out = rms_norm_native(input, weight, eps);
    // at::Tensor cpu_out = rms_norm_cpu(input, weight, eps);

    std::cout << "aten out: " << std::endl << aten_out << std::endl;
    std::cout << "native out: " << std::endl << native_out << std::endl;
    // std::cout << "cpu out: " << std::endl << cpu_out << std::endl;

    // Elementwise comparison with the default 1e-6 tolerance.
    const bool passed = verify_precision(aten_out, native_out);

    std::cout << "测试结果: " << (passed ? "PASS" : "FAIL") << std::endl;
    return 0;
}
