#!/usr/bin/python3
# coding=utf-8
#
# Copyright (C) 2023-2024. Huawei Technologies Co., Ltd. All rights reserved.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# ===============================================================================

import os
import torch
import numpy as np

def gen_golden_data_simple(shape=(8, 2048), negative_slope=0.1, seed=42):
    """Generate golden input/output data for a LeakyReLU backward (gradient) kernel.

    Uses torch autograd as the reference implementation and dumps raw float32
    binaries relative to the current working directory:
        ./input/input_x.bin   -- forward input x
        ./input/input_dy.bin  -- upstream gradient dy
        ./output/golden.bin   -- golden gradient dx = d(loss)/dx

    Args:
        shape: Tensor shape for both x and dy. Defaults to (8, 2048).
        negative_slope: LeakyReLU alpha for the negative branch. Defaults to 0.1.
        seed: torch RNG seed, for reproducible golden data. Defaults to 42.
    """
    torch.manual_seed(seed)

    # Forward input: uniform in [-10, 10) so both LeakyReLU branches are exercised.
    x = (torch.rand(*shape) * 20 - 10).requires_grad_(True)
    # Upstream gradient from the (hypothetical) downstream loss.
    dy = torch.randn(*shape)

    # Forward pass: LeakyReLU activation.
    out = torch.nn.functional.leaky_relu(x, negative_slope=negative_slope)

    # Backward pass: autograd computes dx = dy * (1 if x > 0 else negative_slope).
    # retain_graph is not needed -- the graph is used exactly once.
    dx, = torch.autograd.grad(outputs=out, inputs=x, grad_outputs=dy)

    # Ensure output directories exist before dumping the binaries.
    os.makedirs("./input", exist_ok=True)
    os.makedirs("./output", exist_ok=True)

    # Save kernel inputs.
    x.detach().numpy().tofile("./input/input_x.bin")
    dy.numpy().tofile("./input/input_dy.bin")

    # Save the golden backward result.
    dx.numpy().tofile("./output/golden.bin")

    print("Data generation completed:")
    print(f"  - Input x shape: {x.shape}, range: [{x.min():.3f}, {x.max():.3f}]")
    print(f"  - Input dy shape: {dy.shape}, range: [{dy.min():.3f}, {dy.max():.3f}]")
    print(f"  - Forward output out shape: {out.shape}, range: [{out.min():.3f}, {out.max():.3f}]")
    print(f"  - Backward gradient dx shape: {dx.shape}, range: [{dx.min():.3f}, {dx.max():.3f}]")
    print(f"  - Alpha (negative_slope): {negative_slope}")

def main() -> None:
    """Script entry point: generate and dump the golden data set."""
    gen_golden_data_simple()


if __name__ == "__main__":
    main()