import torch
import copy
import numpy    as np
import torch.nn as nn
from   torch.utils.cpp_extension import load

#! JIT-compile the C++ source with PyTorch's cpp_extension and import it as a
#  Python module; it exposes softmax.forward / softmax.backward used below.
#  (First run compiles softmax.cc; later runs reuse the cached build.)
softmax = load(name="softmax", sources=["softmax.cc"])

#! 2. Define a Softmax operator that supports forward and backward computation
#  (original comment said "GeLU" — copy-paste slip; the class below is softmax)
class MySoftmaxAutoGrad(torch.autograd.Function):
    """Autograd bridge to the compiled `softmax` extension.

    Forward delegates to softmax.forward and stashes the result; backward
    feeds the saved output and the incoming gradient to softmax.backward.
    """

    @staticmethod
    def forward(ctx, x):
        """Compute softmax via the extension; save the output for backward."""
        out = softmax.forward(x)
        # The softmax Jacobian can be expressed from the output alone,
        # so only the result needs to be kept for the backward pass.
        ctx.save_for_backward(out)
        return out

    @staticmethod
    def backward(ctx, grad_output):
        """Propagate grad_output through softmax using the saved output."""
        (saved_out,) = ctx.saved_tensors
        return softmax.backward(grad_output, saved_out)
    

class Softmax(nn.Module):
    """nn.Module facade over the custom MySoftmaxAutoGrad function."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        """Apply the custom softmax autograd function to `x`."""
        return MySoftmaxAutoGrad.apply(x)
    
#! 3. Test: gradcheck compares the analytic backward against numerical gradients.
if __name__ == "__main__":
    for _ in range(100):
        # Fixed input so every iteration exercises the same deterministic case.
        data = np.array([1, 1, 1, 1, 4])
        # NOTE(review): gradcheck officially expects double-precision inputs;
        # float32 with eps=1e-3 is fragile. Switch to torch.float64 if the
        # compiled extension supports double tensors — TODO confirm.
        x = torch.tensor(data, dtype=torch.float32, requires_grad=True)

        # Raises / returns False if the custom backward disagrees with the
        # numerically-estimated Jacobian.
        flag = torch.autograd.gradcheck(MySoftmaxAutoGrad.apply, (x,), eps=1e-3)

        print(f"success = {flag}")