import torch

from common.diff_engine import DiffCase

import activation


class FusedAddRMSNorm(torch.nn.Module):
    """Eager reference for fused add + RMSNorm: adds the residual in
    PyTorch, then normalizes with the extension's standalone rms_norm."""

    def __init__(self, d, eps=1e-6, dtype: torch.dtype = torch.float32):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.ones(d, dtype=dtype))
        self.eps = eps

    def forward(self, x, residual):
        # Return both the normalized output and the pre-norm sum, so the
        # fused kernel's residual output can be compared as well.
        h = x + residual
        return activation.rms_norm(h, self.weight, self.eps), h
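

# For reference, RMSNorm is assumed here to follow the standard definition
# below (a documentation-only sketch; the exact semantics of
# activation.rms_norm, e.g. its accumulation dtype, may differ):
#
#   y = x / sqrt(mean(x**2, dim=-1, keepdim=True) + eps) * weight
def _rms_norm_reference(x, weight, eps):
    # Pure-PyTorch sketch of the expected math, not used by the case.
    variance = x.pow(2).mean(dim=-1, keepdim=True)
    return x * torch.rsqrt(variance + eps) * weight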


class AddRMS(DiffCase):
    """Diff case comparing the eager reference above against the
    extension's fused CUDA FusedAddRMSNorm layer."""

    def build_inputs(self, bs, sl, hidden, dtype, eps):
        return {
            "x": torch.randn(bs, sl, hidden, dtype=dtype,
                             requires_grad=True),
            "residual": torch.randn(bs, sl, hidden, dtype=dtype,
                                    requires_grad=True),
            "weight": torch.ones(hidden, dtype=dtype),
            "dim": hidden,
            "eps": eps,
            "dtype": dtype,
        }

    def make_naive(self, I):
        m = FusedAddRMSNorm(I["dim"], I["eps"], dtype=I["dtype"])
        m.weight = torch.nn.Parameter(I["weight"].detach().clone())
        return m

    def make_cuda(self, I):
        m = activation.layers.FusedAddRMSNorm(I["dim"], I["eps"],
                                              dtype=I["dtype"])
        m.weight = torch.nn.Parameter(I["weight"].detach().clone())
        return m

    def forward(self, obj, I):
        return obj(I["x"], I["residual"])

    def grad_inputs(self, I):
        return [I["x"], I["residual"]]


CASE = AddRMS()
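

# A minimal smoke-test sketch (an addition, not part of the DiffCase
# harness): it exercises only the eager reference path defined above.
# It assumes the `activation` extension is importable, since even the
# reference module calls activation.rms_norm.
if __name__ == "__main__":
    inputs = CASE.build_inputs(bs=2, sl=8, hidden=64,
                               dtype=torch.float32, eps=1e-6)
    naive = CASE.make_naive(inputs)
    out, h = CASE.forward(naive, inputs)
    print(out.shape, h.shape)  # both torch.Size([2, 8, 64])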