import os

import torch
from torch import nn
from torch.autograd import Function
from torch.nn import functional as F


module_path = os.path.dirname(__file__)


class FusedLeakyReLU(nn.Module):
    """Leaky ReLU with a learnable per-channel bias and a constant output scale.

    The bias is added along dimension 1 (channels), a leaky ReLU is applied,
    and the result is multiplied by `scale` (sqrt(2) by default) to compensate
    for the reduction in activation magnitude caused by the nonlinearity.
    """

    def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
        super().__init__()

        self.bias = nn.Parameter(torch.zeros(channel))
        self.negative_slope = negative_slope
        self.scale = scale

    def forward(self, input):
        return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)


def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5):
    if input.device.type == "cpu":
        # CPU fallback: emulate the fused op with stock PyTorch operations.
        if bias is not None:
            # Reshape the per-channel bias so it broadcasts over the trailing
            # (e.g. spatial) dimensions of the input.
            rest_dim = [1] * (input.ndim - bias.ndim - 1)
            return (
                F.leaky_relu(
                    input + bias.view(1, bias.shape[0], *rest_dim),
                    negative_slope=negative_slope,
                )
                * scale
            )

        else:
            return F.leaky_relu(input, negative_slope=negative_slope) * scale

    else:
        # CUDA path: defer to the autograd Function that wraps the fused
        # bias + leaky ReLU kernel (defined elsewhere in this module).
        return FusedLeakyReLUFunction.apply(
            input.contiguous(), bias, negative_slope, scale
        )
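

if __name__ == "__main__":
    # Minimal CPU smoke test (a sketch, not part of the original module): the
    # CPU branch of fused_leaky_relu needs only stock PyTorch, so the layer can
    # be exercised without building the fused CUDA extension.
    layer = FusedLeakyReLU(channel=8)
    x = torch.randn(2, 8, 4, 4)
    out = layer(x)
    print(out.shape)  # expected: torch.Size([2, 8, 4, 4])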