import numpy as np
import torch
import torch.nn as nn


def ba_activation(x, weights, a, epsilon):
    # Ensure the input is a float32 tensor.
    x = torch.as_tensor(x, dtype=torch.float32)

    # Scale the input by the learnable per-unit weights.
    x = weights * x

    # Normalize to [-1, 1] so the power and log terms stay well behaved.
    x_normalized = torch.clamp(x, -1, 1)

    # Fractional-power term: |x|^x on the normalized input.
    fractional_inspired = torch.pow(torch.abs(x_normalized), x_normalized)

    # Oscillatory term; the small constant 1e-7 avoids log(0).
    activation_result = epsilon * torch.cos(
        np.pi * a * fractional_inspired * torch.log(torch.abs(fractional_inspired) + 1e-7)
    )

    # Squash the result into (-1, 1).
    activation_result = torch.tanh(activation_result)

    return activation_result
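
# Quick illustrative check (a sketch; the sizes and unit weights are arbitrary): the
# activation preserves the input shape, and with epsilon=0.1 the cosine term is bounded
# by 0.1, so after the tanh every output stays within roughly +/-0.1.
_demo_out = ba_activation(torch.randn(3, 4), torch.ones(4), a=0.5, epsilon=0.1)
print(_demo_out.shape, float(_demo_out.abs().max()))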


class CustomModel(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(CustomModel, self).__init__()
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, output_size)
        # Learnable per-unit weights plus fixed hyperparameters for the activation.
        self.weights = nn.Parameter(torch.randn(hidden_size))
        self.a = 0.5
        self.epsilon = 0.1

    def forward(self, x):
        x = self.linear1(x)
        x = ba_activation(x, self.weights, self.a, self.epsilon)
        x = self.linear2(x)
        return x
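

# Minimal usage sketch, assuming standard PyTorch (the layer sizes below are
# hypothetical): build CustomModel and push a random batch through the forward pass.
if __name__ == "__main__":
    model = CustomModel(input_size=8, hidden_size=16, output_size=2)
    batch = torch.randn(5, 8)   # 5 samples, 8 features each
    out = model(batch)
    print(out.shape)            # expected: torch.Size([5, 2])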