import torch
import torch.nn.functional as F
from torch import nn

from ..utils import USE_PEFT_BACKEND
from .lora import LoRACompatibleLinear


ACTIVATION_FUNCTIONS = {
    "swish": nn.SiLU(),
    "silu": nn.SiLU(),
    "mish": nn.Mish(),
    "gelu": nn.GELU(),
    "relu": nn.ReLU(),
}


def get_activation(act_fn: str) -> nn.Module:
    """Helper function to get activation function from string.

    Args:
        act_fn (str): Name of activation function.

    Returns:
        nn.Module: Activation function.
    """
    act_fn = act_fn.lower()
    if act_fn in ACTIVATION_FUNCTIONS:
        return ACTIVATION_FUNCTIONS[act_fn]
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
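# Illustrative usage (a minimal sketch, not part of the public API surface): the registry
# above maps lowercase names to pre-instantiated, stateless modules, so lookups are
# case-insensitive and repeated calls return the same module instance.
# >>> act = get_activation("SiLU")
# >>> act(torch.randn(2, 8)).shape
# torch.Size([2, 8])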


class GELU(nn.Module):
    r"""
    GELU activation function, with optional tanh approximation via `approximate="tanh"`.

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
        approximate (`str`, *optional*, defaults to `"none"`): If `"tanh"`, use tanh approximation.
    """

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate: torch.Tensor) -> torch.Tensor:
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16, so compute in float32 and cast back
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
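# Illustrative usage (a minimal sketch): GELU here fuses a linear projection with the
# activation, as used in transformer feed-forward blocks.
# >>> gelu_proj = GELU(dim_in=64, dim_out=256, approximate="tanh")
# >>> gelu_proj(torch.randn(1, 16, 64)).shape
# torch.Size([1, 16, 256])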


class GEGLU(nn.Module):
    r"""
    A [variant](https://arxiv.org/abs/2002.05202) of the gated linear unit activation function.

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear

        self.proj = linear_cls(dim_in, dim_out * 2)

    def gelu(self, gate: torch.Tensor) -> torch.Tensor:
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16, so compute in float32 and cast back
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states, scale: float = 1.0):
        # `scale` is only consumed by LoRACompatibleLinear; with the PEFT backend the
        # projection is a plain nn.Linear and the argument is dropped.
        args = () if USE_PEFT_BACKEND else (scale,)
        hidden_states, gate = self.proj(hidden_states, *args).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
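# Illustrative usage (a minimal sketch): the projection widens to 2 * dim_out, then one
# half is gated by the GELU of the other half, so the output width is dim_out.
# >>> geglu = GEGLU(dim_in=64, dim_out=256)
# >>> geglu(torch.randn(1, 16, 64)).shape
# torch.Size([1, 16, 256])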


class ApproximateGELU(nn.Module):
    r"""
    The approximate form of the Gaussian Error Linear Unit (GELU). For more details, see section 2 of this
    [paper](https://arxiv.org/abs/1606.08415).

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.proj(x)
        # Sigmoid-based approximation of GELU: x * sigmoid(1.702 * x)
        return x * torch.sigmoid(1.702 * x)
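# Illustrative check (a minimal sketch): the sigmoid form x * sigmoid(1.702 * x) closely
# tracks the exact (erf-based) GELU, with only a small absolute deviation for moderate inputs.
# >>> x = torch.linspace(-3.0, 3.0, steps=7)
# >>> approx = x * torch.sigmoid(1.702 * x)
# >>> torch.allclose(approx, F.gelu(x), atol=0.03)
# True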