from transformers import PretrainedConfig


class FlashSTUConfig(PretrainedConfig):
    """Configuration for the Flash STU model."""

    model_type = "FlashSTU"

    def __init__(
        self,
        bsz: int = 8,                  # batch size
        n_embd: int = 768,             # embedding (model) dimension
        n_heads: int = 12,             # number of attention heads
        n_layers: int = 12,            # number of layers
        seq_len: int = 4096,           # maximum sequence length
        window_size: int = 64,         # sliding-window size for the attention layers
        vocab_size: int = 200064,      # vocabulary size
        mlp_scale: int = 4,            # MLP hidden-dimension multiplier
        bias: bool = False,            # whether linear layers include a bias term
        dropout: float = 0.0,          # dropout probability
        num_eigh: int = 16,            # number of spectral (Hankel) filters
        use_hankel_L: bool = False,    # use the Hankel-L filter variant
        use_flash_fft: bool = True,    # use FlashFFTConv for the spectral convolution
        use_approx: bool = True,       # use the approximate STU formulation
        softcap: float = 50.0,         # logit soft-capping value
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.bsz = bsz
        self.n_embd = n_embd
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.seq_len = seq_len
        self.window_size = window_size
        self.vocab_size = vocab_size
        self.mlp_scale = mlp_scale
        self.bias = bias
        self.dropout = dropout
        self.num_eigh = num_eigh
        self.use_hankel_L = use_hankel_L
        self.use_flash_fft = use_flash_fft
        self.use_approx = use_approx
        self.softcap = softcap
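
A minimal usage sketch follows. The override values and the save directory are illustrative; save_pretrained and from_pretrained are inherited from transformers.PretrainedConfig and serialize the configuration to and from a config.json file on disk.

config = FlashSTUConfig(n_layers=24, seq_len=8192)      # override a few defaults
config.save_pretrained("flash-stu-config")               # writes config.json to the directory
reloaded = FlashSTUConfig.from_pretrained("flash-stu-config")
assert reloaded.n_layers == 24 and reloaded.seq_len == 8192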