from fairseq.models import register_model_architecture
from fairseq.models.transformer import base_architecture
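
# The functions below register scaled-up variants of fairseq's "transformer"
# model. Each one sets hyperparameter defaults (only when the value was not
# already supplied on the command line, hence the getattr(...) pattern) and
# then defers to base_architecture() for everything else.
#
# Usage sketch (illustrative only; the data-bin/ path and the training flags
# are placeholders, not taken from this repo):
#
#   fairseq-train data-bin/ \
#       --arch transformer_2x \
#       --task translation \
#       --optimizer adam --lr 5e-4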
@register_model_architecture("transformer", "transformer_2x")
def transformer_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
base_architecture(args)
@register_model_architecture("transformer", "transformer_4x")
def transformer_huge(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1536)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1536)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
base_architecture(args)
@register_model_architecture("transformer", "transformer_9x")
def transformer_xlarge(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 2048)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 8192)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 2048)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 8192)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
base_architecture(args)
@register_model_architecture("transformer", "transformer_12e12d_9xeq")
def transformer_vxlarge(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1536)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1536)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.decoder_layers = getattr(args, "decoder_layers", 12)
base_architecture(args)
@register_model_architecture("transformer", "transformer_18_18")
def transformer_deep(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 8 * 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 8 * 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.encoder_layers = getattr(args, "encoder_layers", 18)
args.decoder_layers = getattr(args, "decoder_layers", 18)
base_architecture(args)
@register_model_architecture("transformer", "transformer_24_24")
def transformer_xdeep(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 8 * 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 8 * 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.encoder_layers = getattr(args, "encoder_layers", 24)
args.decoder_layers = getattr(args, "decoder_layers", 24)
base_architecture(args)
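

if __name__ == "__main__":
    # Illustrative sketch, not part of the original file: an architecture
    # function can be applied to a bare argparse.Namespace to inspect the
    # defaults it fills in when nothing is supplied on the command line.
    import argparse

    ns = argparse.Namespace()
    transformer_xdeep(ns)
    print(ns.encoder_layers, ns.decoder_layers, ns.encoder_embed_dim)  # 24 24 1024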