Transformers · PyTorch · code · custom_code · Inference Endpoints
codesage committed on
Commit
4a23933
1 Parent(s): 9e71a7f

Create modeling_codesage.py

Files changed (1)
  1. modeling_codesage.py +46 -0
modeling_codesage.py ADDED
@@ -0,0 +1,46 @@
+ #!/usr/bin/env python
+ # coding=utf-8
+ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+
+ from transformers.configuration_utils import PretrainedConfig
+
+
+ class CodeSageConfig(PretrainedConfig):
+     model_type = "bert"
+
+     def __init__(
+         self,
+         vocab_size=50257,
+         max_position_embeddings=1024,
+         hidden_size=768,
+         num_hidden_layers=12,
+         num_attention_heads=12,
+         intermediate_size=3072,
+         activation_function="gelu_new",
+         residual_dropout_prob=0.1,
+         embedding_dropout_prob=0.1,
+         attention_dropout_prob=0.1,
+         layer_norm_epsilon=1e-5,
+         initializer_range=0.02,
+         position_embedding_type='absolute',
+         bos_token_id=0,
+         eos_token_id=0,
+         pad_token_id=49153,
+         **kwargs
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.intermediate_size = intermediate_size
+         assert 'gelu' in activation_function
+         self.activation_function = activation_function
+         self.residual_dropout_prob = residual_dropout_prob
+         self.embedding_dropout_prob = embedding_dropout_prob
+         self.attention_dropout_prob = attention_dropout_prob
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.initializer_range = initializer_range
+         self.position_embedding_type = position_embedding_type
+
+         super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)