guillermoruiz committed on
Commit
fbd0a16
1 Parent(s): 7666933

Upload TFBilma

Browse files
Files changed (3) hide show
  1. config.json +1 -1
  2. configuration_bilma.py +1 -1
  3. tf_model.h5 +1 -1
config.json CHANGED
@@ -9,7 +9,7 @@
9
  },
10
  "hidden_dropout_prob": 0.1,
11
  "hidden_size": 512,
12
- "model_type": "TFBilma",
13
  "num_attention_heads": 4,
14
  "num_hidden_layers": 2,
15
  "seq_max_length": 280,
 
9
  },
10
  "hidden_dropout_prob": 0.1,
11
  "hidden_size": 512,
12
+ "model_type": "Bilma",
13
  "num_attention_heads": 4,
14
  "num_hidden_layers": 2,
15
  "seq_max_length": 280,
configuration_bilma.py CHANGED
@@ -1,7 +1,7 @@
1
  from transformers import PretrainedConfig
2
 
3
  class BilmaConfig(PretrainedConfig):
4
- model_type = "TFBilma"
5
 
6
  def __init__(
7
  self,
 
1
  from transformers import PretrainedConfig
2
 
3
  class BilmaConfig(PretrainedConfig):
4
+ model_type = "Bilma"
5
 
6
  def __init__(
7
  self,
tf_model.h5 CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8b59ee0afffe8c9dd59ccfd63b8a32bba0cd2dfade0d70e124436d9ff773f908
3
  size 156562964
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3fa26aa874976519680daa037613b66a5edc0021c69039e2b4e01f20e499f690
3
  size 156562964