guillermoruiz committed
Commit d685038
1 Parent(s): 1c23911

Upload Bilma

Files changed (3)
  1. config.json +1 -1
  2. modeling_bilma.py +26 -7
  3. tf_model.h5 +1 -1
config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "architectures": [
-    "TFAutoModelForMaskedLM"
+    "Bilma"
   ],
   "auto_map": {
     "AutoConfig": "configuration_bilma.BilmaConfig",
modeling_bilma.py CHANGED
@@ -9,7 +9,7 @@ from typing import Dict
 import re
 import unicodedata
 
-from .configuration_bilma import BilmaConfig
+from configuration_bilma import BilmaConfig
 
 # copied from preprocessing.py
 BLANK = ' '
@@ -32,9 +32,10 @@ SYMBOLS = set(";:,.@\\-\"/" + SYMBOLS_)
 
 class Bilma(TFPreTrainedModel):
     config_class = BilmaConfig
-    main_input_name = "input_ids"
+    main_input_name = "capt_input"
 
     def __init__(self, config):
+        self.seq_max_length = config.seq_max_length
         super().__init__(config)
         #if config.weights == "spanish":
         #    my_resources = importlib_resources.files("hf_bilma")
@@ -48,15 +49,33 @@ class Bilma(TFPreTrainedModel):
                            ff_dim=config.embedding_dim,
                            vocab_size=config.vocab_size,
                            rate=config.drop_rate)
-        self.call(np.zeros((1, config.seq_max_length)))
-
+
+        #self.call(np.zeros((1, config.seq_max_length)))
+
     @property
     def dummy_inputs(self) -> Dict[str, tf.Tensor]:
-        dummies = {"capt_input":self.model.inputs[0]}
+
+        dummies = {}
+        for key, spec in self.input_signature.items():
+            dummy_shape = [dim if dim is not None else 2 for dim in spec.shape]
+            if spec.shape[0] is None:
+                dummy_shape[0] = 1
+            dummies[key] = tf.ones(shape=dummy_shape, dtype=spec.dtype)
+
+
         return dummies
 
-    def call(self, tensor):
-        return self.model(tensor)
+    @property
+    def input_signature(self) -> Dict[str, tf.TensorSpec]:
+        sig = {}
+        sig["capt_input"] = tf.TensorSpec([None, self.seq_max_length], tf.int32, name="capt_input")
+        return sig
+
+
+    def call(self, capt_input):
+        #if isinstance(tensor, dict) and len(tensor) == 0:
+        #    return self.model(self.dummy_inputs)
+        return self.model(capt_input)
 
 
 #
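As a reading aid, a small sketch of what the new input_signature / dummy_inputs pair produces; seq_max_length = 280 is an illustrative value, the real one comes from BilmaConfig:

import tensorflow as tf

seq_max_length = 280  # illustrative; the model reads config.seq_max_length

# What the new input_signature property declares: one int32 tensor named "capt_input".
sig = {"capt_input": tf.TensorSpec([None, seq_max_length], tf.int32, name="capt_input")}

# What dummy_inputs builds from that spec: the None batch dimension becomes 1,
# so Keras can trace the network before the H5 weights are restored.
dummies = {"capt_input": tf.ones([1, seq_max_length], dtype=tf.int32)}

# call() now takes the tensor under that same name, e.g. (model assumed loaded):
# outputs = model(dummies["capt_input"])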
tf_model.h5 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:42a62ff751c90b4e9cdf2f33f2744732dcb79a061db2dda2a344477dec95a6ab
+oid sha256:f932984cd1b53af396b362f3b882736143583d47f4c86f356e7ae359b6bcba7c
 size 156561684