Team Finetuner committed on
Commit
0f4070e
1 Parent(s): 43b8513

chore: update from 07ce15d58b77559fce77ea89e92d398f28663bd9

Browse files
Files changed (3) hide show
  1. configuration_bert.py +4 -45
  2. flash_attn_triton.py +0 -1
  3. modeling_bert.py +1 -27
configuration_bert.py CHANGED
@@ -24,54 +24,10 @@ from transformers.utils import logging
24
 
25
  logger = logging.get_logger(__name__)
26
 
27
- BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
28
- "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
29
- "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
30
- "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
31
- "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
32
- "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
33
- "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
34
- "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
35
- "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
36
- "bert-large-uncased-whole-word-masking": (
37
- "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
38
- ),
39
- "bert-large-cased-whole-word-masking": (
40
- "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
41
- ),
42
- "bert-large-uncased-whole-word-masking-finetuned-squad": (
43
- "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
44
- ),
45
- "bert-large-cased-whole-word-masking-finetuned-squad": (
46
- "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
47
- ),
48
- "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
49
- "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
50
- "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
51
- "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
52
- "cl-tohoku/bert-base-japanese-whole-word-masking": (
53
- "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
54
- ),
55
- "cl-tohoku/bert-base-japanese-char": (
56
- "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
57
- ),
58
- "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
59
- "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
60
- ),
61
- "TurkuNLP/bert-base-finnish-cased-v1": (
62
- "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
63
- ),
64
- "TurkuNLP/bert-base-finnish-uncased-v1": (
65
- "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
66
- ),
67
- "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
68
- # See all BERT models at https://huggingface.co/models?filter=bert
69
- }
70
-
71
 
72
  class JinaBertConfig(PretrainedConfig):
73
  r"""
74
- This is the configuration class to store the configuration of a [`BertModel`] or a [`TFBertModel`]. It is used to
75
  instantiate a BERT model according to the specified arguments, defining the model architecture. Instantiating a
76
  configuration with the defaults will yield a similar configuration to that of the BERT
77
  [bert-base-uncased](https://huggingface.co/bert-base-uncased) architecture.
@@ -149,6 +105,9 @@ class JinaBertConfig(PretrainedConfig):
149
 
150
  >>> # Accessing the model configuration
151
  >>> configuration = model.config
 
 
 
152
  ```"""
153
  model_type = "bert"
154
 
 
24
 
25
  logger = logging.get_logger(__name__)
26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
  class JinaBertConfig(PretrainedConfig):
29
  r"""
30
+ This is the configuration class to store the configuration of a [`JinaBertModel`]. It is used to
31
  instantiate a BERT model according to the specified arguments, defining the model architecture. Instantiating a
32
  configuration with the defaults will yield a similar configuration to that of the BERT
33
  [bert-base-uncased](https://huggingface.co/bert-base-uncased) architecture.
 
105
 
106
  >>> # Accessing the model configuration
107
  >>> configuration = model.config
108
+
109
+ >>> # Encode text inputs
110
+ >>> embeddings = model.encode(text_inputs)
111
  ```"""
112
  model_type = "bert"
113
 
flash_attn_triton.py CHANGED
@@ -1156,5 +1156,4 @@ class FlashAttnFunc(torch.autograd.Function):
1156
  )
1157
  return dq, dk, dv, None, None, None
1158
 
1159
-
1160
  flash_attn_func = FlashAttnFunc.apply
 
1156
  )
1157
  return dq, dk, dv, None, None, None
1158
 
 
1159
  flash_attn_func = FlashAttnFunc.apply
modeling_bert.py CHANGED
@@ -101,33 +101,6 @@ _SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_1'"
101
  _SEQ_CLASS_EXPECTED_LOSS = 0.01
102
 
103
 
104
- BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
105
- "bert-base-uncased",
106
- "bert-large-uncased",
107
- "bert-base-cased",
108
- "bert-large-cased",
109
- "bert-base-multilingual-uncased",
110
- "bert-base-multilingual-cased",
111
- "bert-base-chinese",
112
- "bert-base-german-cased",
113
- "bert-large-uncased-whole-word-masking",
114
- "bert-large-cased-whole-word-masking",
115
- "bert-large-uncased-whole-word-masking-finetuned-squad",
116
- "bert-large-cased-whole-word-masking-finetuned-squad",
117
- "bert-base-cased-finetuned-mrpc",
118
- "bert-base-german-dbmdz-cased",
119
- "bert-base-german-dbmdz-uncased",
120
- "cl-tohoku/bert-base-japanese",
121
- "cl-tohoku/bert-base-japanese-whole-word-masking",
122
- "cl-tohoku/bert-base-japanese-char",
123
- "cl-tohoku/bert-base-japanese-char-whole-word-masking",
124
- "TurkuNLP/bert-base-finnish-cased-v1",
125
- "TurkuNLP/bert-base-finnish-uncased-v1",
126
- "wietsedv/bert-base-dutch-cased",
127
- # See all BERT models at https://huggingface.co/models?filter=bert
128
- ]
129
-
130
-
131
  def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
132
  """Load tf checkpoints in a pytorch model."""
133
  try:
@@ -2396,3 +2369,4 @@ class JinaBertForQuestionAnswering(JinaBertPreTrainedModel):
2396
  hidden_states=outputs.hidden_states,
2397
  attentions=outputs.attentions,
2398
  )
 
 
101
  _SEQ_CLASS_EXPECTED_LOSS = 0.01
102
 
103
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
  def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
105
  """Load tf checkpoints in a pytorch model."""
106
  try:
 
2369
  hidden_states=outputs.hidden_states,
2370
  attentions=outputs.attentions,
2371
  )
2372
+