bwang0911 committed
Commit 9f46b9b
1 Parent(s): 39192fe

Upload 9 files

bert_padding (4).py ADDED
@@ -0,0 +1,156 @@
+# Copyright 2022 MosaicML Examples authors
+# SPDX-License-Identifier: Apache-2.0
+
+# Adapted from https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/bert_padding.py
+# Which was adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py
+
+"""Helper functions for padding and unpadding batches."""
+
+from typing import Tuple, cast
+
+import torch
+import torch.nn.functional as F
+from einops import rearrange, repeat
+
+
+class IndexFirstAxis(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, input: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:
+        """Get just the values of `input` which are at `indices`.
+
+        Arguments:
+            ctx: the autograd context object
+            input: (b, ...) 2+ dimensional tensor
+            indices: (num_idx) 1D tensor
+        """
+        ctx.save_for_backward(indices)
+        assert input.ndim >= 2
+        ctx.first_axis_dim, other_shape = (
+            input.shape[0],
+            input.shape[1:],
+        )  # type: ignore
+        second_dim = other_shape.numel()  # product of sizes of all but first dimension
+        # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
+        return torch.gather(
+            rearrange(input, 'b ... -> b (...)'),  # (b, ...) -> (b, second_dim)
+            0,
+            repeat(
+                indices, 'z -> z d', d=second_dim
+            ),  # (indices,) -> (indices, second_dim)
+        ).reshape(
+            -1, *other_shape
+        )  # (num_idx, ...)
+
+    @staticmethod
+    def backward(ctx, grad_output: torch.Tensor) -> Tuple[torch.Tensor, None]:
+        (indices,) = ctx.saved_tensors
+        assert grad_output.ndim >= 2
+        other_shape = grad_output.shape[1:]
+        grad_output = rearrange(grad_output, 'b ... -> b (...)')
+        grad_input = torch.zeros(
+            [ctx.first_axis_dim, grad_output.shape[1]],
+            device=grad_output.device,
+            dtype=grad_output.dtype,
+        )
+        # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
+        # grad_input[indices] = grad_output
+        grad_input.scatter_(
+            0, repeat(indices, 'z -> z d', d=grad_output.shape[1]), grad_output
+        )
+        return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
+
+
+index_first_axis = IndexFirstAxis.apply
+
+
+class IndexPutFirstAxis(torch.autograd.Function):
+    @staticmethod
+    def forward(
+        ctx, values: torch.Tensor, indices: torch.Tensor, first_axis_dim
+    ) -> torch.Tensor:
+        ctx.save_for_backward(indices)
+        assert indices.ndim == 1
+        assert values.ndim >= 2
+        output = torch.zeros(
+            first_axis_dim, *values.shape[1:], device=values.device, dtype=values.dtype
+        )
+        output[indices] = values
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output: torch.Tensor) -> Tuple[torch.Tensor, None, None]:
+        (indices,) = ctx.saved_tensors
+        grad_values = grad_output[indices]
+        return grad_values, None, None
+
+
+index_put_first_axis = IndexPutFirstAxis.apply
+
+
+def unpad_input(
+    hidden_states: torch.Tensor,
+    attention_mask: torch.Tensor,
+) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int]:
+    """Remove padding from input sequences.
+
+    Arguments:
+        hidden_states: (batch, seqlen, ...)
+        attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
+
+    Returns:
+        hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
+        indices: (total_nnz)
+        cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
+        max_seqlen_in_batch: int
+    """
+    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+    max_seqlen_in_batch = int(seqlens_in_batch.max().item())
+    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+    # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
+    # bool mask, then call nonzero to get the indices, then index with those. The indices is @dim
+    # times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
+    # index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
+    # so we write custom forward and backward to make it a bit faster.
+    hidden_states = cast(
+        torch.Tensor,
+        index_first_axis(rearrange(hidden_states, 'b s ... -> (b s) ...'), indices),
+    )
+    return hidden_states, indices, cu_seqlens, max_seqlen_in_batch
+
+
+def unpad_input_only(
+    hidden_states: torch.Tensor,
+    attention_mask: torch.Tensor,
+) -> torch.Tensor:
+    """Like unpad_input, but only return the unpadded first tensor.
+
+    Saves a small amount of overhead.
+
+    Arguments:
+        hidden_states: (batch, seqlen, ...)
+        attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
+
+    Returns:
+        hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
+    """
+    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+    return index_first_axis(rearrange(hidden_states, 'b s ... -> (b s) ...'), indices)
+
+
+def pad_input(
+    hidden_states: torch.Tensor, indices: torch.Tensor, batch: int, seqlen: int
+) -> torch.Tensor:
+    """Add padding to sequences.
+
+    Arguments:
+        hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
+        indices: (total_nnz)
+        batch: int batch_size
+        seqlen: int max sequence length
+
+    Returns:
+        hidden_states: (batch, seqlen, ...)
+    """
+    output = index_put_first_axis(hidden_states, indices, batch * seqlen)
+    return rearrange(output, '(b s) ... -> b s ...', b=batch)
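As an illustration of how these helpers fit together, here is a minimal round-trip sketch (not part of the upload; it assumes the file above is saved as `bert_padding.py`, and the batch, mask, and sizes are made up for illustration):

```python
import torch

from bert_padding import pad_input, unpad_input  # assumes the file is importable as bert_padding.py

batch, seqlen, dim = 2, 4, 8
hidden_states = torch.randn(batch, seqlen, dim)
# 1 = real token, 0 = padding; the second sequence has one padded position.
attention_mask = torch.tensor([[1, 1, 1, 1],
                               [1, 1, 1, 0]])

unpadded, indices, cu_seqlens, max_seqlen = unpad_input(hidden_states, attention_mask)
print(unpadded.shape)  # torch.Size([7, 8]) -- only the 7 non-padded tokens remain
print(cu_seqlens)      # tensor([0, 4, 7], dtype=torch.int32)

# Restore the (batch, seqlen, ...) layout; padded positions come back as zeros.
repadded = pad_input(unpadded, indices, batch, seqlen)
assert repadded.shape == (batch, seqlen, dim)
```

`cu_seqlens` is the cumulative-length vector that the attention modules in `modeling_jbert.py` use to know where each sequence starts inside the flattened token dimension.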
config (6).json ADDED
@@ -0,0 +1,33 @@
+{
+  "_name_or_path": "jinaai/jina-bert-b-en-v1",
+  "model_max_length": 8192,
+  "architectures": [
+    "JBertForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "auto_map": {
+    "AutoConfig": "configuration_jbert.JBertConfig",
+    "AutoModelForMaskedLM": "modeling_jbert.JBertForMaskedLM",
+    "AutoModel": "modeling_jbert.JBertModel",
+    "AutoModelForSequenceClassification": "modeling_jbert.JBertForSequenceClassification"
+  },
+  "classifier_dropout": null,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.26.0",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30528
+}
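Because `auto_map` points the Auto classes at the bundled `configuration_jbert.py` / `modeling_jbert.py`, loading this checkpoint requires `trust_remote_code=True`. A minimal loading sketch (the path below is a placeholder for the actual repository directory or Hub id):

```python
from transformers import AutoConfig, AutoModel

model_path = "path/to/this/repo"  # placeholder: local directory or Hub id containing these files

config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)  # resolves JBertConfig
model = AutoModel.from_pretrained(model_path, trust_remote_code=True)    # resolves JBertModel
print(config.model_max_length)  # 8192
```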
configuration_jbert (3).py ADDED
@@ -0,0 +1,26 @@
+# Copyright 2022 MosaicML Examples authors
+# SPDX-License-Identifier: Apache-2.0
+
+from transformers import BertConfig as TransformersBertConfig
+
+
+class JBertConfig(TransformersBertConfig):
+    def __init__(
+        self,
+        model_max_length: int = 8192,
+        attention_probs_dropout_prob: float = 0.0,
+        **kwargs,
+    ):
+        """Configuration class for JBert (adapted from MosaicBert).
+
+        Args:
+            model_max_length (int): Use `model_max_length` to determine how large of an alibi tensor to
+                create when initializing the model. You should be able to ignore this parameter in most cases.
+                Defaults to 8192.
+            attention_probs_dropout_prob (float): By default, turn off attention dropout in Mosaic BERT
+                (otherwise, Flash Attention will be off by default). Defaults to 0.0.
+        """
+        super().__init__(
+            attention_probs_dropout_prob=attention_probs_dropout_prob, **kwargs
+        )
+        self.model_max_length = model_max_length
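For reference, the extra field can also be set when constructing the config directly. A minimal sketch (the values mirror the `config.json` above and it assumes `configuration_jbert.py` is on the import path):

```python
from configuration_jbert import JBertConfig

config = JBertConfig(
    vocab_size=30528,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    model_max_length=8192,            # size of the pre-built ALiBi tensor
    attention_probs_dropout_prob=0.0,
)
print(config.model_max_length)  # 8192
```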
generation_config (3).json ADDED
@@ -0,0 +1,5 @@
+{
+  "_from_model_config": true,
+  "pad_token_id": 0,
+  "transformers_version": "4.26.0"
+}
gitattributes (3).txt ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
modeling_jbert (3).py ADDED
@@ -0,0 +1,908 @@
1
+ # Copyright 2022 MosaicML Examples authors
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
5
+ # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
6
+ # Copyright (c) 2022, Tri Dao.
7
+
8
+ import copy
9
+ import logging
10
+ import math
11
+ import warnings
12
+ from typing import List, Optional, Tuple, Union
13
+
14
+ import torch
15
+ import torch.nn as nn
16
+ from einops import rearrange
17
+ from transformers.activations import ACT2FN
18
+ from transformers.modeling_outputs import (
19
+ MaskedLMOutput,
20
+ SequenceClassifierOutput,
21
+ BaseModelOutputWithPastAndCrossAttentions,
22
+ BaseModelOutputWithPoolingAndCrossAttentions,
23
+ )
24
+ from transformers.models.bert.modeling_bert import BertPreTrainedModel
25
+
26
+ from .bert_padding import (index_first_axis, index_put_first_axis, pad_input,
27
+ unpad_input, unpad_input_only)
28
+ from .configuration_jbert import JBertConfig
29
+
30
+ logger = logging.getLogger(__name__)
31
+
32
+
33
+ class JBertEmbeddings(nn.Module):
34
+ """Construct the embeddings for words, ignoring position.
35
+
36
+ There are no positional embeddings: position information comes from ALiBi
+ attention biases, so only word and token_type embeddings are used.
38
+
39
+ This module is modeled after the Hugging Face BERT's
40
+ :class:`~transformers.model.bert.modeling_bert.BertEmbeddings`, but is
41
+ modified to implement ALiBi. The key change is
42
+ that position embeddings are removed. Position information instead comes
43
+ from attention biases that scale linearly with the position distance
44
+ between query and key tokens.
45
+
46
+ This module ignores the `position_ids` input to the `forward` method.
47
+ """
48
+
49
+ def __init__(self, config):
50
+ super().__init__()
51
+ self.word_embeddings = nn.Embedding(
52
+ config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id
53
+ )
54
+ # ALiBi doesn't use position embeddings
55
+ self.token_type_embeddings = nn.Embedding(
56
+ config.type_vocab_size, config.hidden_size
57
+ )
58
+
59
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model
60
+ # variable name and be able to load any TensorFlow checkpoint file
61
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
62
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
63
+ self.register_buffer(
64
+ "token_type_ids", torch.zeros((1, config.model_max_length), dtype=torch.long), persistent=False
65
+ )
66
+
67
+ def forward(
68
+ self,
69
+ input_ids: Optional[torch.LongTensor] = None,
70
+ token_type_ids: Optional[torch.LongTensor] = None,
71
+ position_ids: Optional[torch.LongTensor] = None,
72
+ inputs_embeds: Optional[torch.FloatTensor] = None,
73
+ past_key_values_length: int = 0,
74
+ ) -> torch.Tensor:
75
+ if (input_ids is not None) == (inputs_embeds is not None):
76
+ raise ValueError('Must specify either input_ids or input_embeds!')
77
+ if input_ids is not None:
78
+ input_shape = input_ids.size()
79
+ else:
80
+ assert inputs_embeds is not None # just for type checking
81
+ input_shape = inputs_embeds.size()[:-1]
82
+
83
+ seq_length = input_shape[1]
84
+
85
+ if position_ids is not None:
86
+ warnings.warn('position_ids is not used in JBertEmbeddings as it does not have position embeddings.')
87
+
88
+ # Setting the token_type_ids to the registered buffer in constructor
89
+ # where it is all zeros, which usually occurs when it's auto-generated;
90
+ # registered buffer helps users when tracing the model without passing
91
+ # token_type_ids, solves issue #5664
92
+ if token_type_ids is None:
93
+ if hasattr(self, 'token_type_ids'):
94
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
95
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(
96
+ input_shape[0], seq_length
97
+ )
98
+ token_type_ids = buffered_token_type_ids_expanded # type: ignore
99
+ else:
100
+ token_type_ids = torch.zeros(
101
+ input_shape, # type: ignore
102
+ dtype=torch.long,
103
+ device=self.word_embeddings.weight.device,
104
+ ) # type: ignore # yapf: disable
105
+
106
+ if inputs_embeds is None:
107
+ inputs_embeds = self.word_embeddings(input_ids)
108
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
109
+
110
+ embeddings = inputs_embeds + token_type_embeddings
111
+ embeddings = self.LayerNorm(embeddings)
112
+ embeddings = self.dropout(embeddings)
113
+ return embeddings
114
+
115
+
116
+ class BertUnpadSelfAttention(nn.Module):
117
+ """Performs multi-headed self attention on a batch of unpadded sequences.
118
+
119
+ This version computes attention with a math-equivalent PyTorch implementation (no Triton /
+ Flash Attention kernel is invoked here): the ALiBi bias is added directly to the attention
+ scores, and attention dropout is applied to the attention probabilities according to
+ `config.attention_probs_dropout_prob`.
124
+
125
+ See `forward` method for additional detail.
126
+ """
127
+
128
+ def __init__(self, config):
129
+ super().__init__()
130
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(
131
+ config, 'embedding_size'
132
+ ):
133
+ raise ValueError(
134
+ f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention '
135
+ f'heads ({config.num_attention_heads})'
136
+ )
137
+
138
+ self.num_attention_heads = config.num_attention_heads
139
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
140
+ # TODO: self.all_head_size == config.hidden_size? Why not just use config.hidden_size?
141
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
142
+
143
+ self.Wqkv = nn.Linear(self.all_head_size, 3 * config.hidden_size)
144
+
145
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
146
+
147
+ def forward(
148
+ self,
149
+ hidden_states: torch.Tensor,
150
+ cu_seqlens: torch.Tensor,
151
+ max_seqlen_in_batch: int,
152
+ indices: torch.Tensor,
153
+ attn_mask: torch.Tensor,
154
+ bias: torch.Tensor,
155
+ ) -> torch.Tensor:
156
+ """Perform self-attention.
157
+
158
+ Attention is computed with a standard PyTorch implementation of self-attention; the ALiBi/mask
+ bias is added to the raw attention scores.
160
+
161
+ The arguments are unpadded, and our implementations of attention require padded arguments,
162
+ so we first call `pad_input`. Once we compute attention, we re-unpad our outputs for the other layers.
163
+ The pad/unpad operations add overhead, but not sending pad tokens through the feed-forward layers saves compute.
164
+ It is possible to write an unpadded implementation of attention (in Triton and PyTorch), which we will eventually do.
165
+
166
+ Args:
167
+ hidden_states: (total_nnz, dim)
168
+ cu_seqlens: (batch + 1,)
169
+ max_seqlen_in_batch: int
170
+ indices: (total_nnz,)
171
+ attn_mask: (batch, max_seqlen_in_batch)
172
+ bias: (batch, heads, max_seqlen_in_batch, max_seqlen_in_batch)
173
+
174
+ Returns:
175
+ attention: (total_nnz, dim)
176
+ """
177
+ qkv = self.Wqkv(hidden_states)
178
+ qkv = pad_input(
179
+ qkv, indices, cu_seqlens.shape[0] - 1, max_seqlen_in_batch
180
+ ) # batch, max_seqlen_in_batch, thd
181
+ qkv = rearrange(
182
+ qkv, 'b s (t h d) -> b s t h d', t=3, h=self.num_attention_heads
183
+ )
184
+ # Compute attention with a standard (non-fused) PyTorch implementation; the ALiBi/mask bias is added to the scores below.
185
+ q = qkv[:, :, 0, :, :].permute(0, 2, 1, 3) # b h s d
186
+ k = qkv[:, :, 1, :, :].permute(0, 2, 3, 1) # b h d s
187
+ v = qkv[:, :, 2, :, :].permute(0, 2, 1, 3) # b h s d
188
+ attention_scores = torch.matmul(q, k) / math.sqrt(self.attention_head_size)
189
+ attention_scores = attention_scores + bias
190
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
191
+ attention_probs = self.dropout(attention_probs)
192
+ attention_probs = attention_probs.to(dtype=v.dtype)
193
+ attention = torch.matmul(attention_probs, v).permute(0, 2, 1, 3) # b s h d
194
+
195
+ # attn_mask is 1 for attend and 0 for don't
196
+ attention = unpad_input_only(attention, torch.squeeze(attn_mask) == 1)
197
+ return rearrange(attention, 'nnz h d -> nnz (h d)')
198
+
199
+
200
+ # Copy of transformer's library BertSelfOutput that will not be caught by surgery methods looking for HF BERT modules.
201
+ class BertSelfOutput(nn.Module):
202
+ """Computes the output of the attention layer.
203
+
204
+ This module is modeled after the Hugging Face BERT's
205
+ :class:`~transformers.model.bert.modeling_bert.BertSelfOutput`.
206
+ The implementation is identical. Rather than use the original module
207
+ directly, we re-implement it here so that Mosaic BERT's modules will not
208
+ be affected by any Composer surgery algorithm that modifies Hugging Face
209
+ BERT modules.
210
+ """
211
+
212
+ def __init__(self, config):
213
+ super().__init__()
214
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
215
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
216
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
217
+
218
+ def forward(
219
+ self, hidden_states: torch.Tensor, input_tensor: torch.Tensor
220
+ ) -> torch.Tensor:
221
+ hidden_states = self.dense(hidden_states)
222
+ hidden_states = self.dropout(hidden_states)
223
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
224
+ return hidden_states
225
+
226
+
227
+ class BertUnpadAttention(nn.Module):
228
+ """Chains attention, Dropout, and LayerNorm for Mosaic BERT."""
229
+
230
+ def __init__(self, config):
231
+ super().__init__()
232
+ self.self = BertUnpadSelfAttention(config)
233
+ self.output = BertSelfOutput(config)
234
+
235
+ def forward(
236
+ self,
237
+ input_tensor: torch.Tensor,
238
+ cu_seqlens: torch.Tensor,
239
+ max_s: int,
240
+ subset_idx: Optional[torch.Tensor] = None,
241
+ indices: Optional[torch.Tensor] = None,
242
+ attn_mask: Optional[torch.Tensor] = None,
243
+ bias: Optional[torch.Tensor] = None,
244
+ ) -> torch.Tensor:
245
+ """Forward pass for scaled self-attention without padding.
246
+
247
+ Arguments:
248
+ input_tensor: (total_nnz, dim)
249
+ cu_seqlens: (batch + 1,)
250
+ max_s: int
251
+ subset_idx: () set of indices whose values we care about at the end of the layer
252
+ (e.g., the masked tokens, if this is the final layer).
253
+ indices: None or (total_nnz,)
254
+ attn_mask: None or (batch, max_seqlen_in_batch)
255
+ bias: None or (batch, heads, max_seqlen_in_batch, max_seqlen_in_batch)
256
+ """
257
+ self_output = self.self(
258
+ input_tensor, cu_seqlens, max_s, indices, attn_mask, bias
259
+ )
260
+ if subset_idx is not None:
261
+ return self.output(
262
+ index_first_axis(self_output, subset_idx),
263
+ index_first_axis(input_tensor, subset_idx),
264
+ )
265
+ else:
266
+ return self.output(self_output, input_tensor)
267
+
268
+
269
+ class BertGatedLinearUnitMLP(nn.Module):
270
+ """Applies the FFN at the end of each Mosaic BERT layer.
271
+
272
+ Compared to the default BERT architecture, this block replaces :class:`~transformers.model.bert.modeling_bert.BertIntermediate`
273
+ and :class:`~transformers.model.bert.modeling_bert.SelfOutput` with a single module that has similar functionality, but
274
+ introduces Gated Linear Units.
275
+
276
+ Note: Mosaic BERT adds parameters in order to implement Gated Linear Units. To keep parameter count consistent with that of a
277
+ standard Hugging Face BERT, scale down `config.intermediate_size` by 2/3. For example, a Mosaic BERT constructed with
278
+ `config.intermediate_size=2048` will have the same parameter footprint as its Hugging Face BERT counterpart constructed
279
+ with `config.intermediate_size=3072`.
280
+ However, in most cases it will not be necessary to adjust `config.intermediate_size` since, despite the increased
281
+ parameter size, Mosaic BERT typically offers a net higher throughput than a Hugging Face BERT built from the same `config`.
282
+ """
283
+
284
+ def __init__(self, config):
285
+ super().__init__()
286
+ self.config = config
287
+ self.gated_layers = nn.Linear(
288
+ config.hidden_size, config.intermediate_size * 2, bias=False
289
+ )
290
+ self.act = nn.GELU(approximate='none')
291
+ self.wo = nn.Linear(config.intermediate_size, config.hidden_size)
292
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
293
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
294
+
295
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
296
+ """Compute new hidden states from current hidden states.
297
+
298
+ Args:
299
+ hidden_states (torch.Tensor): The (unpadded) hidden states from
300
+ the attention layer [nnz, dim].
301
+ """
302
+ residual_connection = hidden_states
303
+ # compute the activation
304
+ hidden_states = self.gated_layers(hidden_states)
305
+ gated = hidden_states[:, : self.config.intermediate_size]
306
+ non_gated = hidden_states[:, self.config.intermediate_size :]
307
+ hidden_states = self.act(gated) * non_gated
308
+ hidden_states = self.dropout(hidden_states)
309
+ # multiply by the second matrix
310
+ hidden_states = self.wo(hidden_states)
311
+ # add the residual connection and post-LN
312
+ hidden_states = self.layernorm(hidden_states + residual_connection)
313
+ return hidden_states
314
+
315
+
316
+ class BertLayer(nn.Module):
317
+ """Composes the Mosaic BERT attention and FFN blocks into a single layer."""
318
+
319
+ def __init__(self, config: JBertConfig):
320
+ super().__init__()
321
+ self.attention = BertUnpadAttention(config)
322
+ self.mlp = BertGatedLinearUnitMLP(config)
323
+
324
+ def forward(
325
+ self,
326
+ hidden_states: torch.Tensor,
327
+ cu_seqlens: torch.Tensor,
328
+ seqlen: int,
329
+ subset_idx: Optional[torch.Tensor] = None,
330
+ indices: Optional[torch.Tensor] = None,
331
+ attn_mask: Optional[torch.Tensor] = None,
332
+ bias: Optional[torch.Tensor] = None,
333
+ ) -> torch.Tensor:
334
+ """Forward pass for a BERT layer, including both attention and MLP.
335
+
336
+ Args:
337
+ hidden_states: (total_nnz, dim)
338
+ cu_seqlens: (batch + 1,)
339
+ seqlen: int
340
+ subset_idx: () set of indices whose values we care about at the end of the layer
341
+ (e.g., the masked tokens, if this is the final layer).
342
+ indices: None or (total_nnz,)
343
+ attn_mask: None or (batch, max_seqlen_in_batch)
344
+ bias: None or (batch, heads, max_seqlen_in_batch, max_seqlen_in_batch)
345
+ """
346
+ attention_output = self.attention(
347
+ hidden_states, cu_seqlens, seqlen, subset_idx, indices, attn_mask, bias
348
+ )
349
+ layer_output = self.mlp(attention_output)
350
+ return layer_output
351
+
352
+
353
+ class JBertEncoder(nn.Module):
354
+ """A stack of BERT layers providing the backbone.
355
+
356
+ This module is modeled after the Hugging Face BERT's :class:`~transformers.model.bert.modeling_bert.BertEncoder`,
357
+ but with substantial modifications to implement unpadding and ALiBi.
358
+
359
+ Compared to the analogous Hugging Face BERT module, this module handles unpadding to reduce unnecessary computation
360
+ at padded tokens, and pre-computes attention biases to implement ALiBi.
361
+ """
362
+
363
+ def __init__(self, config: JBertConfig):
364
+ super().__init__()
365
+ self.layer = nn.ModuleList(
366
+ [BertLayer(config) for _ in range(config.num_hidden_layers)]
367
+ )
368
+
369
+ self.num_attention_heads = config.num_attention_heads
370
+
371
+ # The alibi mask will be dynamically expanded if it is too small for
372
+ # the input the model receives. But it generally helps to initialize it
373
+ # to a reasonably large size to help pre-allocate CUDA memory.
374
+ # The default `model_max_length` is 8192.
375
+ self._current_alibi_size = int(config.model_max_length)
376
+ self.alibi = torch.zeros(
377
+ (
378
+ 1,
379
+ self.num_attention_heads,
380
+ self._current_alibi_size,
381
+ self._current_alibi_size,
382
+ )
383
+ )
384
+ self.rebuild_alibi_tensor(size=config.model_max_length)
385
+
386
+ def rebuild_alibi_tensor(
387
+ self, size: int, device: Optional[Union[torch.device, str]] = None
388
+ ):
389
+ # Alibi
390
+ # Following https://github.com/ofirpress/attention_with_linear_biases/issues/5 (Implementation 1)
391
+ # In the causal case, you can exploit the fact that softmax is invariant to a uniform translation
392
+ # of the logits, which makes the math work out *after* applying causal masking. If no causal masking
393
+ # will be applied, it is necessary to construct the diagonal mask.
394
+ n_heads = self.num_attention_heads
395
+
396
+ def _get_alibi_head_slopes(n_heads: int) -> List[float]:
397
+ def get_slopes_power_of_2(n_heads: int) -> List[float]:
398
+ start = 2 ** (-(2 ** -(math.log2(n_heads) - 3)))
399
+ ratio = start
400
+ return [start * ratio**i for i in range(n_heads)]
401
+
402
+ # In the paper, they only train models that have 2^a heads for some a. This function
403
+ # has some good properties that only occur when the input is a power of 2. To
404
+ # maintain that even when the number of heads is not a power of 2, we use a
405
+ # workaround.
406
+ if math.log2(n_heads).is_integer():
407
+ return get_slopes_power_of_2(n_heads)
408
+
409
+ closest_power_of_2 = 2 ** math.floor(math.log2(n_heads))
410
+ slopes_a = get_slopes_power_of_2(closest_power_of_2)
411
+ slopes_b = _get_alibi_head_slopes(2 * closest_power_of_2)
412
+ slopes_b = slopes_b[0::2][: n_heads - closest_power_of_2]
413
+ return slopes_a + slopes_b
414
+
415
+ context_position = torch.arange(size, device=device)[:, None]
416
+ memory_position = torch.arange(size, device=device)[None, :]
417
+ relative_position = torch.abs(memory_position - context_position)
418
+ # [n_heads, max_token_length, max_token_length]
419
+ relative_position = relative_position.unsqueeze(0).expand(n_heads, -1, -1)
420
+ slopes = torch.Tensor(_get_alibi_head_slopes(n_heads)).to(device)
421
+ alibi = slopes.unsqueeze(1).unsqueeze(1) * -relative_position
422
+ # [1, n_heads, max_token_length, max_token_length]
423
+ alibi = alibi.unsqueeze(0)
424
+ assert alibi.shape == torch.Size([1, n_heads, size, size])
425
+
426
+ self._current_alibi_size = size
427
+ self.alibi = alibi
428
+
429
+ def forward(
430
+ self,
431
+ hidden_states: torch.Tensor,
432
+ attention_mask: Optional[torch.FloatTensor] = None,
433
+ head_mask: Optional[torch.FloatTensor] = None,
434
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
435
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
436
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
437
+ use_cache: Optional[bool] = None,
438
+ output_attentions: Optional[bool] = False,
439
+ output_hidden_states: Optional[bool] = False,
440
+ return_dict: Optional[bool] = True,
441
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
442
+ all_hidden_states = [] if output_hidden_states else None
443
+
444
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
445
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
446
+
447
+ attention_mask_bool = attention_mask.bool()
448
+ batch, seqlen = hidden_states.shape[:2]
449
+ # Unpad inputs and mask. It will remove tokens that are padded.
450
+ # Assume ntokens is total number of tokens (padded and non-padded)
451
+ # and ntokens_unpad is total number of non-padded tokens.
452
+ # Then unpadding performs the following compression of the inputs:
453
+ # hidden_states[ntokens,hidden] -> hidden_states[ntokens_unpad,hidden]
454
+ hidden_states, indices, cu_seqlens, _ = unpad_input(
455
+ hidden_states, attention_mask_bool
456
+ )
457
+
458
+ # Add alibi matrix to extended_attention_mask
459
+ if self._current_alibi_size < seqlen:
460
+ # Rebuild the alibi tensor when needed
461
+ warnings.warn(
462
+ f'Increasing alibi size from {self._current_alibi_size} to {seqlen}'
463
+ )
464
+ self.rebuild_alibi_tensor(size=seqlen, device=hidden_states.device)
465
+ elif self.alibi.device != hidden_states.device:
466
+ # Device catch-up
467
+ self.alibi = self.alibi.to(hidden_states.device)
468
+ alibi_bias = self.alibi[:, :, :seqlen, :seqlen]
469
+ attn_bias = extended_attention_mask[:, :, :seqlen, :seqlen]
470
+ alibi_attn_mask = attn_bias + alibi_bias
471
+
472
+ for layer_module in self.layer:
473
+ if output_hidden_states:
474
+ all_hidden_states.append(rearrange(hidden_states, '(b n) d -> b n d', b=batch))
475
+ hidden_states = layer_module(
476
+ hidden_states,
477
+ cu_seqlens,
478
+ seqlen,
479
+ None,
480
+ indices,
481
+ attn_mask=attention_mask,
482
+ bias=alibi_attn_mask,
483
+ )
484
+ # Pad inputs and mask. It will insert back zero-padded tokens.
485
+ # Assume ntokens is total number of tokens (padded and non-padded)
486
+ # and ntokens_unpad is total number of non-padded tokens.
487
+ # Then padding performs the following de-compression:
488
+ # hidden_states[ntokens_unpad,hidden] -> hidden_states[ntokens,hidden]
489
+ hidden_states = pad_input(hidden_states, indices, batch, seqlen)
490
+
491
+ if output_hidden_states:
492
+ all_hidden_states.append(hidden_states)
493
+
494
+ if not return_dict:
495
+ return tuple(
496
+ v for v in [hidden_states, all_hidden_states] if v is not None
497
+ )
498
+ return BaseModelOutputWithPastAndCrossAttentions(
499
+ last_hidden_state=hidden_states,
500
+ past_key_values=None,
501
+ hidden_states=all_hidden_states,
502
+ attentions=None,
503
+ cross_attentions=None,
504
+ )
505
+
506
+
507
+ class JBertPooler(nn.Module):
508
+ def __init__(self, config):
509
+ super().__init__()
510
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
511
+ self.activation = nn.Tanh()
512
+
513
+ def forward(
514
+ self, hidden_states: torch.Tensor, pool: Optional[bool] = True
515
+ ) -> torch.Tensor:
516
+ # We "pool" the model by simply taking the hidden state corresponding
517
+ # to the first token.
518
+ first_token_tensor = hidden_states[:, 0] if pool else hidden_states
519
+ pooled_output = self.dense(first_token_tensor)
520
+ pooled_output = self.activation(pooled_output)
521
+ return pooled_output
522
+
523
+
524
+ class BertPredictionHeadTransform(nn.Module):
525
+ def __init__(self, config):
526
+ super().__init__()
527
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
528
+ if isinstance(config.hidden_act, str):
529
+ self.transform_act_fn = ACT2FN[config.hidden_act]
530
+ else:
531
+ self.transform_act_fn = config.hidden_act
532
+ self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=1e-12)
533
+
534
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
535
+ hidden_states = self.dense(hidden_states)
536
+ hidden_states = self.transform_act_fn(hidden_states)
537
+ hidden_states = self.LayerNorm(hidden_states)
538
+ return hidden_states
539
+
540
+
541
+ class JBertModel(BertPreTrainedModel):
542
+ """Overall BERT model.
543
+
544
+ Args:
545
+ config: a JBertConfig class instance with the configuration to build a new model
546
+
547
+ Inputs:
548
+ `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
549
+ with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
550
+ `extract_features.py`, `run_classifier.py` and `run_squad.py`)
551
+ `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
552
+ types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
553
+ a `sentence B` token (see BERT paper for more details).
554
+ `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
555
+ selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
556
+ input sequence length in the current batch. It's the mask that we typically use for attention when
557
+ a batch has varying length sentences.
558
+ `output_hidden_states`: an optional boolean; when `True`, the hidden states of every encoder layer
+ are returned in addition to those of the final layer.
+
+ Outputs: a `BaseModelOutputWithPoolingAndCrossAttentions` (or a plain tuple when `return_dict=False`) containing:
+ `last_hidden_state`: a torch.FloatTensor of shape [batch_size, sequence_length, hidden_size] with the
+ output of the final encoder layer,
+ `pooler_output`: a torch.FloatTensor of shape [batch_size, hidden_size] obtained by passing the hidden
+ state of the first token ([CLS]) through a linear layer and a Tanh activation (`None` if the model
+ was built with `add_pooling_layer=False`).
570
+
571
+ Example usage:
572
+ ```python
573
+ # Already been converted into WordPiece token ids
574
+ input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
575
+ input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
576
+ token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
577
+ config = JBertConfig(vocab_size=32000, hidden_size=768,
+ num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
579
+ model = JBertModel(config=config)
580
+ all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
581
+ ```
582
+ """
583
+
584
+ config_class = JBertConfig
585
+
586
+ def __init__(self, config, add_pooling_layer=True):
587
+ super().__init__(config)
588
+ self.embeddings = JBertEmbeddings(config)
589
+ self.encoder = JBertEncoder(config)
590
+ self.pooler = JBertPooler(config) if add_pooling_layer else None
591
+ self.post_init()
592
+
593
+ def get_input_embeddings(self):
594
+ return self.embeddings.word_embeddings
595
+
596
+ def set_input_embeddings(self, value):
597
+ self.embeddings.word_embeddings = value
598
+
599
+ def forward(
600
+ self,
601
+ input_ids: torch.Tensor,
602
+ attention_mask: Optional[torch.Tensor] = None,
603
+ token_type_ids: Optional[torch.Tensor] = None,
604
+ position_ids: Optional[torch.Tensor] = None,
605
+ head_mask: Optional[torch.Tensor] = None,
606
+ inputs_embeds: Optional[torch.Tensor] = None,
607
+ encoder_hidden_states: Optional[torch.Tensor] = None,
608
+ encoder_attention_mask: Optional[torch.Tensor] = None,
609
+ output_attentions: Optional[bool] = False,
610
+ output_hidden_states: Optional[bool] = False,
611
+ return_dict: Optional[bool] = True,
612
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
613
+ if attention_mask is None:
614
+ attention_mask = torch.ones_like(input_ids)
615
+ if token_type_ids is None:
616
+ token_type_ids = torch.zeros_like(input_ids)
617
+
618
+ embedding_output = self.embeddings(input_ids, token_type_ids, position_ids)
619
+
620
+ encoder_outputs: BaseModelOutputWithPastAndCrossAttentions = self.encoder(
621
+ hidden_states=embedding_output,
622
+ attention_mask=attention_mask,
623
+ output_hidden_states=output_hidden_states,
624
+ return_dict=return_dict,
625
+ )
626
+
627
+ sequence_output = encoder_outputs[0]
628
+ pooled_output = (
629
+ self.pooler(sequence_output) if self.pooler is not None else None
630
+ )
631
+
632
+ if not return_dict:
633
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
634
+
635
+ #return encoder_outputs, None
636
+ return BaseModelOutputWithPoolingAndCrossAttentions(
637
+ last_hidden_state=sequence_output,
638
+ pooler_output=pooled_output,
639
+ past_key_values=encoder_outputs.past_key_values,
640
+ hidden_states=encoder_outputs.hidden_states,
641
+ attentions=encoder_outputs.attentions,
642
+ cross_attentions=encoder_outputs.cross_attentions,
643
+ )
644
+
645
+
646
+ ###################
647
+ # Bert Heads
648
+ ###################
649
+ class BertLMPredictionHead(nn.Module):
650
+ def __init__(self, config, bert_model_embedding_weights):
651
+ super().__init__()
652
+ self.transform = BertPredictionHeadTransform(config)
653
+ # The output weights are the same as the input embeddings, but there is
654
+ # an output-only bias for each token.
655
+ self.decoder = nn.Linear(
656
+ bert_model_embedding_weights.size(1), bert_model_embedding_weights.size(0)
657
+ )
658
+ self.decoder.weight = bert_model_embedding_weights
659
+
660
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
661
+ hidden_states = self.transform(hidden_states)
662
+ hidden_states = self.decoder(hidden_states)
663
+ return hidden_states
664
+
665
+
666
+ class BertOnlyMLMHead(nn.Module):
667
+ def __init__(self, config, bert_model_embedding_weights):
668
+ super().__init__()
669
+ self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
670
+
671
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
672
+ prediction_scores = self.predictions(sequence_output)
673
+ return prediction_scores
674
+
675
+
676
+ class BertOnlyNSPHead(nn.Module):
677
+ def __init__(self, config):
678
+ super().__init__()
679
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
680
+
681
+ def forward(self, pooled_output: torch.Tensor) -> torch.Tensor:
682
+ seq_relationship_score = self.seq_relationship(pooled_output)
683
+ return seq_relationship_score
684
+
685
+
686
+ #####################
687
+ # Various Bert models
688
+ #####################
689
+ class JBertForMaskedLM(BertPreTrainedModel):
690
+ config_class = JBertConfig
691
+
692
+ def __init__(self, config):
693
+ super().__init__(config)
694
+
695
+ if config.is_decoder:
696
+ warnings.warn(
697
+ 'If you want to use `JBertForMaskedLM` make sure `config.is_decoder=False` for '
698
+ 'bi-directional self-attention.'
699
+ )
700
+
701
+ self.bert = JBertModel(config, add_pooling_layer=False)
702
+ self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
703
+
704
+ # Initialize weights and apply final processing
705
+ self.post_init()
706
+
707
+ def get_output_embeddings(self):
708
+ return self.cls.predictions.decoder
709
+
710
+ def set_output_embeddings(self, new_embeddings):
711
+ self.cls.predictions.decoder = new_embeddings
712
+
713
+ def forward(
714
+ self,
715
+ input_ids: Optional[torch.Tensor] = None,
716
+ attention_mask: Optional[torch.Tensor] = None,
717
+ token_type_ids: Optional[torch.Tensor] = None,
718
+ position_ids: Optional[torch.Tensor] = None,
719
+ head_mask: Optional[torch.Tensor] = None,
720
+ inputs_embeds: Optional[torch.Tensor] = None,
721
+ encoder_hidden_states: Optional[torch.Tensor] = None,
722
+ encoder_attention_mask: Optional[torch.Tensor] = None,
723
+ labels: Optional[torch.Tensor] = None,
724
+ output_attentions: Optional[bool] = None,
725
+ output_hidden_states: Optional[bool] = None,
726
+ return_dict: Optional[bool] = None,
727
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
728
+ # labels should be a `torch.LongTensor` of shape
729
+ # `(batch_size, sequence_length)`. These are used for computing the
730
+ # masked language modeling loss.
731
+ #
732
+ # Indices should be in `[-100, 0, ..., config.vocab_size]` (see
733
+ # `input_ids` docstring) Tokens with indices set to `-100` are ignored
734
+ # (masked), the loss is only computed for the tokens with labels in `[0,
735
+ # ..., config.vocab_size]`
736
+ #
737
+ # Prediction scores are only computed for masked tokens and the (bs,
738
+ # seqlen) dimensions are flattened
739
+ if (input_ids is not None) == (inputs_embeds is not None):
740
+ raise ValueError('Must specify either input_ids or input_embeds!')
741
+
742
+ return_dict = (
743
+ return_dict if return_dict is not None else self.config.use_return_dict
744
+ )
745
+
746
+ outputs = self.bert(
747
+ input_ids,
748
+ attention_mask=attention_mask,
749
+ token_type_ids=token_type_ids,
750
+ position_ids=position_ids,
751
+ head_mask=head_mask,
752
+ inputs_embeds=inputs_embeds,
753
+ encoder_hidden_states=encoder_hidden_states,
754
+ encoder_attention_mask=encoder_attention_mask,
755
+ output_attentions=output_attentions,
756
+ output_hidden_states=output_hidden_states,
757
+ return_dict=return_dict,
758
+ )
759
+
760
+ sequence_output = outputs[0]
761
+ prediction_scores = self.cls(sequence_output)
762
+
763
+ loss = None
764
+ if labels is not None:
765
+ # Compute loss
766
+ loss_fct = nn.CrossEntropyLoss()
767
+ loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
768
+
769
+ if not return_dict:
770
+ output = (prediction_scores,) + outputs[2:]
771
+ return ((loss,) + output) if loss is not None else output
772
+
773
+ return MaskedLMOutput(
774
+ loss=loss,
775
+ logits=prediction_scores,
776
+ hidden_states=outputs.hidden_states,
777
+ attentions=outputs.attentions,
778
+ )
779
+
780
+ def prepare_inputs_for_generation(
781
+ self, input_ids: torch.Tensor, attention_mask: torch.Tensor, **model_kwargs
782
+ ):
783
+ input_shape = input_ids.shape
784
+ effective_batch_size = input_shape[0]
785
+
786
+ # add a dummy token
787
+ if self.config.pad_token_id is None:
788
+ raise ValueError('The PAD token should be defined for generation')
789
+
790
+ attention_mask = torch.cat(
791
+ [attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))],
792
+ dim=-1,
793
+ )
794
+ dummy_token = torch.full(
795
+ (effective_batch_size, 1),
796
+ self.config.pad_token_id,
797
+ dtype=torch.long,
798
+ device=input_ids.device,
799
+ )
800
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
801
+
802
+ return {'input_ids': input_ids, 'attention_mask': attention_mask}
803
+
804
+
805
+
806
+ class JBertForSequenceClassification(BertPreTrainedModel):
807
+ """Bert Model transformer with a sequence classification/regression head.
808
+
809
+ This head is just a linear layer on top of the pooled output. Used for,
810
+ e.g., GLUE tasks.
811
+ """
812
+
813
+ config_class = JBertConfig
814
+
815
+ def __init__(self, config):
816
+ super().__init__(config)
817
+ self.num_labels = config.num_labels
818
+ self.config = config
819
+
820
+ self.bert = JBertModel(config)
821
+ classifier_dropout = (
822
+ config.classifier_dropout
823
+ if config.classifier_dropout is not None
824
+ else config.hidden_dropout_prob
825
+ )
826
+ self.dropout = nn.Dropout(classifier_dropout)
827
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
828
+
829
+ # Initialize weights and apply final processing
830
+ self.post_init()
831
+
832
+ def forward(
833
+ self,
834
+ input_ids: Optional[torch.Tensor] = None,
835
+ attention_mask: Optional[torch.Tensor] = None,
836
+ token_type_ids: Optional[torch.Tensor] = None,
837
+ position_ids: Optional[torch.Tensor] = None,
838
+ head_mask: Optional[torch.Tensor] = None,
839
+ inputs_embeds: Optional[torch.Tensor] = None,
840
+ labels: Optional[torch.Tensor] = None,
841
+ output_attentions: Optional[bool] = None,
842
+ output_hidden_states: Optional[bool] = None,
843
+ return_dict: Optional[bool] = None,
844
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
845
+ # labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
846
+ # Labels for computing the sequence classification/regression loss.
847
+ # Indices should be in `[0, ..., config.num_labels - 1]`.
848
+ # If `config.num_labels == 1` a regression loss is computed
849
+ # (mean-square loss). If `config.num_labels > 1` a classification loss
850
+ # is computed (cross-entropy).
851
+
852
+ return_dict = (
853
+ return_dict if return_dict is not None else self.config.use_return_dict
854
+ )
855
+
856
+ outputs = self.bert(
857
+ input_ids,
858
+ attention_mask=attention_mask,
859
+ token_type_ids=token_type_ids,
860
+ position_ids=position_ids,
861
+ head_mask=head_mask,
862
+ inputs_embeds=inputs_embeds,
863
+ output_attentions=output_attentions,
864
+ output_hidden_states=output_hidden_states,
865
+ return_dict=return_dict,
866
+ )
867
+
868
+ pooled_output = outputs[1]
869
+
870
+ pooled_output = self.dropout(pooled_output)
871
+ logits = self.classifier(pooled_output)
872
+
873
+ loss = None
874
+ if labels is not None:
875
+ # Compute loss
876
+ if self.config.problem_type is None:
877
+ if self.num_labels == 1:
878
+ self.config.problem_type = 'regression'
879
+ elif self.num_labels > 1 and (
880
+ labels.dtype == torch.long or labels.dtype == torch.int
881
+ ):
882
+ self.config.problem_type = 'single_label_classification'
883
+ else:
884
+ self.config.problem_type = 'multi_label_classification'
885
+
886
+ if self.config.problem_type == 'regression':
887
+ loss_fct = nn.MSELoss()
888
+ if self.num_labels == 1:
889
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
890
+ else:
891
+ loss = loss_fct(logits, labels)
892
+ elif self.config.problem_type == 'single_label_classification':
893
+ loss_fct = nn.CrossEntropyLoss()
894
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
895
+ elif self.config.problem_type == 'multi_label_classification':
896
+ loss_fct = nn.BCEWithLogitsLoss()
897
+ loss = loss_fct(logits, labels)
898
+
899
+ if not return_dict:
900
+ output = (logits,) + outputs[2:]
901
+ return ((loss,) + output) if loss is not None else output
902
+
903
+ return SequenceClassifierOutput(
904
+ loss=loss,
905
+ logits=logits,
906
+ hidden_states=None,
907
+ attentions=None,
908
+ )
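To make the ALiBi construction in `JBertEncoder.rebuild_alibi_tensor` easier to follow, here is a small standalone sketch (not part of the uploaded files; shapes chosen arbitrarily) that reproduces the head-slope recipe and the symmetric distance bias used above:

```python
import math

import torch


def alibi_slopes(n_heads: int):
    """Per-head ALiBi slopes; exact for powers of two, interleaved otherwise."""

    def slopes_power_of_2(n: int):
        start = 2 ** (-(2 ** -(math.log2(n) - 3)))
        return [start * start**i for i in range(n)]

    if math.log2(n_heads).is_integer():
        return slopes_power_of_2(n_heads)
    closest = 2 ** math.floor(math.log2(n_heads))
    return (
        slopes_power_of_2(closest)
        + alibi_slopes(2 * closest)[0::2][: n_heads - closest]
    )


# For the 12-head config above, the first heads get slopes 1/2, 1/4, 1/8, ...
print([round(s, 4) for s in alibi_slopes(12)[:4]])  # [0.5, 0.25, 0.125, 0.0625]

# The bias for a head is slope * -|i - j|; it is symmetric because no causal mask is applied.
size, n_heads = 8, 12
positions = torch.arange(size)
distance = (positions[None, :] - positions[:, None]).abs()              # (size, size)
bias = torch.tensor(alibi_slopes(n_heads))[:, None, None] * -distance   # (n_heads, size, size)
assert bias.shape == (n_heads, size, size)
```

In the encoder this bias is pre-built once at `model_max_length`, sliced to the current sequence length, and added to the (masked) attention scores in each layer.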
special_tokens_map (6).json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
tokenizer_config (7).json ADDED
@@ -0,0 +1,15 @@
+{
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 8192,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
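The tokenizer itself is a stock `BertTokenizer`; only `model_max_length` is raised to 8192 to match the model. A loading sketch (placeholder path, as above):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder: this repository

print(tokenizer.model_max_length)  # 8192
batch = tokenizer(
    ["a short example sentence"],
    padding=True,
    truncation=True,
    max_length=8192,
    return_tensors="pt",
)
print(batch["input_ids"].shape)
```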
vocab (4).txt ADDED
The diff for this file is too large to render. See raw diff