Bo1015 committed on
Commit
613b707
1 Parent(s): 870d719

Upload 9 files

config.json CHANGED
@@ -1,21 +1,21 @@
 {
-  "_name_or_path": "biomap-research/xtrimopglm-3b-clm",
+  "_name_or_path": "proteinglm-3b-clm",
   "add_bias_linear": true,
   "add_qkv_bias": true,
   "apply_query_key_layer_scaling": true,
   "apply_residual_connection_post_layernorm": true,
   "architectures": [
-    "xTrimoPGLMModel"
+    "ProteinGLMModel"
   ],
   "attention_dropout": 0.0,
   "attention_softmax_in_fp32": true,
   "auto_map": {
-    "AutoConfig": "configuration_xtrimopglm.xTrimoPGLMConfig",
-    "AutoModel": "modeling_xtrimopglm.xTrimoPGLMForMaskedLM",
-    "AutoModelForCausalLM": "modeling_xtrimopglm.xTrimoPGLMForCasualLM",
-    "AutoModelForMaskedLM": "modeling_xtrimopglm.xTrimoPGLMForMaskedLM",
-    "AutoModelForSequenceClassification": "modeling_xtrimopglm.xTrimoPGLMForSequenceClassification",
-    "AutoModelForTokenClassification": "modeling_xtrimopglm.xTrimoPGLMForTokenClassification"
+    "AutoConfig": "configuration_proteinglm.ProteinGLMConfig",
+    "AutoModel": "modeling_proteinglm.ProteinGLMForMaskedLM",
+    "AutoModelForCausalLM": "modeling_proteinglm.ProteinGLMForCasualLM",
+    "AutoModelForMaskedLM": "modeling_proteinglm.ProteinGLMForMaskedLM",
+    "AutoModelForSequenceClassification": "modeling_proteinglm.ProteinGLMForSequenceClassification",
+    "AutoModelForTokenClassification": "modeling_proteinglm.ProteinGLMForTokenClassification"
   },
   "bias_dropout_fusion": true,
   "deepnorm": true,
@@ -30,7 +30,7 @@
   "is_causal": true,
   "kv_channels": 64,
   "layernorm_epsilon": 1e-05,
-  "model_type": "xTrimoPGLM",
+  "model_type": "ProteinGLM",
   "moe": false,
   "multi_query_attention": false,
   "multi_query_group_num": 1,
configuration_proteinglm.py ADDED
@@ -0,0 +1,86 @@
+ from transformers import PretrainedConfig
+
+
+ class ProteinGLMConfig(PretrainedConfig):
+     model_type = "ProteinGLM"
+     def __init__(
+         self,
+         num_layers=36,
+         padded_vocab_size=128,
+         hidden_size=2560,
+         ffn_hidden_size=6832,
+         kv_channels=64,
+         num_attention_heads=40,
+         seq_length=1024,
+         hidden_dropout=0.0,
+         attention_dropout=0.0,
+         layernorm_epsilon=1e-5,
+         glu_activation='geglu',
+         rmsnorm=False,
+         deepnorm=True,
+         apply_residual_connection_post_layernorm=True,
+         post_layer_norm=True,
+         add_bias_linear=True,
+         add_qkv_bias=True,
+         bias_dropout_fusion=True,
+         multi_query_attention=False,
+         multi_query_group_num=1,
+         apply_query_key_layer_scaling=True,
+         attention_softmax_in_fp32=True,
+         fp32_residual_connection=False,
+         quantization_bit=0,
+         rotary_embedding_2d=False,
+         use_pytorch_sdpa=True,
+         is_causal=True,
+         use_cache=True,
+         initializer_range=0.02,
+         moe=False,
+         num_experts=0,
+         experts_per_token=0,
+         untie_head=False,
+         head_num=1,
+         **kwargs
+     ):
+
+         if not deepnorm and apply_residual_connection_post_layernorm:
+             print("Warning: deepnorm is False and apply_residual_connection_post_layernorm is True")
+
+         if deepnorm:
+             apply_residual_connection_post_layernorm = True
+
+         self.num_layers = num_layers
+         self.vocab_size = padded_vocab_size
+         self.padded_vocab_size = padded_vocab_size
+         self.hidden_size = hidden_size
+         self.ffn_hidden_size = ffn_hidden_size
+         self.kv_channels = kv_channels
+         self.num_attention_heads = num_attention_heads
+         self.seq_length = seq_length
+         self.hidden_dropout = hidden_dropout
+         self.attention_dropout = attention_dropout
+         self.layernorm_epsilon = layernorm_epsilon
+         self.glu_activation = glu_activation
+         self.rmsnorm = rmsnorm
+         self.deepnorm = deepnorm
+         self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
+         self.post_layer_norm = post_layer_norm
+         self.add_bias_linear = add_bias_linear
+         self.add_qkv_bias = add_qkv_bias
+         self.bias_dropout_fusion = bias_dropout_fusion
+         self.multi_query_attention = multi_query_attention
+         self.multi_query_group_num = multi_query_group_num
+         self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
+         self.attention_softmax_in_fp32 = attention_softmax_in_fp32
+         self.fp32_residual_connection = fp32_residual_connection
+         self.quantization_bit = quantization_bit
+         self.rotary_embedding_2d = rotary_embedding_2d
+         self.is_causal = is_causal
+         self.use_cache = use_cache
+         self.initializer_range = initializer_range
+         self.use_pytorch_sdpa = use_pytorch_sdpa
+         self.moe = moe
+         self.num_experts = num_experts
+         self.experts_per_token = experts_per_token
+         self.untie_head = untie_head
+         self.head_num = head_num
+         super().__init__(**kwargs)
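With the configuration class in place, the auto_map entries in config.json above are what let the stock transformers Auto* classes pick up this custom code at load time. A minimal loading sketch, assuming the files live in a Hub repo such as Bo1015/proteinglm-3b-clm and that half precision is acceptable (both assumptions, not stated in this commit):

import torch
from transformers import AutoConfig, AutoModelForCausalLM

# trust_remote_code=True is required so that auto_map can import
# configuration_proteinglm.py and modeling_proteinglm.py from the repo.
config = AutoConfig.from_pretrained("Bo1015/proteinglm-3b-clm", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "Bo1015/proteinglm-3b-clm",   # assumed repo id
    trust_remote_code=True,
    torch_dtype=torch.half,       # assumed dtype
)

Note also that the constructor silently forces apply_residual_connection_post_layernorm=True whenever deepnorm=True, so passing the former as False has no effect unless DeepNorm is disabled as well.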
modeling_proteinglm.py ADDED
@@ -0,0 +1,1571 @@
1
+ """ PyTorch ProteinGLM model. """
2
+
3
+ import math
4
+ import copy
5
+ import warnings
6
+ import re
7
+ import sys
8
+ import os
9
+ import pathlib
10
+ import time
11
+ import random
12
+ import numpy as np
13
+ from tqdm.auto import tqdm
14
+
15
+ import torch, deepspeed
16
+ import torch.utils.checkpoint
17
+ import torch.nn.functional as F
18
+ from torch import nn
19
+ from torch.nn import CrossEntropyLoss, LayerNorm, MSELoss, BCEWithLogitsLoss
20
+ from torch.nn.utils import skip_init
21
+ from typing import Optional, Tuple, Union, List, Callable, Dict, Any
22
+ from copy import deepcopy
23
+ from collections import namedtuple
24
+
25
+ from transformers.modeling_outputs import (
26
+ BaseModelOutputWithPast,
27
+ MaskedLMOutput,
28
+ CausalLMOutputWithPast,
29
+ SequenceClassifierOutput,
30
+ TokenClassifierOutput
31
+ )
32
+ from transformers import PreTrainedModel
33
+ from transformers.utils import logging
34
+ from transformers.generation.logits_process import LogitsProcessor
35
+ from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput
36
+
37
+ from .configuration_proteinglm import ProteinGLMConfig
38
+ from .quantization import quantize
39
+
40
+ def get_checkpoint_fn():
41
+ if deepspeed.checkpointing.is_configured():
42
+ checkpoint = deepspeed.checkpointing.checkpoint
43
+ else:
44
+ checkpoint = torch.utils.checkpoint.checkpoint
45
+ return checkpoint
46
+
47
+ # flags required to enable jit fusion kernels
48
+
49
+ if sys.platform != 'darwin':
50
+ torch._C._jit_set_profiling_mode(False)
51
+ torch._C._jit_set_profiling_executor(False)
52
+ torch._C._jit_override_can_fuse_on_cpu(True)
53
+ torch._C._jit_override_can_fuse_on_gpu(True)
54
+
55
+ logger = logging.get_logger(__name__)
56
+
57
+ _CHECKPOINT_FOR_DOC = "Bo1015/proteinglm-100b-int4"
58
+ _CONFIG_FOR_DOC = "ProteinGLMConfig"
59
+ DeepNormCoefficients = namedtuple("DeepNormCoefficients", ["alpha", "beta"])
60
+
61
+ def default_init(cls, *args, **kwargs):
62
+ return cls(*args, **kwargs)
63
+
64
+
65
+ def get_deepnorm_coefficients(config: ProteinGLMConfig):
66
+ """
67
+ DeepNorm coefficients from : https://kexue.fm/archives/8978
68
+ """
69
+ num_layers = config.num_layers
70
+ return DeepNormCoefficients(alpha=(2 * num_layers) ** 0.5, beta=(2 * num_layers) ** -0.5)
71
+
72
+
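For the 36-layer configuration above these coefficients work out to alpha = (2 * 36) ** 0.5, about 8.49, which scales the residual branch in each block below, and beta = (2 * 36) ** -0.5, about 0.118, the associated initialisation gain in the DeepNorm recipe. A quick check, assuming the definitions in this file are in scope:

coeff = get_deepnorm_coefficients(ProteinGLMConfig(num_layers=36))
print(round(coeff.alpha, 3), round(coeff.beta, 3))  # 8.485 0.118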
73
+ class InvalidScoreLogitsProcessor(LogitsProcessor):
74
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
75
+ if torch.isnan(scores).any() or torch.isinf(scores).any():
76
+ scores.zero_()
77
+ scores[..., 5] = 5e4
78
+ return scores
79
+
80
+
81
+ def split_tensor_along_last_dim(
82
+ tensor: torch.Tensor,
83
+ num_partitions: int,
84
+ contiguous_split_chunks: bool = False,
85
+ ) -> List[torch.Tensor]:
86
+ """Split a tensor along its last dimension.
87
+
88
+ Arguments:
89
+ tensor: input tensor.
90
+ num_partitions: number of partitions to split the tensor
91
+ contiguous_split_chunks: If True, make each chunk contiguous
92
+ in memory.
93
+
94
+ Returns:
95
+ A list of Tensors
96
+ """
97
+ # Get the size and dimension.
98
+ last_dim = tensor.dim() - 1
99
+ last_dim_size = tensor.size()[last_dim] // num_partitions
100
+ # Split.
101
+ tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
102
+ # Note: torch.split does not create contiguous tensors by default.
103
+ if contiguous_split_chunks:
104
+ return tuple(chunk.contiguous() for chunk in tensor_list)
105
+
106
+ return tensor_list
107
+
108
+ class RotaryEmbedding(torch.nn.Module):
109
+
110
+ def __init__(self, dim, base=10000, precision=torch.half, learnable=False):
111
+ super().__init__()
112
+ inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim)).to(precision)
113
+ self.dim = dim
114
+ self.base = base
115
+ self.learnable = learnable
116
+ if learnable:
117
+ self.inv_freq = torch.nn.Parameter(inv_freq)
118
+ self.max_seq_len_cached = None
119
+ else:
120
+ self.register_buffer('inv_freq', inv_freq)
121
+ self.max_seq_len_cached = None
122
+ self.cos_cached = None
123
+ self.sin_cached = None
124
+ self.precision = precision
125
+
126
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
127
+ if f'{prefix}inv_freq' in state_dict:
128
+ super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
129
+ else:
130
+ self.inv_freq.copy_(1. / (self.base ** (torch.arange(0, self.dim, 2).float() / self.dim)).to(self.precision))
131
+
132
+ def forward(self, x, seq_dim=1, seq_len=None):
133
+ if seq_len is None:
134
+ seq_len = x.shape[seq_dim]
135
+ if self.max_seq_len_cached is None or (seq_len > self.max_seq_len_cached):
136
+ self.max_seq_len_cached = None if self.learnable else seq_len
137
+ t = torch.arange(seq_len, device=x.device, dtype=torch.float32)
138
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq.to(x.device))
139
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
140
+ emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
141
+ if self.precision == torch.bfloat16 or self.precision == torch.half:
142
+ emb = emb.float()
143
+ # [sx, 1 (b * np), hn]
144
+ cos_cached = emb.cos()[:, None, :]
145
+ sin_cached = emb.sin()[:, None, :]
146
+ if self.precision == torch.bfloat16:
147
+ cos_cached = cos_cached.bfloat16()
148
+ sin_cached = sin_cached.bfloat16()
149
+ elif self.precision == torch.half:
150
+ cos_cached = cos_cached.half()
151
+ sin_cached = sin_cached.half()
152
+ if self.learnable:
153
+ return cos_cached, sin_cached
154
+ self.cos_cached, self.sin_cached = cos_cached, sin_cached
155
+ return self.cos_cached[:seq_len, ...], self.sin_cached[:seq_len, ...]
156
+
157
+ def rotate_half(x):
158
+ x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
159
+ return torch.cat((-x2, x1), dim=x1.ndim - 1) # dim=-1 triggers a bug in earlier torch versions
160
+
161
+ def assert_dim_check(tensor, ndim=None, shape=None):
162
+ if ndim is not None:
163
+ assert tensor.ndim == ndim, f"Expect tensor.ndim={ndim}, but got tensor.shape={tensor.shape}"
164
+ if shape is not None:
165
+ assert list(tensor.shape) == list(shape), f"Expect tensor.shape={shape}, but got tensor.shape={tensor.shape}"
166
+
167
+ def apply_rotary_pos_emb_index_torch(q, k, cos, sin, position_id): # jitting fails with bf16
168
+ # position_id: [sq, b], q, k: [sq, b, np, hn], cos: [sq, 1, hn] -> [sq, b, 1, hn]
169
+ cos, sin = F.embedding(position_id, cos.squeeze(1)).unsqueeze(2), \
170
+ F.embedding(position_id, sin.squeeze(1)).unsqueeze(2)
171
+ q, k = (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
172
+ return q, k
173
+
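A shape sketch for the rotary helpers above, assuming the classes and functions defined in this file are in scope:

q = torch.randn(8, 2, 4, 16)                                # [sq, b, np, hn]
k = torch.randn(8, 2, 4, 16)
rope = RotaryEmbedding(16, precision=torch.float32)
cos, sin = rope(q, seq_len=8)                               # each [sq, 1, hn]
position_id = torch.arange(8).unsqueeze(1).expand(8, 2)     # [sq, b]
q_rot, k_rot = apply_rotary_pos_emb_index_torch(q, k, cos, sin, position_id)
assert q_rot.shape == q.shape and k_rot.shape == k.shape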
174
+ class RMSNorm(torch.nn.Module):
175
+ def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs):
176
+ super().__init__()
177
+ self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype))
178
+ self.eps = eps
179
+
180
+ def forward(self, hidden_states: torch.Tensor):
181
+ input_dtype = hidden_states.dtype
182
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
183
+ hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
184
+
185
+ return (self.weight * hidden_states).to(input_dtype)
186
+
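The RMSNorm above computes weight * x / sqrt(mean(x**2, last dim) + eps) in fp32 and casts back to the input dtype; its weight is allocated with torch.empty and left for _init_weights or checkpoint loading to fill. A small numerical check, assuming the class above is in scope:

x = torch.randn(2, 3, 2560)
rms = RMSNorm(2560)
with torch.no_grad():
    rms.weight.fill_(1.0)          # replace the uninitialised torch.empty values
ref = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-5)
assert torch.allclose(rms(x), ref, atol=1e-5)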
187
+ class CoreAttention(torch.nn.Module):
188
+ def __init__(self, config: ProteinGLMConfig, layer_number):
189
+ super(CoreAttention, self).__init__()
190
+
191
+ self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling
192
+ self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32
193
+ if self.apply_query_key_layer_scaling:
194
+ self.attention_softmax_in_fp32 = True
195
+ self.layer_number = max(1, layer_number)
196
+
197
+ projection_size = config.kv_channels * config.num_attention_heads
198
+
199
+ # Per attention head and per partition values.
200
+ self.hidden_size_per_partition = projection_size
201
+ self.hidden_size_per_attention_head = projection_size // config.num_attention_heads
202
+ self.num_attention_heads_per_partition = config.num_attention_heads
203
+
204
+ coeff = None
205
+ self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
206
+ if self.apply_query_key_layer_scaling:
207
+ coeff = self.layer_number
208
+ self.norm_factor *= coeff
209
+ self.coeff = coeff
210
+
211
+ self.attention_dropout = torch.nn.Dropout(config.attention_dropout)
212
+
213
+ self.is_causal = config.is_causal
214
+ self.use_pytorch_sdpa = config.use_pytorch_sdpa
215
+
216
+ def forward(self, query_layer, key_layer, value_layer, attention_mask):
217
+ # query_layer, key_layer, value_layer: [seq_len, batch_size, num_heads, head_dim]
218
+ # import pdb; pdb.set_trace();
219
+ pytorch_major_version = int(torch.__version__.split('.')[0])
220
+ # assert pytorch_major_version >= 2, f"Expect PyTorch version > 2.0"
221
+ if pytorch_major_version >= 2 and self.use_pytorch_sdpa:
222
+ dropout_p = self.attention_dropout.p if self.training else 0
223
+ # [seq_len, batch_size, num_heads, head_dim] -> [batch_size, num_heads, seq_len, head_dim]
224
+ query_layer, key_layer, value_layer = [k.permute(1, 2, 0, 3) for k in [query_layer, key_layer, value_layer]]
225
+ # import pdb; pdb.set_trace();
226
+ if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]:
227
+ # context_layer: [batch_size, num_heads, seq_len, head_dim]
228
+ context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, is_causal=self.is_causal, dropout_p=dropout_p)
229
+ else:
230
+ if (attention_mask is not None) and (attention_mask.dtype == torch.bool):
231
+ attention_mask = attention_mask.logical_not()  # do NOT flip the mask in place; the caller's mask must stay unchanged
232
+ context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, attention_mask, dropout_p=dropout_p)
233
+ # [batch_size, num_heads, seq_len, head_dim] -> [seq_len, batch_size, num_heads, head_dim]
234
+ context_layer = context_layer.permute(2, 0, 1, 3)
235
+ # [seq_len, batch_size, 2560]
236
+ new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
237
+ context_layer = context_layer.reshape(*new_context_layer_shape)
238
+ else:
239
+ # Raw attention scores
240
+
241
+ # [b, np, sq, sk]
242
+ output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))
243
+
244
+ # [sq, b, np, hn] -> [sq, b * np, hn]
245
+ query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)
246
+ # [sk, b, np, hn] -> [sk, b * np, hn]
247
+ key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)
248
+
249
+ # preallocating input tensor: [b * np, sq, sk]
250
+ matmul_input_buffer = torch.empty(
251
+ output_size[0] * output_size[1], output_size[2], output_size[3], dtype=query_layer.dtype,
252
+ device=query_layer.device
253
+ )
254
+
255
+ # Raw attention scores. [b * np, sq, sk]
256
+ matmul_result = torch.baddbmm(
257
+ matmul_input_buffer,
258
+ query_layer.transpose(0, 1), # [b * np, sq, hn]
259
+ key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]
260
+ beta=0.0,
261
+ alpha=(1.0 / self.norm_factor),
262
+ )
263
+
264
+ # change view to [b, np, sq, sk]
265
+ attention_scores = matmul_result.view(*output_size)
266
+
267
+ # ===========================
268
+ # Attention probs and dropout
269
+ # ===========================
270
+
271
+ # attention scores and attention mask [b, np, sq, sk]
272
+ if self.attention_softmax_in_fp32:
273
+ attention_scores = attention_scores.float()
274
+ if self.coeff is not None:
275
+ attention_scores = attention_scores * self.coeff
276
+ if self.is_causal and attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]:
277
+ attention_mask = torch.ones(output_size[0], 1, output_size[2], output_size[3],
278
+ device=attention_scores.device, dtype=torch.bool)
279
+ attention_mask.tril_()
280
+ attention_mask = ~attention_mask
281
+ if attention_mask is not None:
282
+ attention_scores = attention_scores.masked_fill(attention_mask, float("-inf"))
283
+ attention_probs = F.softmax(attention_scores, dim=-1)
284
+ attention_probs = attention_probs.type_as(value_layer)
285
+
286
+ # This is actually dropping out entire tokens to attend to, which might
287
+ # seem a bit unusual, but is taken from the original Transformer paper.
288
+ attention_probs = self.attention_dropout(attention_probs)
289
+ # =========================
290
+ # Context layer. [sq, b, hp]
291
+ # =========================
292
+
293
+ # value_layer -> context layer.
294
+ # [sk, b, np, hn] --> [b, np, sq, hn]
295
+
296
+ # context layer shape: [b, np, sq, hn]
297
+ output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))
298
+ # change view [sk, b * np, hn]
299
+ value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)
300
+ # change view [b * np, sq, sk]
301
+ attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)
302
+ # matmul: [b * np, sq, hn]
303
+ context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
304
+ # change view [b, np, sq, hn]
305
+ context_layer = context_layer.view(*output_size)
306
+ # [b, np, sq, hn] --> [sq, b, np, hn]
307
+ context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
308
+ # [sq, b, np, hn] --> [sq, b, hp]
309
+ new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
310
+ context_layer = context_layer.view(*new_context_layer_shape)
311
+
312
+ return context_layer
313
+
314
+
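A layout sketch for the SDPA branch above: activations are kept as [sq, b, np, hn], permuted to the [b, np, sq, hn] layout that torch.nn.functional.scaled_dot_product_attention expects, then the heads are folded back into the hidden dimension (illustrative sizes; PyTorch 2.x assumed):

x = torch.randn(10, 2, 40, 64)                           # [sq, b, np, hn]
q = k = v = x.permute(1, 2, 0, 3)                        # [b, np, sq, hn]
ctx = F.scaled_dot_product_attention(q, k, v, is_causal=True)
ctx = ctx.permute(2, 0, 1, 3).reshape(10, 2, 40 * 64)    # back to [sq, b, h], h = np * hn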
315
+ class SelfAttention(torch.nn.Module):
316
+ """Parallel self-attention layer abstract class.
317
+
318
+ Self-attention layer takes input with size [s, b, h]
319
+ and returns output of the same size.
320
+ """
321
+
322
+ def __init__(self, config: ProteinGLMConfig, layer_number, device=None):
323
+ super(SelfAttention, self).__init__()
324
+ self.layer_number = max(1, layer_number)
325
+
326
+ self.projection_size = config.kv_channels * config.num_attention_heads
327
+
328
+ # Per attention head and per partition values.
329
+ self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads
330
+ self.num_attention_heads_per_partition = config.num_attention_heads
331
+
332
+ self.multi_query_attention = config.multi_query_attention
333
+ self.qkv_hidden_size = 3 * self.projection_size
334
+ if self.multi_query_attention:
335
+ self.num_multi_query_groups_per_partition = config.multi_query_group_num
336
+ self.qkv_hidden_size = (
337
+ self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num
338
+ )
339
+ self.query_key_value = nn.Linear(config.hidden_size, self.qkv_hidden_size,
340
+ bias=config.add_bias_linear or config.add_qkv_bias,
341
+ device=device, **_config_to_kwargs(config)
342
+ )
343
+
344
+ self.core_attention = CoreAttention(config, self.layer_number)
345
+
346
+ # Output.
347
+ self.dense = nn.Linear(self.projection_size, config.hidden_size, bias=config.add_bias_linear, device=device, **_config_to_kwargs(config))
348
+
349
+ self.rotary_embedding_2d = config.rotary_embedding_2d
350
+ # dim, base=10000, precision=torch.half, learnable=False
351
+ self.rotary_emb = RotaryEmbedding(self.hidden_size_per_attention_head // 2 if self.rotary_embedding_2d else self.hidden_size_per_attention_head,
352
+ base=10000, precision=config.torch_dtype, learnable=False)
353
+
354
+
355
+ def forward(
356
+ self, hidden_states, attention_mask, position_ids, kv_cache=None, use_cache=True
357
+ ):
358
+ # hidden_states: [sq, b, h]
359
+
360
+ # =================================================
361
+ # Pre-allocate memory for key-values for inference.
362
+ # =================================================
363
+ # =====================
364
+ # Query, Key, and Value
365
+ # =====================
366
+
367
+ # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
368
+ mixed_x_layer = self.query_key_value(hidden_states)
369
+
370
+ if self.multi_query_attention:
371
+ (query_layer, key_layer, value_layer) = mixed_x_layer.split(
372
+ [
373
+ self.num_attention_heads_per_partition * self.hidden_size_per_attention_head,
374
+ self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
375
+ self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
376
+ ],
377
+ dim=-1,
378
+ )
379
+ query_layer = query_layer.view(
380
+ query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)
381
+ )
382
+ key_layer = key_layer.view(
383
+ key_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)
384
+ )
385
+ value_layer = value_layer.view(
386
+ value_layer.size()[:-1]
387
+ + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)
388
+ )
389
+ else:
390
+ new_tensor_shape = mixed_x_layer.size()[:-1] + (self.num_attention_heads_per_partition, 3 * self.hidden_size_per_attention_head)
391
+ mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
392
+ # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
393
+ (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3)
394
+
395
+ # apply relative positional encoding (rotary embedding)
396
+ if position_ids is not None: # [seq_len, 2, batch_size, 32, 2]
397
+
398
+ if self.rotary_embedding_2d:
399
+ q1, q2 = query_layer.chunk(2, dim=(query_layer.ndim - 1)) # 32
400
+ k1, k2 = key_layer.chunk(2, dim=(key_layer.ndim - 1))
401
+ # import pdb; pdb.set_trace();
402
+ cos, sin = self.rotary_emb(q1, seq_len=position_ids.max() + 1) # 32
403
+ position_ids, block_position_ids = \
404
+ position_ids[:, 0, :].transpose(0, 1).contiguous(), \
405
+ position_ids[:, 1, :].transpose(0, 1).contiguous()
406
+ q1, k1 = apply_rotary_pos_emb_index_torch(q1, k1, cos, sin, position_ids)
407
+ q2, k2 = apply_rotary_pos_emb_index_torch(q2, k2, cos, sin, block_position_ids)
408
+ query_layer = torch.concat([q1, q2], dim=(q1.ndim - 1))
409
+ key_layer = torch.concat([k1, k2], dim=(k1.ndim - 1))
410
+ else:
411
+ # [b, sq] -> [sq, b]
412
+ position_ids = position_ids.transpose(0, 1)
413
+ cos, sin = self.rotary_emb(value_layer, seq_len=position_ids.max() + 1)
414
+ query_layer, key_layer = apply_rotary_pos_emb_index_torch(query_layer, key_layer, cos, sin, position_ids)
415
+
416
+ # adjust key and value for inference
417
+ if kv_cache is not None:
418
+ cache_k, cache_v = kv_cache
419
+ key_layer = torch.cat((cache_k, key_layer), dim=0)
420
+ value_layer = torch.cat((cache_v, value_layer), dim=0)
421
+ if use_cache:
422
+ kv_cache = (key_layer, value_layer)
423
+ else:
424
+ kv_cache = None
425
+
426
+ if self.multi_query_attention:
427
+ key_layer = key_layer.unsqueeze(-2)
428
+ key_layer = key_layer.expand(-1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1)
429
+ key_layer = key_layer.contiguous().view(key_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head))
430
+ value_layer = value_layer.unsqueeze(-2)
431
+ value_layer = value_layer.expand(-1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1)
432
+ value_layer = value_layer.contiguous().view(value_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head))
433
+
434
+ # ==================================
435
+ # core attention computation
436
+ # ==================================
437
+
438
+ context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask) # context_layer: [seq_len, batch_size, num_heads*head_dim]
439
+ output = self.dense(context_layer)
440
+ # =================
441
+ # Output. [sq, b, h]
442
+ # =================
443
+
444
+ # output = context_layer @ self.dense.weight.T + self.dense.bias
445
+ return output, kv_cache
446
+
447
+
448
+ def _config_to_kwargs(args):
449
+ common_kwargs = {
450
+ "dtype": args.torch_dtype,
451
+ }
452
+ return common_kwargs
453
+
454
+
455
+ class MLP(torch.nn.Module):
456
+ """MLP.
457
+
458
+ MLP will take the input with h hidden state, project it to 4*h
459
+ hidden dimension, perform nonlinear transformation, and project the
460
+ state back into h hidden dimension.
461
+ """
462
+
463
+ def __init__(self, config: ProteinGLMConfig, device=None):
464
+ super(MLP, self).__init__()
465
+
466
+ self.add_bias = config.add_bias_linear
467
+ self.moe = config.moe
468
+ self.num_experts = config.num_experts
469
+ self.experts_per_token = config.experts_per_token # 2
470
+
471
+ # Project to 4h. If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf
472
+ self.dense_h_to_4h = nn.Linear(
473
+ config.hidden_size,
474
+ config.ffn_hidden_size * 2,
475
+ bias=self.add_bias,
476
+ device=device,
477
+ **_config_to_kwargs(config)
478
+ )
479
+
480
+ def swiglu(x):
481
+ x = torch.chunk(x, 2, dim=-1)
482
+ return x[0] * F.silu(x[1])
483
+
484
+ def geglu(x):
485
+ x = torch.chunk(x, 2, dim=-1)
486
+ return x[0] * F.gelu(x[1])
487
+
488
+ if config.glu_activation == 'geglu':
489
+ self.activation_func = geglu
490
+ elif config.glu_activation == 'swiglu':
491
+ self.activation_func = swiglu
492
+ else:
493
+ raise RuntimeError(f"Unsupported glu_activation: {config.glu_activation}")
494
+
495
+ # Project back to h.
496
+ self.dense_4h_to_h = nn.Linear(
497
+ config.ffn_hidden_size,
498
+ config.hidden_size,
499
+ bias=self.add_bias,
500
+ device=device,
501
+ **_config_to_kwargs(config)
502
+ )
503
+
504
+ if self.moe:
505
+ assert self.num_experts > 1
506
+ del self.dense_h_to_4h
507
+ del self.dense_4h_to_h
508
+ self.router = nn.Linear(
509
+ config.hidden_size,
510
+ config.num_experts,
511
+ bias=False,
512
+ device=device,
513
+ dtype=torch.float32
514
+ )
515
+ for i in range(0, self.num_experts):
516
+ self.register_module(f"dense_h_to_4h_{i}", nn.Linear(
517
+ config.hidden_size,
518
+ config.ffn_hidden_size * 2,
519
+ bias=self.add_bias,
520
+ device=device,
521
+ **_config_to_kwargs(config)
522
+ ))
523
+ self.register_module(f"dense_4h_to_h_{i}", nn.Linear(
524
+ config.ffn_hidden_size,
525
+ config.hidden_size,
526
+ bias=self.add_bias,
527
+ device=device,
528
+ **_config_to_kwargs(config)
529
+ ))
530
+
531
+ def moe_forward(self, hidden_states, expert_idx):
532
+ intermediate_parallel = getattr(self, f"dense_h_to_4h_{expert_idx}")(hidden_states)
533
+ intermediate_parallel = self.activation_func(intermediate_parallel)
534
+ output = getattr(self, f"dense_4h_to_h_{expert_idx}")(intermediate_parallel)
535
+ return output
536
+
537
+ def forward(self, hidden_states):
538
+ if self.moe:
539
+ # import pdb; pdb.set_trace();
540
+ s, b, n = hidden_states.shape
541
+ dtype = hidden_states.dtype
542
+ hidden_states = hidden_states.view(-1, hidden_states.size(2)) # [s*b h]
543
+ route = self.router(hidden_states).to(dtype)
544
+
545
+ weights, selected_experts = torch.topk(route, self.experts_per_token)
546
+ weights = F.softmax(weights, dim=1, dtype=torch.float).to(hidden_states.dtype)
547
+ output = torch.zeros_like(hidden_states, dtype=hidden_states.dtype, device=hidden_states.device)
548
+ for expert_idx in range(self.num_experts):
549
+ batch_idx, nth_expert = torch.where(selected_experts == expert_idx)
550
+ if nth_expert.shape[0] == 0:
551
+ continue
552
+ cur_out = self.moe_forward(hidden_states[batch_idx], expert_idx)
553
+ output[batch_idx] += weights[batch_idx, nth_expert, None] * cur_out
554
+ output = output.reshape(s, b, n)
555
+ else:
556
+ # [s, b, 4hp]
557
+ intermediate_parallel = self.dense_h_to_4h(hidden_states)
558
+ intermediate_parallel = self.activation_func(intermediate_parallel)
559
+ # [s, b, h]
560
+ output = self.dense_4h_to_h(intermediate_parallel)
561
+ return output
562
+
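A quick sketch of the GLU split used by geglu and swiglu above, with plain tensors and the 3B sizes (ffn_hidden_size=6832):

x = torch.randn(5, 2, 2 * 6832)      # dense_h_to_4h output: [s, b, 2 * ffn_hidden_size]
a, b = torch.chunk(x, 2, dim=-1)
y = a * F.gelu(b)                    # geglu; swiglu would use F.silu(b) instead
assert y.shape == (5, 2, 6832)       # the width dense_4h_to_h expects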
563
+ class ProteinGLMBlock(torch.nn.Module):
564
+ """A single transformer layer.
565
+
566
+ Transformer layer takes input with size [s, b, h] and returns an
567
+ output of the same size.
568
+ """
569
+
570
+ def __init__(self, config: ProteinGLMConfig, layer_number, device=None):
571
+ super(ProteinGLMBlock, self).__init__()
572
+ self.layer_number = layer_number
573
+
574
+ self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm
575
+
576
+ self.fp32_residual_connection = config.fp32_residual_connection
577
+
578
+ LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
579
+ # Layernorm on the input data.
580
+ self.input_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon)
581
+
582
+ # Self attention.
583
+ self.self_attention = SelfAttention(config, layer_number, device=device)
584
+ self.hidden_dropout = config.hidden_dropout
585
+
586
+ # Layernorm on the attention output
587
+ self.post_attention_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon)
588
+
589
+ # MLP
590
+ self.mlp = MLP(config, device=device)
591
+
592
+ self.deepnorm_coeff = get_deepnorm_coefficients(config) if config.deepnorm else None
593
+
594
+ def forward(
595
+ self, hidden_states, attention_mask, position_ids, kv_cache=None, use_cache=True,
596
+ ):
597
+ # hidden_states: [s, b, h]
598
+ # Layer norm at the beginning of the transformer layer.
599
+ layernorm_output = self.input_layernorm(hidden_states)
600
+ # Self attention.
601
+ attention_output, kv_cache = self.self_attention(
602
+ layernorm_output,
603
+ attention_mask,
604
+ position_ids, # [batch_size, 2, seq_len, 32, 2]
605
+ kv_cache=kv_cache,
606
+ use_cache=use_cache
607
+ )
608
+
609
+ # Residual connection.
610
+ if self.apply_residual_connection_post_layernorm:
611
+ residual = layernorm_output
612
+ else:
613
+ residual = hidden_states
614
+
615
+ layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training)
616
+ if self.deepnorm_coeff is not None:
617
+ layernorm_input = residual*self.deepnorm_coeff.alpha + layernorm_input
618
+ else:
619
+ layernorm_input = residual + layernorm_input
620
+
621
+ # Layer norm post the self attention.
622
+ layernorm_output = self.post_attention_layernorm(layernorm_input)
623
+
624
+ # MLP.
625
+ mlp_output = self.mlp(layernorm_output)
626
+
627
+ # Second residual connection.
628
+ if self.apply_residual_connection_post_layernorm:
629
+ residual = layernorm_output
630
+ else:
631
+ residual = layernorm_input
632
+
633
+ output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training)
634
+ if self.deepnorm_coeff is not None:
635
+ output = residual*self.deepnorm_coeff.alpha + output
636
+ else:
637
+ #print(f"2 self.deepnorm_coeff is None")
638
+ output = residual + output
639
+
640
+ return output, kv_cache
641
+
642
+
643
+ class ProteinGLMTransformer(torch.nn.Module):
644
+ """Transformer class."""
645
+
646
+ def __init__(self, config: ProteinGLMConfig, device=None):
647
+ super(ProteinGLMTransformer, self).__init__()
648
+
649
+ self.fp32_residual_connection = config.fp32_residual_connection
650
+ self.post_layer_norm = config.post_layer_norm
651
+
652
+ # Number of layers.
653
+ self.num_layers = config.num_layers
654
+
655
+ # Transformer layers.
656
+ def build_layer(layer_number):
657
+ return ProteinGLMBlock(config, layer_number, device=device)
658
+
659
+ self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)])
660
+
661
+ if self.post_layer_norm:
662
+ LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
663
+ # Final layer norm before output.
664
+ self.final_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon)
665
+
666
+ self.gradient_checkpointing = False
667
+
668
+ def _get_layer(self, layer_number):
669
+ return self.layers[layer_number]
670
+
671
+ def forward(
672
+ self, hidden_states, attention_mask, position_ids, kv_caches=None,
673
+ use_cache: Optional[bool] = True,
674
+ output_hidden_states: Optional[bool] = False,
675
+ ):
676
+ if not kv_caches:
677
+ kv_caches = [None for _ in range(self.num_layers)]
678
+ presents = () if use_cache else None
679
+ if self.gradient_checkpointing and self.training:
680
+ if use_cache:
681
+ logger.warning_once(
682
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
683
+ )
684
+ use_cache = False
685
+
686
+ all_self_attentions = None
687
+ all_hidden_states = () if output_hidden_states else None
688
+ for index in range(self.num_layers):
689
+ if output_hidden_states:
690
+ all_hidden_states = all_hidden_states + (hidden_states,)
691
+
692
+ layer = self._get_layer(index)
693
+ if self.gradient_checkpointing and self.training and torch.is_grad_enabled():
694
+ layer_ret = get_checkpoint_fn()(
695
+ layer,
696
+ hidden_states,
697
+ attention_mask,
698
+ position_ids,
699
+ kv_caches[index],
700
+ use_cache
701
+ )
702
+ else:
703
+ layer_ret = layer(
704
+ hidden_states,
705
+ attention_mask,
706
+ position_ids,
707
+ kv_cache=kv_caches[index],
708
+ use_cache=use_cache
709
+ )
710
+ hidden_states, kv_cache = layer_ret
711
+ if use_cache:
712
+ presents = presents + (kv_cache,)
713
+
714
+
715
+ # Final layer norm.
716
+ if self.post_layer_norm:
717
+ hidden_states = self.final_layernorm(hidden_states)
718
+
719
+ if output_hidden_states:
720
+ all_hidden_states = all_hidden_states + (hidden_states,)
721
+
722
+ return hidden_states, presents, all_hidden_states, all_self_attentions
723
+
724
+
725
+ class ProteinGLMPreTrainedModel(PreTrainedModel):
726
+ """
727
+ An abstract class to handle weights initialization and
728
+ a simple interface for downloading and loading pretrained models.
729
+ """
730
+
731
+ is_parallelizable = False
732
+ supports_gradient_checkpointing = True
733
+ config_class = ProteinGLMConfig
734
+ base_model_prefix = "transformer"
735
+ _no_split_modules = ["ProteinGLMBlock"]
736
+
737
+ _quantized = False
738
+
739
+
740
+ def get_masks(self, input_ids, past_key_values, padding_mask=None, is_causal=True):
741
+ batch_size, seq_length = input_ids.shape
742
+ full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_ids.device)
743
+ if is_causal:
744
+ full_attention_mask.tril_()
745
+ past_length = 0
746
+ if past_key_values:
747
+ past_length = past_key_values[0][0].shape[0]
748
+ if past_length:
749
+ full_attention_mask = torch.cat((torch.ones(batch_size, seq_length, past_length,
750
+ device=input_ids.device), full_attention_mask), dim=-1)
751
+ if padding_mask is not None:
752
+ full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1)
753
+ if not past_length and padding_mask is not None:
754
+ full_attention_mask -= padding_mask.unsqueeze(-1) - 1
755
+ full_attention_mask = (full_attention_mask < 0.5).bool()
756
+ full_attention_mask.unsqueeze_(1)
757
+ return full_attention_mask
758
+
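To make the mask convention above concrete, here is the no-padding, no-cache causal case reproduced with plain tensor ops (a sketch that mirrors, rather than calls, the method):

m = torch.ones(1, 4, 4).tril_()      # same construction as above, padding branches skipped
m = (m < 0.5).bool().unsqueeze(1)    # -> [1, 1, 4, 4]; True marks positions NOT to attend to
# m[0, 0]:
# [[False,  True,  True,  True],
#  [False, False,  True,  True],
#  [False, False, False,  True],
#  [False, False, False, False]]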
759
+ def get_position_ids(self, input_ids, device, context_length=0):
760
+ batch_size, seq_length = input_ids.shape
761
+ if self.config.rotary_embedding_2d:
762
+ if self.config.is_causal: # 100b model
763
+ position_ids_1 = torch.zeros(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) # [batch_size, seq_len]
764
+ position_ids_2 = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) # [batch_size, seq_len]
765
+ position_ids = torch.stack([position_ids_1, position_ids_2], axis=1) # [batch_size, 2, seq_len]
766
+ else:
767
+ position_ids_1 = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) # [batch_size, seq_len]
768
+ position_ids_2 = torch.zeros(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) # [batch_size, seq_len]
769
+ position_ids = torch.stack([position_ids_1, position_ids_2], axis=1) # [batch_size, 2, seq_len]
770
+ else:
771
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) # [batch_size, 1, seq_len]
772
+ return position_ids
773
+
774
+ def _set_gradient_checkpointing(self, module, value=False):
775
+ if isinstance(module, ProteinGLMTransformer):
776
+ module.gradient_checkpointing = value
777
+
778
+
779
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
780
+ def _init_weights(self, module):
781
+ std = self.config.initializer_range
782
+ """Initialize the weights"""
783
+ if isinstance(module, nn.Linear):
784
+ # Slightly different from the TF version which uses truncated_normal for initialization
785
+ # cf https://github.com/pytorch/pytorch/pull/5617
786
+ module.weight.data.normal_(mean=0.0, std=std)
787
+ if module.bias is not None:
788
+ module.bias.data.zero_()
789
+ elif isinstance(module, nn.Embedding):
790
+ module.weight.data.normal_(mean=0.0, std=std)
791
+ if module.padding_idx is not None:
792
+ module.weight.data[module.padding_idx].zero_()
793
+ elif isinstance(module, nn.LayerNorm):
794
+ module.bias.data.zero_()
795
+ module.weight.data.fill_(1.0)
796
+
797
+ def quantize(self, weight_bit_width: int, empty_init=True, device=None):
798
+ if self._quantized:
799
+ print(f"Model has been quantized...")
800
+ return
801
+ self.transformer.encoder = quantize(self.transformer.encoder, weight_bit_width, empty_init, device)
802
+ self._quantized = True
803
+ return self
804
+
805
+ class Embedding(torch.nn.Module):
806
+ """Language model embeddings."""
807
+
808
+ def __init__(self, config: ProteinGLMConfig, device=None):
809
+ super(Embedding, self).__init__()
810
+
811
+ self.hidden_size = config.hidden_size
812
+ # Word embeddings (parallel).
813
+ self.word_embeddings = nn.Embedding(
814
+ config.padded_vocab_size,
815
+ self.hidden_size,
816
+ dtype=config.torch_dtype,
817
+ device=device
818
+ )
819
+ self.fp32_residual_connection = config.fp32_residual_connection
820
+
821
+
822
+ def forward(self, input_ids):
823
+ # Embeddings.
824
+ words_embeddings = self.word_embeddings(input_ids)
825
+ embeddings = words_embeddings
826
+ # Data format change to avoid explicit transposes: [b s h] --> [s b h].
827
+ embeddings = embeddings.transpose(0, 1).contiguous()
828
+ # If the input flag for fp32 residual connection is set, convert for float.
829
+ if self.fp32_residual_connection:
830
+ embeddings = embeddings.float()
831
+ return embeddings
832
+
833
+ class ProteinGLMModel(ProteinGLMPreTrainedModel):
834
+ def __init__(self, config: ProteinGLMConfig, device=None, empty_init=True):
835
+ super().__init__(config)
836
+ if empty_init:
837
+ init_method = skip_init
838
+ else:
839
+ init_method = default_init
840
+ init_kwargs = {}
841
+ if device is not None:
842
+ init_kwargs["device"] = device
843
+ self.embedding = init_method(Embedding, config, **init_kwargs)
844
+ self.num_layers = config.num_layers
845
+ self.multi_query_group_num = config.multi_query_group_num
846
+ self.kv_channels = config.kv_channels
847
+
848
+ # Rotary positional embeddings
849
+ self.seq_length = config.seq_length
850
+ rotary_dim = (
851
+ config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels
852
+ )
853
+
854
+ # self.rotary_pos_emb = RotaryEmbedding(rotary_dim // 2, base=10000, precision=config.torch_dtype, learnable=False)
855
+ self.encoder = init_method(ProteinGLMTransformer, config, **init_kwargs)
856
+
857
+ self.output_layer = init_method(nn.Linear, config.hidden_size, config.padded_vocab_size, bias=False,
858
+ dtype=config.torch_dtype, **init_kwargs)
859
+
860
+ def get_input_embeddings(self):
861
+ return self.embedding.word_embeddings
862
+
863
+ def set_input_embeddings(self, value):
864
+ self.embedding.word_embeddings = value
865
+
866
+ def forward(
867
+ self,
868
+ input_ids,
869
+ position_ids: Optional[torch.Tensor] = None, # position_ids: [batch_size, 2, seq_len]
870
+ attention_mask: Optional[torch.BoolTensor] = None,
871
+ full_attention_mask: Optional[torch.BoolTensor] = None,
872
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
873
+ inputs_embeds: Optional[torch.Tensor] = None,
874
+ use_cache: Optional[bool] = None,
875
+ output_hidden_states: Optional[bool] = None,
876
+ return_dict: Optional[bool] = None,
877
+ ):
878
+ output_hidden_states = (
879
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
880
+ )
881
+ if self.config.is_causal:
882
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
883
+ else:
884
+ use_cache = False
885
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
886
+
887
+ batch_size, seq_length = input_ids.shape
888
+
889
+ if inputs_embeds is None:
890
+ inputs_embeds = self.embedding(input_ids)
891
+
892
+ if full_attention_mask is None:
893
+ if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1):
894
+ full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask)
895
+ # Run encoder.
896
+ hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder(
897
+ inputs_embeds, full_attention_mask, position_ids=position_ids,
898
+ kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states
899
+ )
900
+
901
+ if not return_dict:
902
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
903
+
904
+ return BaseModelOutputWithPast(
905
+ last_hidden_state=hidden_states,
906
+ past_key_values=presents,
907
+ hidden_states=all_hidden_states,
908
+ attentions=all_self_attentions,
909
+ )
910
+
911
+
912
+ class ProteinGLMForMaskedLM(ProteinGLMPreTrainedModel):
913
+ def __init__(self, config: ProteinGLMConfig, empty_init=True, device=None):
914
+ super().__init__(config)
915
+
916
+ self.max_sequence_length = config.max_length
917
+ self.transformer = ProteinGLMModel(config, empty_init=empty_init, device=device)
918
+ self.config = config
919
+ if self.config.quantization_bit:
920
+ print(f"Begin Quantization to {self.config.quantization_bit} bit")
921
+ self.quantize(self.config.quantization_bit, empty_init=True, device=device)
922
+
923
+ def forward(
924
+ self,
925
+ input_ids: Optional[torch.Tensor] = None,
926
+ position_ids: Optional[torch.Tensor] = None,
927
+ attention_mask: Optional[torch.Tensor] = None,
928
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
929
+ inputs_embeds: Optional[torch.Tensor] = None,
930
+ labels: Optional[torch.Tensor] = None,
931
+ use_cache: Optional[bool] = None,
932
+ output_attentions: Optional[bool] = None,
933
+ output_hidden_states: Optional[bool] = None,
934
+ return_dict: Optional[bool] = None,
935
+ return_last_logit: Optional[bool] = None,
936
+ return_last_hidden_state: Optional[bool] = None
937
+ ):
938
+ if self.config.is_causal:
939
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
940
+ else:
941
+ use_cache = False
942
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
943
+
944
+ if position_ids is None:
945
+ position_ids = self.get_position_ids(input_ids, device=input_ids.device)
946
+
947
+ full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask, is_causal=self.config.is_causal)
948
+
949
+ transformer_outputs = self.transformer(
950
+ input_ids=input_ids,
951
+ position_ids=position_ids, # position_ids: [batch_size, 2, seq_len]
952
+ full_attention_mask=full_attention_mask,
953
+ past_key_values=past_key_values,
954
+ inputs_embeds=inputs_embeds,
955
+ use_cache=use_cache,
956
+ output_hidden_states=output_hidden_states,
957
+ return_dict=return_dict,
958
+ )
959
+
960
+ hidden_states = transformer_outputs[0]
961
+ if return_last_logit:
962
+ hidden_states = hidden_states[-1:]
963
+ lm_logits = self.transformer.output_layer(hidden_states)
964
+ lm_logits = lm_logits.transpose(0, 1).contiguous()
965
+
966
+ masked_lm_loss = None
967
+ if labels is not None:
968
+ lm_logits = lm_logits.to(torch.float32)
969
+
970
+ # Flatten the tokens
971
+ loss_fct = CrossEntropyLoss(ignore_index=-100) # -100 for padding token.
972
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
973
+
974
+ lm_logits = lm_logits.to(hidden_states.dtype)
975
+ masked_lm_loss = masked_lm_loss.to(hidden_states.dtype)
976
+
977
+ if not return_dict:
978
+ output = (lm_logits,) + transformer_outputs[1:]
979
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
980
+ return MaskedLMOutput(
981
+ loss = masked_lm_loss,
982
+ logits=lm_logits,
983
+ hidden_states=transformer_outputs.last_hidden_state if return_last_hidden_state else transformer_outputs.hidden_states,
984
+ attentions=transformer_outputs.attentions,
985
+ )
986
+
987
+
988
+
989
+
990
+ class ProteinGLMForSequenceClassification(ProteinGLMPreTrainedModel):
991
+ def __init__(self, config: ProteinGLMConfig, empty_init=True, device=None):
992
+ super().__init__(config)
993
+ self.config = config
994
+ self.num_labels = config.num_labels
995
+
996
+ self.transformer = ProteinGLMModel(config, empty_init=empty_init, device=device)
997
+ self.classifier = ProteinGLMClassificationHead(config)
998
+ if self.config.quantization_bit:
999
+ print(f"Begin Quantization to {self.config.quantization_bit} bit")
1000
+ self.quantize(self.config.quantization_bit, empty_init=True, device=device)
1001
+
1002
+ def forward(
1003
+ self,
1004
+ input_ids: Optional[torch.Tensor] = None,
1005
+ position_ids: Optional[torch.Tensor] = None,
1006
+ attention_mask: Optional[torch.Tensor] = None,
1007
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
1008
+ inputs_embeds: Optional[torch.Tensor] = None,
1009
+ labels: Optional[torch.Tensor] = None,
1010
+ use_cache: Optional[bool] = None,
1011
+ output_attentions: Optional[bool] = None,
1012
+ output_hidden_states: Optional[bool] = None,
1013
+ return_dict: Optional[bool] = None,
1014
+ return_last_logit: Optional[bool] = None,
1015
+ return_last_hidden_state: Optional[bool] = None,
1016
+ **kwargs
1017
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1018
+ r"""
1019
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1020
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1021
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1022
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1023
+ """
1024
+ if self.config.is_causal:
1025
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1026
+ else:
1027
+ use_cache = False
1028
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1029
+
1030
+ if position_ids is None:
1031
+ position_ids = self.get_position_ids(input_ids, device=input_ids.device)
1032
+
1033
+ full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask, is_causal=self.config.is_causal)
1034
+
1035
+ transformer_outputs = self.transformer(
1036
+ input_ids=input_ids,
1037
+ position_ids=position_ids, # position_ids: [batch_size, 2, seq_len]
1038
+ full_attention_mask=full_attention_mask,
1039
+ past_key_values=past_key_values,
1040
+ inputs_embeds=inputs_embeds,
1041
+ use_cache=use_cache,
1042
+ output_hidden_states=output_hidden_states,
1043
+ return_dict=return_dict,
1044
+ )
1045
+ if self.config.add_special_tokens:
1046
+ hidden_states = transformer_outputs[0][:-1] # get rid of <eos> token
1047
+ else:
1048
+ hidden_states = transformer_outputs[0]
1049
+ logits = self.classifier(hidden_states, add_pooling=True)
1050
+ loss = None
1051
+ if labels is not None:
1052
+ labels = labels.to(logits.device)
1053
+
1054
+ if self.config.problem_type is None:
1055
+ if self.num_labels == 1:
1056
+ self.config.problem_type = "regression"
1057
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1058
+ self.config.problem_type = "single_label_classification"
1059
+ else:
1060
+ self.config.problem_type = "multi_label_classification"
1061
+
1062
+ if self.config.problem_type == "regression":
1063
+ loss_fct = MSELoss()
1064
+ if self.num_labels == 1:
1065
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1066
+ else:
1067
+ loss = loss_fct(logits, labels)
1068
+ elif self.config.problem_type == "single_label_classification":
1069
+ loss_fct = CrossEntropyLoss()
1070
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1071
+ elif self.config.problem_type == "multi_label_classification":
1072
+ loss_fct = BCEWithLogitsLoss()
1073
+ loss = loss_fct(logits, labels)
1074
+
1075
+ if not return_dict:
1076
+ output = (logits,) + transformer_outputs[2:]
1077
+ return ((loss,) + output) if loss is not None else output
1078
+
1079
+ return SequenceClassifierOutput(
1080
+ loss=loss,
1081
+ logits=logits,
1082
+ hidden_states=transformer_outputs.hidden_states,
1083
+ attentions=transformer_outputs.attentions,
1084
+ )
1085
+
1086
+ class ProteinGLMForTokenClassification(ProteinGLMPreTrainedModel):
1087
+ def __init__(self, config: ProteinGLMConfig, empty_init=True, device=None):
1088
+ super().__init__(config)
1089
+ self.config = config
1090
+ self.num_labels = config.num_labels
1091
+
1092
+ self.transformer = ProteinGLMModel(config, empty_init=empty_init, device=device)
1093
+ if config.task_modality == "token":
1094
+ self.classifier = ProteinGLMClassificationHead(config)
1095
+ elif config.task_modality == 'pair':
1096
+ self.classifier = ProteinGLMContactHead(config)
1097
+
1098
+ self.quantized = False
1099
+
1100
+ if self.config.quantization_bit:
1101
+ print(f"Begin Quantization to {self.config.quantization_bit} bit")
1102
+ self.quantize(self.config.quantization_bit, empty_init=True, device=device)
1103
+
1104
+
1105
+ def forward(
1106
+ self,
1107
+ input_ids: Optional[torch.Tensor] = None,
1108
+ position_ids: Optional[torch.Tensor] = None,
1109
+ attention_mask: Optional[torch.Tensor] = None,
1110
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
1111
+ inputs_embeds: Optional[torch.Tensor] = None,
1112
+ labels: Optional[torch.Tensor] = None,
1113
+ use_cache: Optional[bool] = None,
1114
+ output_attentions: Optional[bool] = None,
1115
+ output_hidden_states: Optional[bool] = None,
1116
+ return_dict: Optional[bool] = None,
1117
+ return_last_logit: Optional[bool] = None,
1118
+ return_last_hidden_state: Optional[bool] = None,
1119
+ **kwargs
1120
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1121
+ r"""
1122
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1123
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1124
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1125
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1126
+ """
1127
+ if self.config.is_causal:
1128
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1129
+ else:
1130
+ use_cache = False
1131
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1132
+
1133
+ if position_ids is None:
1134
+ position_ids = self.get_position_ids(input_ids, device=input_ids.device)
1135
+
1136
+ full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask, is_causal = self.config.is_causal)
1137
+
1138
+ transformer_outputs = self.transformer(
1139
+ input_ids=input_ids,
1140
+ position_ids=position_ids, # position_ids: [batch_size, 2, seq_len]
1141
+ full_attention_mask=full_attention_mask,
1142
+ past_key_values=past_key_values,
1143
+ inputs_embeds=inputs_embeds,
1144
+ use_cache=use_cache,
1145
+ output_hidden_states=output_hidden_states,
1146
+ return_dict=return_dict,
1147
+ )
1148
+ if self.config.add_special_tokens:
1149
+ hidden_states = transformer_outputs[0][:-1] # get rid of <eos> token
1150
+ else:
1151
+ hidden_states = transformer_outputs[0]
1152
+
1153
+ logits = self.classifier(hidden_states, add_pooling=False)
1154
+ loss = None
1155
+ if labels is not None:
1156
+ labels = labels.to(logits.device)
1157
+ loss_fct = CrossEntropyLoss()
1158
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1159
+
1160
+ if not return_dict:
1161
+ output = (logits,) + transformer_outputs[2:]
1162
+ return ((loss,) + output) if loss is not None else output
1163
+
1164
+
1165
+ return TokenClassifierOutput(
1166
+ loss=loss,
1167
+ logits=logits,
1168
+ hidden_states=transformer_outputs.hidden_states,
1169
+ attentions=transformer_outputs.attentions,
1170
+ )
1171
+
1172
+
1173
+
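A standalone sketch (illustration only, not in the diff) of the per-residue loss computed above: logits are flattened to [batch * seq_len, num_labels], and the usual Hugging Face convention of marking ignored positions with -100 applies because `CrossEntropyLoss` defaults to `ignore_index=-100`.

import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len, num_labels = 2, 6, 3
logits = torch.randn(batch_size, seq_len, num_labels)   # per-token logits from the classifier head
labels = torch.randint(0, num_labels, (batch_size, seq_len))
labels[:, -2:] = -100                                    # e.g. padded positions to ignore

loss = CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
print(loss.item())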
1174
+ class ProteinGLMClassificationHead(nn.Module):
1175
+ """Head for classification tasks."""
1176
+ def __init__(self, config):
1177
+ super().__init__()
1178
+ self.activation_func = config.activation_func
1179
+ self.layers = torch.nn.ModuleList()
1180
+ last_size = config.hidden_size
1181
+ for sz in config.inter_hidden_size:
1182
+ this_layer = torch.nn.Linear(last_size, sz, bias=config.bias)
1183
+ last_size = sz
1184
+ self.layers.append(this_layer)
1185
+
1186
+ def forward(self,
1187
+ input_features,
1188
+ add_pooling: Optional[bool] = True
1189
+ ):
1190
+ # [s, b, h] -> [b, s, h]
1191
+ input_features = input_features.transpose(0,1).contiguous()
1192
+ if add_pooling:
1193
+ # [b, h]
1194
+ input_features = torch.mean(input_features, dim = 1)
1195
+ for i, layer in enumerate(self.layers):
1196
+ if i > 0:
1197
+ input_features = self.activation_func(input_features)
1198
+ input_features = layer(input_features)
1199
+ return input_features
1200
+
1201
+ class ProteinGLMContactHead(nn.Module):
1202
+ """Head for sentence-level classification tasks."""
1203
+ def __init__(self, config):
1204
+ super().__init__()
1205
+ self.activation_func = config.activation_func
1206
+ self.layers = torch.nn.ModuleList()
1207
+ last_size = config.hidden_size * 2
1208
+ for sz in config.inter_hidden_size:
1209
+ this_layer = torch.nn.Linear(last_size, sz, bias=config.bias)
1210
+ last_size = sz
1211
+ self.layers.append(this_layer)
1212
+
1213
+ def outer_concat(self, x):
1214
+ batch_size, seq_len, features = x.shape
1215
+
1216
+ # Permute to [batch_size, features, seq_len]
1217
+ x = x.permute(0, 2, 1)
1218
+
1219
+ # Introduce new dimensions for broadcasting
1220
+ x_1 = x[:, None, :, :, None] # [batch_size, 1, features, seq_len, 1]
1221
+ x_2 = x[:, None, :, None, :] # [batch_size, 1, features, 1, seq_len]
1222
+
1223
+ # Repeat along new dimensions
1224
+ x_1 = x_1.repeat(1, 1, 1, 1, seq_len) # [batch_size, 1, features, seq_len, seq_len]
1225
+ x_2 = x_2.repeat(1, 1, 1, seq_len, 1) # [batch_size, 1, features, seq_len, seq_len]
1226
+
1227
+ # Concatenate along the second dimension
1228
+ x = torch.cat((x_1, x_2), dim=1) # [batch_size, 2, features, seq_len, seq_len]
1229
+
1230
+ # Get lower triangular indices
1231
+ I, J = torch.tril_indices(seq_len, seq_len, -1)
1232
+
1233
+ # Symmetrize
1234
+ x[:, :, :, I, J] = x[:, :, :, J, I]
1235
+
1236
+ # Permute to desired shape and make contiguous
1237
+ x = x.permute(0, 3, 4, 2, 1).contiguous() # [batch_size, seq_len, seq_len, features, 2]
1238
+
1239
+ # Reshape to combine the last two dimensions
1240
+ x = x.view(batch_size, seq_len, seq_len, features * 2) # [batch_size, seq_len, seq_len, features * 2]
1241
+
1242
+ return x
1243
+
1244
+ def forward(self,
1245
+ input_features,
1246
+ add_pooling: Optional[bool] = True
1247
+ ):
1248
+ # [s, b, h] -> [b, s, h]
1249
+ input_features = input_features.transpose(0,1).contiguous()
1250
+ input_features = self.outer_concat(input_features)
1251
+ for i, layer in enumerate(self.layers):
1252
+ if i > 0:
1253
+ input_features = self.activation_func(input_features)
1254
+ input_features = layer(input_features)
1255
+ return input_features
1256
+
1257
+
1258
+
1259
+
1260
+
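To make the pairwise feature construction above concrete, the following standalone copy of `outer_concat` (illustration only) shows the shape flow: per-residue features of shape [batch, seq_len, features] become symmetric pair features of shape [batch, seq_len, seq_len, 2 * features].

import torch

def outer_concat(x: torch.Tensor) -> torch.Tensor:
    batch_size, seq_len, features = x.shape
    x = x.permute(0, 2, 1)                                     # [b, f, s]
    x_1 = x[:, None, :, :, None].repeat(1, 1, 1, 1, seq_len)   # [b, 1, f, s, s]
    x_2 = x[:, None, :, None, :].repeat(1, 1, 1, seq_len, 1)   # [b, 1, f, s, s]
    x = torch.cat((x_1, x_2), dim=1)                           # [b, 2, f, s, s]
    i, j = torch.tril_indices(seq_len, seq_len, -1)
    x[:, :, :, i, j] = x[:, :, :, j, i]                        # symmetrise the pair grid
    x = x.permute(0, 3, 4, 2, 1).contiguous()                  # [b, s, s, f, 2]
    return x.view(batch_size, seq_len, seq_len, features * 2)

pairs = outer_concat(torch.randn(2, 5, 4))
print(pairs.shape)  # torch.Size([2, 5, 5, 8])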
1261
+ class ProteinGLMForCasualLM(ProteinGLMPreTrainedModel):
1262
+ def __init__(self, config: ProteinGLMConfig, empty_init=True, device=None):
1263
+ super().__init__(config)
1264
+
1265
+ self.max_sequence_length = config.max_length
1266
+ self.transformer = ProteinGLMModel(config, empty_init=empty_init, device=device)
1267
+ self.config = config
1268
+ if self.config.quantization_bit:
1269
+ print(f"Begin Quantization to {self.config.quantization_bit} bit")
1270
+ self.quantize(self.config.quantization_bit, empty_init=True, device=device)
1271
+
1272
+ def _update_model_kwargs_for_generation(
1273
+ self,
1274
+ outputs: ModelOutput,
1275
+ model_kwargs: Dict[str, Any],
1276
+ is_encoder_decoder: bool = False,
1277
+ ) -> Dict[str, Any]:
1278
+ # update past_key_values
1279
+ cache_name, cache = self._extract_past_from_model_output(outputs)
1280
+ model_kwargs[cache_name] = cache
1281
+
1282
+ # update attention mask
1283
+ if "attention_mask" in model_kwargs:
1284
+ attention_mask = model_kwargs["attention_mask"]
1285
+ model_kwargs["attention_mask"] = torch.cat(
1286
+ [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
1287
+ )
1288
+
1289
+ # update position ids
1290
+ if "position_ids" in model_kwargs:
1291
+ position_ids = model_kwargs["position_ids"]
1292
+ new_position_id = position_ids[..., -1:].clone() # [batch_size, 2, 1]
1293
+ if self.config.rotary_embedding_2d:
1294
+ new_position_id[:, 1] += 1 # Only update the 2nd dimension
1295
+ else:
1296
+ new_position_id[:] += 1
1297
+ model_kwargs["position_ids"] = torch.cat(
1298
+ [position_ids, new_position_id], dim=-1
1299
+ ) # [batch_size, 2, seq_len+1]
1300
+
1301
+ model_kwargs["is_first_forward"] = False
1302
+ return model_kwargs
1303
+
1304
+ def prepare_inputs_for_generation(
1305
+ self,
1306
+ input_ids: torch.LongTensor,
1307
+ past_key_values: Optional[torch.Tensor] = None,
1308
+ attention_mask: Optional[torch.Tensor] = None,
1309
+ position_ids: Optional[torch.Tensor] = None,
1310
+ use_cache: Optional[bool] = None,
1311
+ is_first_forward: bool = True,
1312
+ **kwargs
1313
+ ) -> dict:
1314
+ # only last token for input_ids if past is not None
1315
+ if position_ids is None:
1316
+ position_ids = self.get_position_ids(input_ids, device=input_ids.device) # position_ids: [batch_size, 2, seq_len]
1317
+ if not is_first_forward:
1318
+ if past_key_values is not None:
1319
+ position_ids = position_ids[..., -1:]
1320
+ input_ids = input_ids[:, -1:]
1321
+ return {
1322
+ "input_ids": input_ids,
1323
+ "past_key_values": past_key_values,
1324
+ "position_ids": position_ids,
1325
+ "attention_mask": attention_mask,
1326
+ "return_last_logit": True,
1327
+ "use_cache": use_cache
1328
+ }
1329
+
1330
+ def forward(
1331
+ self,
1332
+ input_ids: Optional[torch.Tensor] = None,
1333
+ position_ids: Optional[torch.Tensor] = None,
1334
+ attention_mask: Optional[torch.Tensor] = None,
1335
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
1336
+ inputs_embeds: Optional[torch.Tensor] = None,
1337
+ labels: Optional[torch.Tensor] = None,
1338
+ use_cache: Optional[bool] = None,
1339
+ output_attentions: Optional[bool] = None,
1340
+ output_hidden_states: Optional[bool] = None,
1341
+ return_dict: Optional[bool] = None,
1342
+ return_last_logit: Optional[bool] = False
1343
+ ):
1344
+ if self.config.is_causal:
1345
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1346
+ else:
1347
+ use_cache = False
1348
+
1349
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1350
+
1351
+ if position_ids is None:
1352
+ position_ids = self.get_position_ids(input_ids, device=input_ids.device)
1353
+
1354
+ transformer_outputs = self.transformer(
1355
+ input_ids=input_ids,
1356
+ position_ids=position_ids, # position_ids: [batch_size, 2, seq_len]
1357
+ attention_mask=attention_mask,
1358
+ past_key_values=past_key_values,
1359
+ inputs_embeds=inputs_embeds,
1360
+ use_cache=use_cache,
1361
+ output_hidden_states=output_hidden_states,
1362
+ return_dict=return_dict
1363
+ )
1364
+ hidden_states = transformer_outputs[0]
1365
+ if return_last_logit:
1366
+ hidden_states = hidden_states[-1:]
1367
+ lm_logits = self.transformer.output_layer(hidden_states)
1368
+ lm_logits = lm_logits.transpose(0, 1).contiguous()
1369
+
1370
+ loss = None
1371
+ if labels is not None:
1372
+ lm_logits = lm_logits.to(torch.float32)
1373
+
1374
+ # Shift so that tokens < n predict n
1375
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1376
+ shift_labels = labels[..., 1:].contiguous()
1377
+ # Flatten the tokens
1378
+ loss_fct = CrossEntropyLoss(ignore_index=-100)
1379
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1380
+
1381
+ lm_logits = lm_logits.to(hidden_states.dtype)
1382
+ loss = loss.to(hidden_states.dtype)
1383
+
1384
+ if not return_dict:
1385
+ output = (lm_logits,) + transformer_outputs[1:]
1386
+ return ((loss,) + output) if loss is not None else output
1387
+
1388
+ return CausalLMOutputWithPast(
1389
+ loss=loss,
1390
+ logits=lm_logits,
1391
+ past_key_values=transformer_outputs.past_key_values,
1392
+ hidden_states=transformer_outputs.hidden_states,
1393
+ attentions=transformer_outputs.attentions,
1394
+ )
1395
+
1396
+ @staticmethod
1397
+ def _reorder_cache(
1398
+ past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
1399
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
1400
+ """
1401
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
1402
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
1403
+ beam_idx at every generation step.
1404
+
1405
+ Output shares the same memory storage as `past`.
1406
+ """
1407
+ return tuple(
1408
+ (
1409
+ layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)),
1410
+ layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)),
1411
+ )
1412
+ for layer_past in past
1413
+ )
1414
+
1415
+ @torch.inference_mode()
1416
+ def chat(self, tokenizer, query: str, max_length: int = 256, num_beams=1, do_sample=True,
1417
+ top_p=1.0, temperature=1.0, logits_processor=None, **kwargs):
1418
+ if logits_processor is None:
1419
+ logits_processor = LogitsProcessorList()
1420
+ logits_processor.append(InvalidScoreLogitsProcessor())
1421
+ gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p,
1422
+ "temperature": temperature, "logits_processor": logits_processor, **kwargs}
1423
+ inputs = tokenizer.apply_chat_template(query, add_generation_prompt=True, tokenize=True,
1424
+ return_tensors="pt", return_dict=True)
1425
+ position_ids = self.get_position_ids(inputs['input_ids'], device=self.device) # TODO: ADD BATCH
1426
+ eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<eop>")]
1427
+ inputs["position_ids"] = position_ids
1428
+ inputs = inputs.to(self.device)
1429
+ outputs = self.generate(**inputs, **gen_kwargs, eos_token_id=eos_token_id)
1430
+ outputs = outputs.tolist()[0][3:] # 3 for generation prompt "<gmask><sop><eos>"
1431
+ if outputs[-1] in eos_token_id:
1432
+ outputs = outputs[:-1]
1433
+ response = tokenizer.decode(outputs)
1434
+ return response
1435
+
1436
+ # TODO: fix bug in streaming chat
1437
+ @torch.inference_mode()
1438
+ def stream_chat(self, tokenizer, query: str, max_length: int = 56, num_beams=1, do_sample=True,
1439
+ top_p=0.8, temperature=0.8, logits_processor=None, past_key_values = None, **kwargs):
1440
+ if logits_processor is None:
1441
+ logits_processor = LogitsProcessorList()
1442
+ logits_processor.append(InvalidScoreLogitsProcessor())
1443
+ eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<eop>")]
1444
+ gen_kwargs = {"max_length": max_length, "do_sample": do_sample, "top_p": top_p,
1445
+ "temperature": temperature, "logits_processor": logits_processor, **kwargs}
1446
+ inputs = tokenizer.apply_chat_template(query, add_generation_prompt=True, tokenize=True,
1447
+ return_tensors="pt", return_dict=True)
1448
+ position_ids = self.get_position_ids(inputs['input_ids'], device=self.device) # TODO: ADD BATCH
1450
+ inputs["position_ids"] = position_ids
1451
+ inputs = inputs.to(self.device)
1452
+ offset = 3 # 3 for generation prompt
1453
+ for outputs in self.stream_generate(**inputs, past_key_values=past_key_values,
1454
+ eos_token_id=eos_token_id, return_past_key_values=False,
1455
+ **gen_kwargs):
1456
+ outputs = outputs.tolist()[0][3:]
1457
+ if outputs[-1] in eos_token_id:
1458
+ outputs = outputs[:-1]
1459
+ # offset = 3 + len(outputs)
1460
+ response = tokenizer.decode(outputs)
1461
+ if response:
1462
+ yield response
1463
+
1464
+ @torch.inference_mode()
1465
+ def stream_generate(
1466
+ self,
1467
+ input_ids,
1468
+ generation_config: Optional[GenerationConfig] = None,
1469
+ logits_processor: Optional[LogitsProcessorList] = None,
1470
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
1471
+ prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
1472
+ return_past_key_values=False,
1473
+ **kwargs,
1474
+ ):
1476
+ batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
1477
+
1478
+ if generation_config is None:
1479
+ generation_config = self.generation_config
1480
+ generation_config = copy.deepcopy(generation_config)
1481
+ model_kwargs = generation_config.update(**kwargs)
1482
+ model_kwargs["use_cache"] = generation_config.use_cache
1483
+ bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id
1484
+
1485
+ if isinstance(eos_token_id, int):
1486
+ eos_token_id = [eos_token_id]
1487
+ eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None
1488
+
1489
+ has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
1490
+ if has_default_max_length and generation_config.max_new_tokens is None:
1491
+ warnings.warn(
1492
+ f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
1493
+ "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
1494
+ " recommend using `max_new_tokens` to control the maximum length of the generation.",
1495
+ UserWarning,
1496
+ )
1497
+ elif generation_config.max_new_tokens is not None:
1498
+ generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
1499
+ if not has_default_max_length:
1500
+ logger.warning(
1501
+ f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
1502
+ f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
1503
+ "Please refer to the documentation for more information. "
1504
+ "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)",
1505
+ UserWarning,
1506
+ )
1507
+
1508
+ if input_ids_seq_length >= generation_config.max_length:
1509
+ input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
1510
+ logger.warning(
1511
+ f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
1512
+ f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
1513
+ " increasing `max_new_tokens`."
1514
+ )
1515
+
1516
+ # 2. Set generation parameters if not already defined
1517
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
1518
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
1519
+
1520
+ logits_processor = self._get_logits_processor(
1521
+ generation_config=generation_config,
1522
+ input_ids_seq_length=input_ids_seq_length,
1523
+ encoder_input_ids=input_ids,
1524
+ prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
1525
+ logits_processor=logits_processor,
1526
+ )
1527
+
1528
+ stopping_criteria = self._get_stopping_criteria(
1529
+ generation_config=generation_config, stopping_criteria=stopping_criteria
1530
+ )
1531
+ logits_warper = self._get_logits_warper(generation_config)
1532
+
1533
+ unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
1534
+ scores = None
1535
+ while True:
1536
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
1537
+ # forward pass to get next token
1538
+ outputs = self(
1539
+ **model_inputs,
1540
+ return_dict=True,
1541
+ output_attentions=False,
1542
+ output_hidden_states=False,
1543
+ )
1544
+
1545
+ next_token_logits = outputs.logits[:, -1, :]
1546
+
1547
+ # pre-process distribution
1548
+ next_token_scores = logits_processor(input_ids, next_token_logits)
1549
+ next_token_scores = logits_warper(input_ids, next_token_scores)
1550
+
1551
+ # sample
1552
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
1553
+ if generation_config.do_sample:
1554
+ next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
1555
+ else:
1556
+ next_tokens = torch.argmax(probs, dim=-1)
1557
+ # update generated ids, model inputs, and length for next step
1558
+ input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
1559
+ model_kwargs = self._update_model_kwargs_for_generation(
1560
+ outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
1561
+ )
1562
+ unfinished_sequences = unfinished_sequences.mul(
1563
+ next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)
1564
+ )
1565
+ if return_past_key_values:
1566
+ yield input_ids, outputs.past_key_values
1567
+ else:
1568
+ yield input_ids
1569
+ # stop when each sentence is finished, or if we exceed the maximum length
1570
+ if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
1571
+ break
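For orientation, a minimal generation sketch under the assumption that this repository can be loaded from the Hub with trust_remote_code=True; the repository id, dtype and sampling settings below are placeholders, and `chat` is the convenience method defined above.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "<org-or-user>/proteinglm-3b-clm"   # placeholder repository id
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo, trust_remote_code=True, torch_dtype=torch.bfloat16
).eval()

prompt = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"  # partial protein sequence to extend
completion = model.chat(tokenizer, prompt, max_length=128, do_sample=True, top_p=0.9)
print(completion)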
tokenization_proteinglm.py ADDED
@@ -0,0 +1,140 @@
1
+ """Tokenization classes for ProteinGLM."""
2
+
3
+ import os
4
+ from typing import List, Optional, Union, Dict, Any
5
+ from torch import TensorType
6
+ from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast
7
+ from transformers.tokenization_utils_base import EncodedInput, BatchEncoding
8
+
9
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
10
+
11
+
12
+ def load_vocab_file(vocab_file: str) -> List[str]:
13
+ with open(vocab_file, "r") as f:
14
+ lines = f.read().splitlines()
15
+ return [line.strip() for line in lines]
16
+
17
+
18
+ class ProteinGLMTokenizer(PreTrainedTokenizer):
19
+ """
20
+ Constructs a ProteinGLM tokenizer.
21
+ """
22
+
23
+ vocab_files_names = VOCAB_FILES_NAMES
24
+ model_input_names = ["input_ids", "attention_mask", "position_ids"]
25
+ def __init__(
26
+ self,
27
+ vocab_file: str,
28
+ unk_token: str = "<unk>",
29
+ pad_token: str = "<pad>",
30
+ mask_token: str = "<mask>",
31
+ eos_token: str = "<eos>",
32
+ model_max_length: int = 2048,
33
+ additional_special_tokens: Optional[List[str]] = None,
34
+ **kwargs,
35
+ ):
36
+ self.all_tokens = load_vocab_file(vocab_file)
37
+ self._id_to_token = dict(enumerate(self.all_tokens))
38
+ self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
39
+
40
+ if additional_special_tokens is None:
41
+ additional_special_tokens = ['<pad>', '<mask>', '<gmask>', '<smask>', '<eod>', '<sop>', '<eop>', '<eos>', '<unk>']
42
+
43
+ super().__init__(
44
+ unk_token=unk_token,
45
+ pad_token=pad_token,
46
+ mask_token=mask_token,
47
+ eos_token=eos_token,
48
+ model_max_length=model_max_length,
49
+ additional_special_tokens=additional_special_tokens,
50
+ **kwargs,
51
+ )
52
+
53
+ self.unique_no_split_tokens = self.all_tokens
54
+ self._update_trie(self.unique_no_split_tokens)
55
+
56
+ def _convert_id_to_token(self, index: int) -> str:
57
+ return self._id_to_token.get(index, self.unk_token)
58
+
59
+ def _convert_token_to_id(self, token: str) -> int:
60
+ return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
61
+
62
+ def _tokenize(self, text: str, **kwargs) -> List[str]:
63
+ return text.split()
64
+
65
+ def get_vocab(self) -> dict:
66
+ base_vocab = self._token_to_id.copy()
67
+ base_vocab.update(self.added_tokens_encoder)
68
+ return base_vocab
69
+
70
+ def token_to_id(self, token: str) -> int:
71
+ return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
72
+
73
+ def id_to_token(self, index: int) -> str:
74
+ return self._id_to_token.get(index, self.unk_token)
75
+
76
+ def build_inputs_with_special_tokens(
77
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
78
+ ) -> List[int]:
79
+ sep = [self.eos_token_id]
80
+ if token_ids_1 is None:
81
+ if self.eos_token_id is None:
82
+ return token_ids_0
83
+ else:
84
+ return token_ids_0 + sep
85
+ elif self.eos_token_id is None:
86
+ raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
87
+ return token_ids_0 + sep + token_ids_1 + sep # Multiple inputs always have an EOS token
88
+
89
+
90
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
91
+ vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "tokenizer.model")
92
+ with open(vocab_file, "w") as f:
93
+ f.write("\n".join(self.all_tokens))
94
+ return (vocab_file,)
95
+
96
+ @property
97
+ def vocab_size(self) -> int:
98
+ return len(self.all_tokens)
99
+
100
+ def apply_chat_template(
101
+ self,
102
+ query,
103
+ add_generation_prompt: bool = True,
104
+ tokenize: bool = True,
105
+ padding: bool = False,
106
+ truncation: bool = False,
107
+ max_length: Optional[int] = None,
108
+ return_tensors: Optional[Union[str, TensorType]] = None,
109
+ return_dict: bool = False,
110
+ tokenizer_kwargs: Optional[Dict[str, Any]] = None,
111
+ add_special_tokens: bool = True,
112
+ **kwargs,
113
+ ) -> Union[str, List[int], List[str], List[List[int]], BatchEncoding]:
114
+
115
+ generation_prompt = "<gmask><sop><eos>"
116
+ if isinstance(query, str):
117
+ query = [query]
118
+ prompt_query = []
119
+ if add_generation_prompt:
120
+ for each in query:
121
+ assert isinstance(each, str)
122
+ prompt_query.append(generation_prompt+each)
123
+ else:
124
+ prompt_query = query
125
+ if tokenize:
126
+ output = self.batch_encode_plus(
127
+ prompt_query,
128
+ padding=padding,
129
+ truncation=truncation,
130
+ max_length=max_length,
131
+ return_tensors=return_tensors,
132
+ is_split_into_words=True,
133
+ add_special_tokens=False
134
+ )
135
+ if return_dict:
136
+ return output
137
+ else:
138
+ return output["input_ids"]
139
+ else:
140
+ return prompt_query
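A short tokenizer-only sketch (illustration only; the repository id is a placeholder): residues are split into single-character tokens, and `apply_chat_template` prepends the `<gmask><sop><eos>` generation prompt when `add_generation_prompt=True`.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("<org-or-user>/proteinglm-3b-clm", trust_remote_code=True)

seq = "MKTAYIAKQR"
batch = tokenizer.apply_chat_template(
    seq, add_generation_prompt=True, tokenize=True, return_tensors="pt", return_dict=True
)
print(batch["input_ids"].shape)            # expected: [1, len(seq) + 3] (three prompt tokens prepended)
print(tokenizer.decode(batch["input_ids"][0]))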
tokenizer_config.json CHANGED
@@ -86,7 +86,7 @@
86
  ],
87
  "auto_map": {
88
  "AutoTokenizer": [
89
- "tokenization_xtrimopglm.xTrimoPGLMTokenizer",
90
  null
91
  ]
92
  },
@@ -95,6 +95,6 @@
95
  "mask_token": "<mask>",
96
  "model_max_length": 2048,
97
  "pad_token": "<pad>",
98
- "tokenizer_class": "xTrimoPGLMTokenizer",
99
  "unk_token": "<unk>"
100
  }
 
86
  ],
87
  "auto_map": {
88
  "AutoTokenizer": [
89
+ "tokenization_proteinglm.ProteinGLMTokenizer",
90
  null
91
  ]
92
  },
 
95
  "mask_token": "<mask>",
96
  "model_max_length": 2048,
97
  "pad_token": "<pad>",
98
+ "tokenizer_class": "ProteinGLMTokenizer",
99
  "unk_token": "<unk>"
100
  }