jxm committed on
Commit 88513b3 · verified · 1 Parent(s): 2e39642

re-add source w residual

Files changed (1)
  1. model.py +466 -51
model.py CHANGED
@@ -1,17 +1,439 @@
1
 - from typing import Optional
3
   import copy
4
  import torch
5
  import torch.nn as nn
6
  import transformers
7
 
8
- from cde.lib.dist import print0
9
- from cde.lib.tensor import mean_pool, mean_pool_3d, mean_pool_weighted, last_token_pool
10
 
11
 - from cde.lib import load_embedder_and_tokenizer, ContextualModelConfig
12
 
13
 
14
 - gpt_tokenizer = transformers.AutoTokenizer.from_pretrained("gpt2")
15
 
16
  def limit_layers(model: transformers.PreTrainedModel, n_layers: int) -> None:
17
  if hasattr(model, 'transformer'):
@@ -27,6 +449,7 @@ def limit_layers(model: transformers.PreTrainedModel, n_layers: int) -> None:
27
  model.encoder.layer = model.encoder.layer[:n_layers]
28
  else:
29
  raise RuntimeError(f"unknown how to limit layers of model {type(model)}")
 
30
 
31
 
32
  def disable_dropout(model: torch.nn.Module):
@@ -78,8 +501,7 @@ class ContextualModelMixin(nn.Module):
78
 
79
  def _prepare_dataset_embeddings(
80
  self,
81
- input_ids: torch.Tensor,
82
- dataset_embeddings: torch.Tensor,
83
  null_dataset_embedding: bool = False,
84
  ) -> torch.Tensor:
85
  if not isinstance(dataset_embeddings, torch.Tensor):
@@ -89,6 +511,9 @@ class ContextualModelMixin(nn.Module):
89
  # Auto-expand for a batch.
90
  dataset_embeddings = dataset_embeddings[None, :, :] # (b, d) -> (1, b, d)
91
   dataset_embeddings = dataset_embeddings.to(input_ids.device)
92
 
93
  batch_size = input_ids.shape[0]
94
  if (self.transductive_tokens_per_document > 1):
@@ -107,9 +532,11 @@ class ContextualModelMixin(nn.Module):
107
  dataset_embeddings = dataset_embeddings[R].reshape((batch_size, self.num_corpus_tokens, self.hidden_size))
108
  else:
109
  dataset_embeddings = dataset_embeddings.reshape((1, self.num_corpus_tokens, self.hidden_size))
110
- # print("reshaped to dataset_embeddings.shape =", dataset_embeddings.shape)
111
 
112
 - if dataset_embeddings.shape[1] > self.num_corpus_tokens:
113
  # If too many dataset embeddings are passed in, just take the first N until
114
  # we have the proper number.
115
  dataset_embeddings = dataset_embeddings[:, :self.num_corpus_tokens, :]
@@ -137,12 +564,24 @@ class ContextualModelMixin(nn.Module):
137
  soft_prompt = self.prompt_projection(soft_prompt).reshape((1, self.n_soft_prompt, self.hidden_size))
138
  soft_prompt = soft_prompt.expand((len(dataset_embeddings), -1, -1)) # -> (b, 4+b, d) # soft_prompt.repeat((len(input_ids), 1, 1))
139
   soft_prompt = torch.cat((dataset_embeddings, soft_prompt), dim=1)
140
 
141
  return soft_prompt
142
 
143
-
144
  class BiEncoder(transformers.PreTrainedModel):
145
- config_class = ContextualModelConfig
146
  embedder: transformers.PreTrainedModel
147
  def __init__(
148
  self,
@@ -199,6 +638,7 @@ class BiEncoder(transformers.PreTrainedModel):
199
  attention_mask=attention_mask,
200
  ).last_hidden_state
201
  )
 
202
  if self.transductive_tokens_per_document > 1:
203
  document_embeddings = None
204
  batch_size, seq_length, output_dim = outputs.shape
@@ -233,7 +673,6 @@ class BiEncoder(transformers.PreTrainedModel):
233
  else:
234
  document_embeddings = document_embeddings.max(dim=1)
235
  output = self.mlp(document_embeddings)
236
- # breakpoint()
237
 
238
  if output_hidden_states:
239
  return {
@@ -258,9 +697,10 @@ class DatasetConditionedAutoregressive(transformers.PreTrainedModel, ContextualM
258
  self.contextual_init()
259
  disable_causality(self.backbone)
260
 
261
- self.pool_ignore_contextual_tokens = vars(self.config).get("pool_ignore_contextual_tokens", False)
262
- self.pool_ignore_instruction_tokens = vars(self.config).get("pool_ignore_instruction_tokens", False)
263
- self.pool_instruction_end_id = self.backbone.config.bos_token_id
 
264
 
265
  # Override contextual init
266
  self.output_projection = torch.nn.Sequential(
@@ -286,7 +726,7 @@ class DatasetConditionedAutoregressive(transformers.PreTrainedModel, ContextualM
286
  def _shift_rotary_embedding(self) -> None:
287
  disable_transductive_rotary_embedding = vars(self.config).get("disable_transductive_rotary_embedding", True)
288
  # TODO: Can we do this for LLAMA?
289
- print0("Warning: Positional embedding disabling not implemented for LLAMA.")
290
 
291
  def forward(
292
  self,
@@ -312,6 +752,7 @@ class DatasetConditionedAutoregressive(transformers.PreTrainedModel, ContextualM
312
  soft_prompt = soft_prompt.reshape(
313
  (soft_prompt.shape[0], -1, self.backbone_hidden_size)
314
  )
 
315
  # print("[DatasetConditionedAutoregressive] 2 -> soft_prompt.shape =", soft_prompt.shape)
316
 
317
  backbone_attention_mask = torch.ones(
@@ -333,34 +774,11 @@ class DatasetConditionedAutoregressive(transformers.PreTrainedModel, ContextualM
333
  output_hidden_states=True,
334
  ) # (1, 4 + b + s, d)
335
  # trim soft prompt
336
- output_vectors = output.hidden_states[-1]
337
  n_soft_prompt_tokens = soft_prompt.shape[1]
338
 
339
- if self.pool_ignore_instruction_tokens:
340
- # Denote the end of an instruction with an extra BOS token.
341
- # This is a bit arcane but relies on the fact that there will be a BOS token after the
342
- # instruction, but also there may or may not be a BOS token at the beginning.
343
- instruction_end_idx = (
344
- (input_ids == self.pool_instruction_end_id) &
345
- attention_mask &
346
- (torch.arange(input_ids.shape[1], device=input_ids.device)[None, :] > 0)
347
- ).int().argmax(1)
348
- is_instruction_token_mask = (
349
- torch.arange(input_ids.shape[1], device=input_ids.device)[None, :] <= instruction_end_idx[:, None]
350
- )
351
- # catch edge case where there is no instruction
352
- is_instruction_token_mask = is_instruction_token_mask.where(
353
- (instruction_end_idx > 0)[:, None], torch.zeros_like(is_instruction_token_mask)
354
- )
355
- input_attention_mask = torch.cat((
356
- backbone_attention_mask,
357
- attention_mask & ~is_instruction_token_mask), dim=1
358
- )
359
-
360
- output_attention_mask = input_attention_mask
361
- if self.pool_ignore_contextual_tokens:
362
- output_vectors = output_vectors[:, n_soft_prompt_tokens:, :]
363
- output_attention_mask = output_attention_mask[:, n_soft_prompt_tokens:]
364
 
365
  # Take last token position
366
  if vars(self.config).get("pooling_strategy") == "last_token":
@@ -371,6 +789,7 @@ class DatasetConditionedAutoregressive(transformers.PreTrainedModel, ContextualM
371
  output_pooled = mean_pool_weighted(output_vectors, output_attention_mask)
372
 
373
  # average with original vectors
 
374
  output = self.output_projection(output_pooled) # (b, 2d) -> (b, d)
375
 
376
  if output_hidden_states:
@@ -382,6 +801,7 @@ class DatasetConditionedAutoregressive(transformers.PreTrainedModel, ContextualM
382
  return output
383
 
384
 
 
385
  class DatasetConditionedBiencoder(transformers.PreTrainedModel, ContextualModelMixin):
386
  def __init__(
387
  self,
@@ -418,7 +838,7 @@ class DatasetConditionedBiencoder(transformers.PreTrainedModel, ContextualModelM
418
  if hasattr(module, "rotary_emb_dim"):
419
  module.rotary_start_pos = rotary_start_pos
420
  rotary_disabled += 1
421
- print0(f"modified {rotary_disabled} rotary modules – set rotary_start_pos to {rotary_start_pos}")
422
 
423
  def forward(
424
  self,
@@ -547,7 +967,7 @@ class ContextualDocumentEmbeddingTransformer(transformers.PreTrainedModel):
547
  ):
548
  super().__init__(config=config)
549
  dataset_backbone, _ = load_embedder_and_tokenizer(
550
- vars(config).get("dataset_backbone") or config.embedder
551
  )
552
 
553
  if config.limit_layers:
@@ -592,7 +1012,7 @@ class ContextualDocumentEmbeddingTransformer(transformers.PreTrainedModel):
592
  output_hidden_states: bool = False,
593
  ) -> torch.Tensor:
594
  """
595
- input_ids (long torch.Tensor) – ids of input tokens
596
  attention_mask (bool torch.Tensor)
597
  """
598
  dataset_embeddings = self.first_stage_model(
@@ -606,17 +1026,12 @@ class ContextualDocumentEmbeddingTransformer(transformers.PreTrainedModel):
606
  output_hidden_states=output_hidden_states,
607
  )
608
 
609
-
610
-
611
  def get_model_class(name: str):
612
  if name in 'transductive':
613
  return ContextualDocumentEmbeddingTransformer
614
  elif name == 'biencoder':
615
  return BiEncoder
616
- elif name == "biencoder_plus_plus":
617
- from cde.model_extra import BiEncoderPlusPlus
618
- return BiEncoderPlusPlus
619
  elif name == "dataset_prefix_biencoder":
620
  return DatasetPrefixBiencoder
621
  else:
622
- raise ValueError(f'unknown model cls {name}')
 
1
+ ###################################################################################################
2
+ ###################################################################################################
3
+ ###################################################################################################
4
 
5
+ import collections
6
+ import logging
7
+
8
+ import json
9
+ import math
10
+ import os
11
+ import re
12
+ from collections import OrderedDict
13
+ from functools import partial
14
+ from typing import List, Optional, Tuple, Union
15
+
16
+ import torch
17
+ import torch.nn as nn
18
+
19
+
20
+
21
+ ########################################################
22
+ ########################################################
23
+ ########################################################
24
+ ########################################################
25
+
26
+
27
+ from typing import Callable, Optional, Tuple
28
  import copy
29
+ import math
30
+ import multiprocessing
31
+ import os
32
+
33
  import torch
34
  import torch.nn as nn
35
  import transformers
36
 
 
 
37
 
38
+ class ContextualModelConfig(transformers.configuration_utils.PretrainedConfig):
39
+ """We create a dummy configuration class that will just set properties
40
+ based on whatever kwargs we pass in.
41
+
42
+ When this class is initialized (see experiments.py) we pass in the
43
+ union of all data, model, and training args, all of which should
44
+ get saved to the config json.
45
+ """
46
+
47
+ def __init__(self, **kwargs):
48
+ for key, value in kwargs.items():
49
+ try:
50
+ json.dumps(value)
51
+ setattr(self, key, value)
52
+ except TypeError:
53
+ # value was not JSON-serializable, skip
54
+ continue
55
+ super().__init__()
56
+
57
+
58
+ def load_embedder_and_tokenizer(name: str) -> Tuple[
59
+ transformers.PreTrainedModel,
60
+ transformers.PreTrainedTokenizer
61
+ ]:
62
+ print("Loading model:", name)
63
+ if name.startswith("nomic") or (name == "bert-base-uncased"):
64
+ model = ContextualNomicBertForPreTraining.from_pretrained(name, trust_remote_code=True).bert
65
+ tokenizer = transformers.AutoTokenizer.from_pretrained(name)
66
+ elif name in ["gtr-base", "gtr_base"]:
67
+ model = transformers.AutoModel.from_pretrained(
68
+ "sentence-transformers/gtr-t5-base"
69
+ ).encoder
70
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
71
+ "sentence-transformers/gtr-t5-base"
72
+ )
73
+ elif name == "pile-t5-base-encoder":
74
+ model = transformers.AutoModel.from_pretrained(
75
+ "EleutherAI/pile-t5-base"
76
+ ).encoder
77
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
78
+ "EleutherAI/pile-t5-base"
79
+ )
80
+ tokenizer.pad_token = tokenizer.eos_token
81
+ elif name == "pile-t5-base-decoder":
82
+ model = transformers.AutoModel.from_pretrained(
83
+ "EleutherAI/pile-t5-base"
84
+ ).decoder
85
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
86
+ "EleutherAI/pile-t5-base"
87
+ )
88
+ tokenizer.pad_token = tokenizer.eos_token
89
+ elif name.startswith("gpt2") or name.startswith("meta-llama") or ("Llama" in name):
90
+ model = transformers.AutoModelForCausalLM.from_pretrained(
91
+ name,
92
+ # torch_dtype=torch.bfloat16,
93
+ attn_implementation="flash_attention_2",
94
+ low_cpu_mem_usage=True,
95
+ # device_map="auto",
96
+ )
97
+ model.padding_side = "right"
98
+ tokenizer = transformers.AutoTokenizer.from_pretrained(name)
99
+ tokenizer.pad_token = tokenizer.eos_token
100
+ tokenizer.add_eos_token = True
101
+ else:
102
+ model = transformers.AutoModel.from_pretrained(name, trust_remote_code=True)
103
+ tokenizer = transformers.AutoTokenizer.from_pretrained(name)
104
+
105
+ # if use_bettertransformer:
106
+ # from optimum.bettertransformer import BetterTransformer
107
+ # model = BetterTransformer.transform(model)
108
+ return model, tokenizer
109
+
110
+
111
+ def get_world_size() -> int:
112
+ try:
113
+ return torch.distributed.get_world_size()
114
+ except (RuntimeError, ValueError):
115
+ return 1
116
+
117
+
118
+ def get_rank() -> int:
119
+ try:
120
+ return torch.distributed.get_rank()
121
+ except (RuntimeError, ValueError):
122
+ return 0
123
+
124
+ def gather(t: torch.Tensor) -> torch.Tensor:
125
+ # torch.distributed.nn.all_gather scales by world size since the reduce op is SUM
126
+ # https://github.com/pytorch/pytorch/issues/58005
127
+ # only should use torch.distributed.nn.all_gather if we implement a `local_loss`
128
+ # like: https://github.com/mlfoundations/open_clip/issues/616
129
+ world_size = get_world_size()
130
+ if world_size == 1:
131
+ return t
132
+
133
+ if t.ndim == 0:
134
+ t = t.unsqueeze(0)
135
+
136
+ gathered = [torch.empty_like(t) for _ in range(world_size)]
137
+ torch.distributed.all_gather(gathered, t)
138
+ gathered[get_rank()] = t
139
+ return torch.cat(gathered, dim=0)
140
+
141
+
142
+ def gather_sum(t: torch.Tensor) -> torch.Tensor:
143
+ # torch.distributed.nn.all_gather scales by world size since the reduce op is SUM
144
+ # https://github.com/pytorch/pytorch/issues/58005
145
+ # only should use torch.distributed.nn.all_gather if we implement a `local_loss`
146
+ # like: https://github.com/mlfoundations/open_clip/issues/616
147
+ world_size = get_world_size()
148
+ if world_size == 1:
149
+ return t
150
+
151
+ if t.ndim == 0:
152
+ t = t.unsqueeze(0)
153
+
154
+ gathered = [torch.empty_like(t) for _ in range(world_size)]
155
+ torch.distributed.all_gather(gathered, t)
156
+ gathered = torch.stack(gathered, dim=0)
157
+ return gathered.sum(dim=0) # Sum across workers
158
 
159
 
160
+ def get_num_proc() -> int:
161
+ world_size: int = get_world_size()
162
+ try:
163
+ # os.sched_getaffinity respects schedulers, unlike cpu_count(), but it's only available
164
+ # on some Unix platforms, so we support both!
165
+ return len(os.sched_getaffinity(0)) // world_size # type: ignore[attr-defined]
166
+ except AttributeError:
167
+ return multiprocessing.cpu_count() // world_size
168
+
169
+
170
+ def torch_main_worker_finish_first(func: Callable):
171
+ def wrapper(*args, **kwargs):
172
+ # Get local rank (need to support non-DDP).
173
+ try:
174
+ local_rank = torch.distributed.get_rank()
175
+ ddp_enabled = True
176
+ except (RuntimeError, ValueError):
177
+ local_rank = -1
178
+ ddp_enabled = False
179
+ is_main_worker = local_rank <= 0
180
+ # Run on main worker first.
181
+ if is_main_worker:
182
+ result = func(*args, **kwargs)
183
+ # Then everyone waits.
184
+ if ddp_enabled:
185
+ torch.distributed.barrier()
186
+ # Run on other workers now.
187
+ if not is_main_worker:
188
+ result = func(*args, **kwargs)
189
+ # Now everyone waits again.
190
+ if ddp_enabled:
191
+ torch.distributed.barrier()
192
+ return result
193
+
194
+ return wrapper
195
+
196
+
197
+ def print0(*args, **kwargs) -> None:
198
+ if get_rank() == 0:
199
+ print(*args, **kwargs)
200
+
201
+
202
+ def verify_ddp_weights_equal(model: torch.nn.Module, atol: float = 1e-5) -> None:
203
+ if hasattr(model, "module"):
204
+ model = model.module
205
+
206
+ world_size = get_world_size()
207
+
208
+ if world_size > 8:
209
+ print0(f"[verify_ddp_weights_equal] Skipping with world_size={world_size} ⚠️")
210
+ return
211
+
212
+ for name, param in model.named_parameters():
213
+ if param is None: continue
214
+ if param.grad is None:
215
+ print0(f"[verify_ddp_weights_equal] Skipping param [{name}] with no grad")
216
+ continue
217
+ gathered_param = gather(param).reshape((world_size, -1))
218
+ absolute_diffs = (gathered_param[None, 0, :] - gathered_param).abs()
219
+ rank_params_eq = (absolute_diffs < atol).all()
220
+ assert rank_params_eq, f"❌ param [{name}] not equal - got max_absolute_diff={absolute_diffs.max()}"
221
+ ###################################################################################################################
222
+ gathered_param_grad = gather(param.grad).reshape((world_size, -1))
223
+ absolute_grad_diffs = (gathered_param_grad[None, 0, :] - gathered_param_grad).abs()
224
+ rank_grad_params_eq = (absolute_grad_diffs < atol).all()
225
+ assert rank_grad_params_eq, f"❌ param [{name}] grad not equal - got max_absolute_diff={absolute_grad_diffs.max()}"
226
+ ###################################################################################################################
227
+
228
+
229
+ print0("[verify_ddp_weights_equal] Verified DDP parameter correctness ✅")
230
+
231
+
232
+
233
+ def mean_pool_3d(
234
+ hidden_states: torch.Tensor, attention_mask: torch.Tensor
235
+ ) -> torch.Tensor:
236
+ B, T, S, D = hidden_states.shape
237
+ unmasked_outputs = hidden_states * attention_mask[..., None]
238
+ pooled_outputs = unmasked_outputs.sum(dim=2) / (attention_mask.sum(dim=2)[..., None] + 1e-9)
239
+
240
+ # fix for gradient flow: fill empty rows with the mean of the rest of the sequence
241
+ sequence_means = (
242
+ hidden_states.reshape((B, S * T, D))
243
+ .mean(dim=1, keepdim=True)
244
+ .expand(-1, T, -1)
245
+ )
246
+ pooled_outputs = pooled_outputs.where(
247
+ (attention_mask.sum(dim=2)[..., None] > 0),
248
+ sequence_means
249
+ )
250
+ assert pooled_outputs.shape == (B, T, D)
251
+
252
+ return pooled_outputs
253
+
254
+ def mean_pool(
255
+ hidden_states: torch.Tensor, attention_mask: torch.Tensor
256
+ ) -> torch.Tensor:
257
+ B, _S, D = hidden_states.shape
258
+ unmasked_outputs = hidden_states * attention_mask[..., None]
259
+ pooled_outputs = unmasked_outputs.sum(dim=1) / (attention_mask.sum(dim=1)[:, None] + 1e-20)
260
+
261
+ assert pooled_outputs.shape == (B, D)
262
+ return pooled_outputs
263
+
264
+
265
+ def mean_pool_weighted(
266
+ hidden_states: torch.Tensor, attention_mask: torch.Tensor
267
+ ) -> torch.Tensor:
268
+ B, _S, D = hidden_states.shape
269
+ attention_mask *= attention_mask.cumsum(dim=1) # [0,1,1,1,0,0] -> [0,1,2,3,0,0]
270
+ s = torch.sum(hidden_states * attention_mask.unsqueeze(-1).float(), dim=1)
271
+ d = attention_mask.sum(dim=1, keepdim=True).float()
272
+ return s / d
273
+
274
+
275
+ def slice_sparse_tensor_rows(t: torch.sparse.Tensor, min_row: int, max_row: int) -> torch.sparse.Tensor:
276
+ assert min_row < max_row, f"can't slice from row {min_row} to {max_row}"
277
+ t = t.coalesce()
278
+ row_idxs = t.indices()[0]
279
+ index_mask = (min_row <= row_idxs) & (row_idxs < max_row)
280
+
281
+ num_rows = (max_row - min_row)
282
+ num_cols = t.shape[1]
283
+
284
+ idxs = t.indices()[:, index_mask]
285
+ vals = t.values()[index_mask]
286
+ return torch.sparse_coo_tensor(idxs, vals, size=(num_rows, num_cols)).coalesce()
287
+
288
+
289
+ def slice_tensor_rows(t: torch.Tensor, min_row: int, max_row: int) -> torch.Tensor:
290
+ if t.is_sparse:
291
+ return slice_sparse_tensor_rows(t=t, min_row=min_row, max_row=max_row)
292
+ else:
293
+ return t[min_row:max_row]
294
+
295
+
296
+ @torch.no_grad
297
+ def maxsim(
298
+ X: torch.Tensor, y: torch.Tensor,
299
+ maximize: bool, chunk_size: int = 8_000,
300
+ debug_mem_usage: bool = False) -> torch.Tensor:
301
+ device = X.device
302
+ n_samples = X.shape[0]
303
+
304
+ max_sim_v = torch.zeros(n_samples, device=device, dtype=X.dtype)
305
+ max_sim_i = torch.zeros(n_samples, device=device, dtype=torch.int64)
306
+
307
+ # TODO: Implement faster max (without going to dense tensors).
308
+ # TODO: Use multiple GPUs.
309
+ rank = get_rank()
310
+ world_size = get_world_size()
311
+
312
+ worker_worklist_size = int(math.ceil(n_samples / world_size))
313
+ splits_start_idx = worker_worklist_size * rank
314
+ splits_end_idx = worker_worklist_size * (rank + 1)
315
+
316
+ for i in range(splits_start_idx, splits_end_idx, chunk_size):
317
+ start, end = i, min(i + chunk_size, n_samples)
318
+ sub_x = slice_tensor_rows(X, start, end)
319
+ if debug_mem_usage: print(f"[maxsim] step {i} cuda mem free/total = {torch.cuda.mem_get_info()}")
320
+ if debug_mem_usage: print("[maxsim] sub_x.shape:", sub_x.shape, "//", "y.shape:", y.shape)
321
+ sub_sim = sub_x @ y # TODO – Implement sparse max here to save mem!
322
+ sub_sim = sub_sim
323
+ if maximize:
324
+ sub_max_sim_v, sub_max_sim_i = sub_sim.to_dense().max(dim=-1)
325
+ else:
326
+ sub_max_sim_v, sub_max_sim_i = sub_sim.to_dense().min(dim=-1)
327
+ del sub_sim
328
+ del sub_x
329
+ torch.cuda.empty_cache() # needs to happen after maxsim for some reason.
330
+ max_sim_v[start: end] = sub_max_sim_v
331
+ max_sim_i[start: end] = sub_max_sim_i
332
+
333
+ # gather
334
+ max_sim_v = gather_sum(max_sim_v)
335
+ max_sim_i = gather_sum(max_sim_i)
336
+ k = y.shape[1]
337
+
338
+ assert max_sim_v.shape == (n_samples,)
339
+ assert max_sim_i.shape == (n_samples,)
340
+ assert max_sim_i.min() >= 0
341
+ assert max_sim_i.max() <= k
342
+
343
+ return max_sim_v, max_sim_i
344
+
345
+
346
+ def forward_batched(
347
+ model: torch.nn.Module,
348
+ input_ids: torch.Tensor,
349
+ attention_mask: torch.Tensor,
350
+ batch_size: int,
351
+ dataset_input_ids: Optional[torch.Tensor] = None,
352
+ dataset_attention_mask: Optional[torch.Tensor] = None,
353
+ **second_stage_model_kwargs,
354
+ ) -> torch.Tensor:
355
+ if hasattr(model, "module"):
356
+ model = model.module
357
+
358
+ if hasattr(model, "first_stage_model"):
359
+ # Support pooling over 3D dataset_input_ids inputs.
360
+ if len(dataset_input_ids.shape) == 2:
361
+ dataset_input_ids = dataset_input_ids[None]
362
+ dataset_attention_mask = dataset_attention_mask[None]
363
+
364
+ dataset_embeddings = []
365
+ for j in range(len(dataset_input_ids)):
366
+ i = 0
367
+ dataset_embeddings_batch = []
368
+ while i < dataset_input_ids.shape[1]:
369
+ dataset_embeddings_batch.append(
370
+ model.first_stage_model(
371
+ input_ids=dataset_input_ids[j][i:i+batch_size],
372
+ attention_mask=dataset_attention_mask[j][i:i+batch_size],
373
+ )
374
+ )
375
+ i += batch_size
376
+ dataset_embeddings.append(
377
+ torch.cat(dataset_embeddings_batch, dim=0)
378
+ )
379
+
380
+ # Automatically pool over 3D dataset_input_ids.
381
+ dataset_embeddings = torch.stack(dataset_embeddings, dim=0).mean(dim=0)
382
+
383
+ j = 0
384
+ outputs = []
385
+ while j < len(input_ids):
386
+ outputs.append(
387
+ model.second_stage_model(
388
+ input_ids=input_ids[j:j+batch_size],
389
+ attention_mask=attention_mask[j:j+batch_size],
390
+ dataset_embeddings=dataset_embeddings,
391
+ **second_stage_model_kwargs,
392
+ )
393
+ )
394
+ j += batch_size
395
+ return torch.cat(outputs, dim=0)
396
+
397
+ else:
398
+ i = 0
399
+ outputs = []
400
+ while i < len(input_ids):
401
+ outputs.append(
402
+ model(
403
+ input_ids=input_ids[i:i+batch_size],
404
+ attention_mask=attention_mask[i:i+batch_size],
405
+ **second_stage_model_kwargs,
406
+ )
407
+ )
408
+ i += batch_size
409
+ return torch.cat(outputs, dim=0)
410
+
411
+
412
+ def last_token_pool(hidden_state: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
413
+ # https://github.com/ContextualAI/gritlm/blob/main/gritlm/gritlm.py#L190
414
+ b, n, d = hidden_state.size()
415
+ # Get the last `1` in the attention mask of each item
416
+ # Often it is just `gather_indices = torch.argmin(attention_mask, 1, keepdim=False) - 1`
417
+ # except when 1) There's all 1's 2) There's 0's before the 1's
418
+ reversed_mask = torch.flip(attention_mask, dims=(1,))
419
+ argmax_reverse = torch.argmax(reversed_mask, dim=1, keepdim=False)
420
+ gather_indices = attention_mask.size(1) - argmax_reverse - 1
421
+ # If there are empty sequences, where the index would become -1 it will crash so set them to 0
422
+ gather_indices = torch.clamp(gather_indices, min=0)
423
+ # Turn indices from shape [b] -> [b, 1, d]
424
+ gather_indices = gather_indices.unsqueeze(-1).repeat(1, d)
425
+ gather_indices = gather_indices.unsqueeze(1)
426
+ assert gather_indices.shape == (b, 1, d)
427
+ # Gather along the seq len: [b, n, d] -> [b, d]
428
+ # Actually no need for the attention mask as we gather the last token where attn_mask=1 but
429
+ # as some indices (which shouldn't be attended to) may be 0 due to clamp, use mask to ignore them again
430
+ input_mask_expanded = attention_mask.unsqueeze(-1).expand((b, n, d)).float()
431
+ return torch.gather(hidden_state * input_mask_expanded, 1, gather_indices).squeeze(dim=1)
432
+
433
+ def print0(*args, **kwargs) -> None:
434
+ if get_rank() == 0:
435
+ print(*args, **kwargs)
436
+
437
 
438
  def limit_layers(model: transformers.PreTrainedModel, n_layers: int) -> None:
439
  if hasattr(model, 'transformer'):
 
449
  model.encoder.layer = model.encoder.layer[:n_layers]
450
  else:
451
  raise RuntimeError(f"unknown how to limit layers of model {type(model)}")
452
+
453
 
454
 
455
  def disable_dropout(model: torch.nn.Module):
 
501
 
502
  def _prepare_dataset_embeddings(
503
  self,
504
+ input_ids: torch.Tensor, dataset_embeddings: torch.Tensor,
 
505
  null_dataset_embedding: bool = False,
506
  ) -> torch.Tensor:
507
  if not isinstance(dataset_embeddings, torch.Tensor):
 
511
  # Auto-expand for a batch.
512
  dataset_embeddings = dataset_embeddings[None, :, :] # (b, d) -> (1, b, d)
513
  dataset_embeddings = dataset_embeddings.to(input_ids.device)
514
+
515
+ if len(dataset_embeddings.shape) < 3:
516
+ raise ValueError(f"dataset_embeddings must have at least 3 dimensions, got {dataset_embeddings.shape}")
517
 
518
  batch_size = input_ids.shape[0]
519
  if (self.transductive_tokens_per_document > 1):
 
532
  dataset_embeddings = dataset_embeddings[R].reshape((batch_size, self.num_corpus_tokens, self.hidden_size))
533
  else:
534
  dataset_embeddings = dataset_embeddings.reshape((1, self.num_corpus_tokens, self.hidden_size))
 
535
 
536
+
537
+ if dataset_embeddings.shape[1] < self.num_corpus_tokens:
538
+ raise ValueError(f"dataset_embeddings must have at least {self.num_corpus_tokens} tokens, got {dataset_embeddings.shape[1]}")
539
+ elif dataset_embeddings.shape[1] > self.num_corpus_tokens:
540
  # If too many dataset embeddings are passed in, just take the first N until
541
  # we have the proper number.
542
  dataset_embeddings = dataset_embeddings[:, :self.num_corpus_tokens, :]
 
564
  soft_prompt = self.prompt_projection(soft_prompt).reshape((1, self.n_soft_prompt, self.hidden_size))
565
  soft_prompt = soft_prompt.expand((len(dataset_embeddings), -1, -1)) # -> (b, 4+b, d) # soft_prompt.repeat((len(input_ids), 1, 1))
566
  soft_prompt = torch.cat((dataset_embeddings, soft_prompt), dim=1)
567
+
568
+ # print(f"[ContextualModelMixin] soft_prompt.shape = {soft_prompt.shape}")
569
+
570
+ if self.training and self.randomize_dataset_sequence_order:
571
+ randomized_order = torch.stack(
572
+ [
573
+ torch.cat(
574
+ (
575
+ torch.randperm(corpus_size, device=soft_prompt.device),
576
+ torch.arange(self.n_soft_prompt, device=soft_prompt.device) + corpus_size
577
+ ), dim=0)
578
+ for _ in range(batch_size)])
579
+ randomized_order = randomized_order.to(soft_prompt.device)
580
+ soft_prompt = soft_prompt.gather(1, randomized_order[..., None].expand_as(soft_prompt))
581
 
582
  return soft_prompt
583
 
 
584
  class BiEncoder(transformers.PreTrainedModel):
 
585
  embedder: transformers.PreTrainedModel
586
  def __init__(
587
  self,
 
638
  attention_mask=attention_mask,
639
  ).last_hidden_state
640
  )
641
+
642
  if self.transductive_tokens_per_document > 1:
643
  document_embeddings = None
644
  batch_size, seq_length, output_dim = outputs.shape
 
673
  else:
674
  document_embeddings = document_embeddings.max(dim=1)
675
  output = self.mlp(document_embeddings)
 
676
 
677
  if output_hidden_states:
678
  return {
 
697
  self.contextual_init()
698
  disable_causality(self.backbone)
699
 
700
+ self.input_ln = torch.nn.LayerNorm(
701
+ self.backbone_hidden_size,
702
+ eps=1e-5
703
+ )
704
 
705
  # Override contextual init
706
  self.output_projection = torch.nn.Sequential(
 
726
  def _shift_rotary_embedding(self) -> None:
727
  disable_transductive_rotary_embedding = vars(self.config).get("disable_transductive_rotary_embedding", True)
728
  # TODO: Can we do this for LLAMA?
729
+ print("Warning: Positional embedding disabling not implemented for LLAMA.")
730
 
731
  def forward(
732
  self,
 
752
  soft_prompt = soft_prompt.reshape(
753
  (soft_prompt.shape[0], -1, self.backbone_hidden_size)
754
  )
755
+ soft_prompt = self.input_ln(soft_prompt)
756
  # print("[DatasetConditionedAutoregressive] 2 -> soft_prompt.shape =", soft_prompt.shape)
757
 
758
  backbone_attention_mask = torch.ones(
 
774
  output_hidden_states=True,
775
  ) # (1, 4 + b + s, d)
776
  # trim soft prompt
777
+ last_hidden_state = output.hidden_states[-1]
778
  n_soft_prompt_tokens = soft_prompt.shape[1]
779
 
780
+ output_vectors = last_hidden_state[:, n_soft_prompt_tokens:, :]
781
 + output_attention_mask = input_attention_mask[:, n_soft_prompt_tokens:]
782
 
783
  # Take last token position
784
  if vars(self.config).get("pooling_strategy") == "last_token":
 
789
  output_pooled = mean_pool_weighted(output_vectors, output_attention_mask)
790
 
791
  # average with original vectors
792
+ # TODO: Argparse for pooling strategy.
793
  output = self.output_projection(output_pooled) # (b, 2d) -> (b, d)
794
 
795
  if output_hidden_states:
 
801
  return output
802
 
803
 
804
+
805
  class DatasetConditionedBiencoder(transformers.PreTrainedModel, ContextualModelMixin):
806
  def __init__(
807
  self,
 
838
  if hasattr(module, "rotary_emb_dim"):
839
  module.rotary_start_pos = rotary_start_pos
840
  rotary_disabled += 1
841
+ print0(f"modified {rotary_disabled} rotary modules – set rotary_start_pos to {rotary_start_pos}")
842
 
843
  def forward(
844
  self,
 
967
  ):
968
  super().__init__(config=config)
969
  dataset_backbone, _ = load_embedder_and_tokenizer(
970
+ vars(config).get("dataset_backbone", config.embedder)
971
  )
972
 
973
  if config.limit_layers:
 
1012
  output_hidden_states: bool = False,
1013
  ) -> torch.Tensor:
1014
  """
1015
+ input_ids (long torch.Tensor) – ids of input tokens
1016
  attention_mask (bool torch.Tensor)
1017
  """
1018
  dataset_embeddings = self.first_stage_model(
 
1026
  output_hidden_states=output_hidden_states,
1027
  )
1028
 
 
 
1029
  def get_model_class(name: str):
1030
  if name in 'transductive':
1031
  return ContextualDocumentEmbeddingTransformer
1032
  elif name == 'biencoder':
1033
  return BiEncoder
 
 
 
1034
  elif name == "dataset_prefix_biencoder":
1035
  return DatasetPrefixBiencoder
1036
  else:
1037
+ raise ValueError(f'unknown model cls {name}')
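
The commit above only shows the re-added helpers as diff lines. As a quick sanity check, here is a minimal sketch (not part of the commit) that exercises the three pooling helpers this change inlines into model.py. The `from model import ...` path is an assumption about where model.py sits on disk; adjust it to your layout.

import torch

from model import mean_pool, mean_pool_weighted, last_token_pool  # assumed import path

B, S, D = 2, 5, 8
hidden_states = torch.randn(B, S, D)
# Long-typed mask: 1 for real tokens, 0 for (right-side) padding.
attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                               [1, 1, 1, 1, 1]], dtype=torch.long)

# Plain masked mean over the sequence dimension -> (B, D).
pooled_mean = mean_pool(hidden_states, attention_mask)

# Position-weighted mean (later tokens get larger weights). Note that this helper
# multiplies the mask in place, so pass a clone if you still need the original mask.
pooled_weighted = mean_pool_weighted(hidden_states, attention_mask.clone())

# Embedding of the last non-padding token in each sequence -> (B, D).
pooled_last = last_token_pool(hidden_states, attention_mask)

assert pooled_mean.shape == pooled_weighted.shape == pooled_last.shape == (B, D)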