lhallee committed on
Commit caf1ade · verified · 1 Parent(s): 1e40c1b

Upload modeling_esm_plusplus.py with huggingface_hub

Files changed (1)
  1. modeling_esm_plusplus.py +77 -31
modeling_esm_plusplus.py CHANGED
@@ -13,6 +13,7 @@ import os
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
+import networkx as nx
 from dataclasses import dataclass
 from functools import cache, partial
 from pathlib import Path
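The added networkx dependency backs the new graph-based 'parti' pooling in the next hunk: each sequence's attention map is treated as a weighted directed graph over tokens and scored with PageRank. A minimal standalone sketch of the two networkx calls involved (toy 3-token attention matrix; the values are illustrative, not model outputs):

import numpy as np
import networkx as nx

# Toy attention matrix for a 3-token sequence (row i attends to column j).
attention = np.array([
    [0.6, 0.3, 0.1],
    [0.2, 0.5, 0.3],
    [0.1, 0.2, 0.7],
])

# Every nonzero entry becomes a weighted directed edge token_i -> token_j.
G = nx.from_numpy_array(attention, create_using=nx.DiGraph)

# PageRank returns one importance score per token node, keyed by node index.
scores = nx.pagerank(G, alpha=0.85, weight='weight')
print(scores)  # {0: ..., 1: ..., 2: ...}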
@@ -506,79 +507,124 @@ class Pooler:
         self.pooling_options = {
             'mean': self.mean_pooling,
             'max': self.max_pooling,
-            'min': self.min_pooling,
             'norm': self.norm_pooling,
-            'prod': self.prod_pooling,
             'median': self.median_pooling,
             'std': self.std_pooling,
             'var': self.var_pooling,
             'cls': self.cls_pooling,
+            'parti': self._pool_parti,
         }
 
-    def mean_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None): # (b, L, d) -> (b, d)
+    def _create_pooled_matrices_across_layers(self, attentions: torch.Tensor) -> torch.Tensor:
+        maxed_attentions = torch.max(attentions, dim=1)[0]
+        return maxed_attentions
+
+    def _page_rank(self, attention_matrix, personalization=None, nstart=None, prune_type="top_k_outdegree"):
+        # Run PageRank on the attention matrix converted to a graph.
+        # Raises exceptions if the graph doesn't match the token sequence or has no edges.
+        # Returns the PageRank scores for each token node.
+        G = self._convert_to_graph(attention_matrix)
+        if G.number_of_nodes() != attention_matrix.shape[0]:
+            raise Exception(
+                f"The number of nodes in the graph should be equal to the number of tokens in sequence! You have {G.number_of_nodes()} nodes for {attention_matrix.shape[0]} tokens.")
+        if G.number_of_edges() == 0:
+            raise Exception(f"You don't seem to have any attention edges left in the graph.")
+
+        return nx.pagerank(G, alpha=0.85, tol=1e-06, weight='weight', personalization=personalization, nstart=nstart, max_iter=100)
+
+    def _convert_to_graph(self, matrix):
+        # Convert a matrix (e.g., attention scores) to a directed graph using networkx.
+        # Each element in the matrix represents a directed edge with a weight.
+        G = nx.from_numpy_array(matrix, create_using=nx.DiGraph)
+        return G
+
+    def _calculate_importance_weights(self, dict_importance, attention_mask: Optional[torch.Tensor] = None):
+        # Remove keys where attention_mask is 0
+        if attention_mask is not None:
+            for k in list(dict_importance.keys()):
+                if attention_mask[k] == 0:
+                    del dict_importance[k]
+
+        #dict_importance[0] # remove cls
+        #dict_importance[-1] # remove eos
+        total = sum(dict_importance.values())
+        return np.array([v / total for _, v in dict_importance.items()])
+
+    def _pool_parti(self, emb: torch.Tensor, attentions: torch.Tensor, attention_mask: Optional[torch.Tensor] = None): # (b, L, d) -> (b, d)
+        maxed_attentions = self._create_pooled_matrices_across_layers(attentions).numpy()
+        # emb is (b, L, d), maxed_attentions is (b, L, L)
+        emb_pooled = []
+        for e, a, mask in zip(emb, maxed_attentions, attention_mask):
+            dict_importance = self._page_rank(a)
+            importance_weights = self._calculate_importance_weights(dict_importance, mask)
+            num_tokens = int(mask.sum().item())
+            emb_pooled.append(np.average(e[:num_tokens], weights=importance_weights, axis=0))
+        pooled = torch.tensor(np.array(emb_pooled))
+        return pooled
+
+    def mean_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs): # (b, L, d) -> (b, d)
         if attention_mask is None:
             return emb.mean(dim=1)
         else:
             attention_mask = attention_mask.unsqueeze(-1)
             return (emb * attention_mask).sum(dim=1) / attention_mask.sum(dim=1)
 
-    def max_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None): # (b, L, d) -> (b, d)
+    def max_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs): # (b, L, d) -> (b, d)
         if attention_mask is None:
             return emb.max(dim=1).values
         else:
             attention_mask = attention_mask.unsqueeze(-1)
             return (emb * attention_mask).max(dim=1).values
-
-    def min_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None): # (b, L, d) -> (b, d)
-        if attention_mask is None:
-            return emb.min(dim=1).values
-        else:
-            attention_mask = attention_mask.unsqueeze(-1)
-            return (emb * attention_mask).min(dim=1).values
 
-    def norm_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None): # (b, L, d) -> (b, d)
+    def norm_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs): # (b, L, d) -> (b, d)
         if attention_mask is None:
             return emb.norm(dim=1, p=2)
         else:
             attention_mask = attention_mask.unsqueeze(-1)
             return (emb * attention_mask).norm(dim=1, p=2)
 
-    def prod_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None): # (b, L, d) -> (b, d)
-        length = emb.shape[1]
-        if attention_mask is None:
-            return emb.prod(dim=1) / length
-        else:
-            attention_mask = attention_mask.unsqueeze(-1)
-            return ((emb * attention_mask).prod(dim=1) / attention_mask.sum(dim=1)) / length
-
-    def median_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None): # (b, L, d) -> (b, d)
+    def median_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs): # (b, L, d) -> (b, d)
         if attention_mask is None:
             return emb.median(dim=1).values
         else:
             attention_mask = attention_mask.unsqueeze(-1)
             return (emb * attention_mask).median(dim=1).values
 
-    def std_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None): # (b, L, d) -> (b, d)
+    def std_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs): # (b, L, d) -> (b, d)
         if attention_mask is None:
             return emb.std(dim=1)
         else:
-            attention_mask = attention_mask.unsqueeze(-1)
-            return (emb * attention_mask).std(dim=1)
+            # Compute variance correctly over non-masked positions, then take sqrt
+            var = self.var_pooling(emb, attention_mask, **kwargs)
+            return torch.sqrt(var)
 
-    def var_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None): # (b, L, d) -> (b, d)
+    def var_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs): # (b, L, d) -> (b, d)
         if attention_mask is None:
             return emb.var(dim=1)
         else:
-            attention_mask = attention_mask.unsqueeze(-1)
-            return (emb * attention_mask).var(dim=1)
-
-    def cls_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None): # (b, L, d) -> (b, d)
+            # Correctly compute variance over only non-masked positions
+            attention_mask = attention_mask.unsqueeze(-1) # (b, L, 1)
+            # Compute mean over non-masked positions
+            mean = (emb * attention_mask).sum(dim=1) / attention_mask.sum(dim=1) # (b, d)
+            mean = mean.unsqueeze(1) # (b, 1, d)
+            # Compute squared differences from mean, only over non-masked positions
+            squared_diff = (emb - mean) ** 2 # (b, L, d)
+            # Sum squared differences over non-masked positions and divide by count
+            var = (squared_diff * attention_mask).sum(dim=1) / attention_mask.sum(dim=1) # (b, d)
+            return var
+
+    def cls_pooling(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs): # (b, L, d) -> (b, d)
         return emb[:, 0, :]
 
-    def __call__(self, emb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None): # [mean, max]
+    def __call__(
+        self,
+        emb: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        attentions: Optional[torch.Tensor] = None
+    ): # [mean, max]
         final_emb = []
         for pooling_type in self.pooling_types:
-            final_emb.append(self.pooling_options[pooling_type](emb, attention_mask)) # (b, d)
+            final_emb.append(self.pooling_options[pooling_type](emb=emb, attention_mask=attention_mask, attentions=attentions)) # (b, d)
         return torch.cat(final_emb, dim=-1) # (b, n_pooling_types * d)
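On the std/var change above: the old masked branch multiplied the embeddings by the mask and then called .std()/.var(), so padded positions entered the statistic as zeros; the rewritten var_pooling averages squared deviations over real tokens only (a population estimate), and std_pooling takes its square root. A small self-contained check with toy tensors (shapes and values are illustrative):

import torch

emb = torch.tensor([[[1.0], [3.0], [0.0]]])      # (b=1, L=3, d=1); the last position is padding
mask = torch.tensor([[1.0, 1.0, 0.0]])           # 1 = real token, 0 = padding

# Old behaviour: the padded zero is treated as data.
old_std = (emb * mask.unsqueeze(-1)).std(dim=1)  # std over [1, 3, 0] ~= 1.53

# New behaviour: mean and variance over real tokens only, then sqrt.
m = mask.unsqueeze(-1)                            # (b, L, 1)
mean = (emb * m).sum(dim=1) / m.sum(dim=1)        # mean over [1, 3] = 2
var = (((emb - mean.unsqueeze(1)) ** 2) * m).sum(dim=1) / m.sum(dim=1)
new_std = torch.sqrt(var)                         # population std over [1, 3] = 1

Note that the old masked branch also used torch's unbiased (n-1) estimator while the new code divides by the token count, so the two differ even when no positions are masked out.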
 
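Putting the pieces together, 'parti' can now be requested as a pooling type, with attention maps passed through __call__. A hypothetical usage sketch: the Pooler constructor signature and the (b, num_layers, L, L) attention layout are assumptions not shown in this diff, and the module needs numpy available as np, as the new helpers require.

import torch
from modeling_esm_plusplus import Pooler    # assumes the file is importable locally

b, n_layers, L, d = 2, 4, 10, 64
emb = torch.randn(b, L, d)                  # last hidden states
attention_mask = torch.ones(b, L)           # 1 = real token, 0 = padding (required for 'parti')
attentions = torch.rand(b, n_layers, L, L)  # stacked attention maps; max is taken over dim=1

pooler = Pooler(pooling_types=['parti'])    # constructor signature assumed
pooled = pooler(emb, attention_mask=attention_mask, attentions=attentions)
print(pooled.shape)                         # (b, d): one PageRank-weighted average per sequence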