timlawrenz commited on
Commit
a4b5533
·
verified ·
1 Parent(s): e2a5f5d

Upload src/loss.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. src/loss.py +595 -0
src/loss.py ADDED
@@ -0,0 +1,595 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Loss functions for AST reconstruction tasks and contrastive learning.
3
+
4
+ This module provides loss functions for:
5
+ 1. Measuring the difference between original and reconstructed Abstract Syntax Trees
6
+ 2. Contrastive learning between code and text embeddings for alignment
7
+ """
8
+
9
+ import torch
10
+ import torch.nn.functional as F
11
+ from torch_geometric.data import Data
12
+ from typing import Dict, Any, Union
13
+
14
+
15
+ def ast_reconstruction_loss_comprehensive(original: Data, reconstructed: Dict[str, Any],
16
+ node_weight: float = 1.0, parent_weight: float = 1.0) -> torch.Tensor:
17
+ """
18
+ Computes a comprehensive reconstruction loss for an AST.
19
+
20
+ This loss combines:
21
+ 1. Node Type Loss: Cross-entropy for predicting the correct node types.
22
+ 2. Parent Prediction Loss: Cross-entropy for predicting the correct parent for each node.
23
+ """
24
+ # --- Node Type Loss ---
25
+ recon_node_logits = reconstructed['node_features'].squeeze(0)
26
+
27
+ # Numerical stability: Clamp values to a reasonable range to prevent overflow
28
+ recon_node_logits = torch.clamp(recon_node_logits, min=-100, max=100)
29
+
30
+ true_node_types = original.x.argmax(dim=1)
31
+
32
+ num_nodes = min(recon_node_logits.size(0), true_node_types.size(0))
33
+ if num_nodes == 0:
34
+ return torch.tensor(0.0, device=original.x.device, requires_grad=True)
35
+
36
+ node_loss = F.cross_entropy(
37
+ recon_node_logits[:num_nodes],
38
+ true_node_types[:num_nodes]
39
+ )
40
+
41
+ # --- Parent Prediction Loss ---
42
+ recon_parent_logits = reconstructed['parent_logits'].squeeze(0) # [num_nodes, max_nodes]
43
+
44
+ # Numerical stability: Clamp values
45
+ recon_parent_logits = torch.clamp(recon_parent_logits, min=-100, max=100)
46
+
47
+ max_nodes = recon_parent_logits.size(1)
48
+
49
+ # Create the true parent labels
50
+ num_true_nodes = original.num_nodes
51
+ # Initialize with an ignore_index
52
+ ignore_index = -100
53
+ true_parents = torch.full((num_true_nodes,), ignore_index, dtype=torch.long, device=original.x.device)
54
+
55
+ # Edge index is [parent, child], so edge_index[0] are parents and edge_index[1] are children
56
+ children = original.edge_index[1]
57
+ parents = original.edge_index[0]
58
+
59
+ # Clamp parent indices to be within the prediction range [0, max_nodes-1]
60
+ valid_parents = torch.clamp(parents, 0, max_nodes - 1)
61
+ true_parents[children] = valid_parents
62
+
63
+ # We only care about the first num_nodes predictions and labels
64
+ num_nodes = min(recon_parent_logits.size(0), true_parents.size(0))
65
+
66
+ # Check if there are any valid parent labels to compute loss on
67
+ if (true_parents[:num_nodes] != ignore_index).any():
68
+ parent_loss = F.cross_entropy(
69
+ recon_parent_logits[:num_nodes],
70
+ true_parents[:num_nodes],
71
+ ignore_index=ignore_index
72
+ )
73
+ else:
74
+ # No valid parents to compute loss on (e.g., single-node graph)
75
+ parent_loss = torch.tensor(0.0, device=original.x.device, requires_grad=True)
76
+
77
+ # --- Total Loss ---
78
+ total_loss = (node_weight * node_loss) + (parent_weight * parent_loss)
79
+ return total_loss
80
+
81
+
82
def ast_reconstruction_loss(original: Data, reconstructed: Dict[str, Any],
                            node_weight: float = 1.0, edge_weight: float = 0.5) -> torch.Tensor:
    """
    Compute the reconstruction loss between original and reconstructed AST.

    This loss function combines:
      1. Node Type Loss: cross-entropy for predicting correct node types
      2. Edge Prediction Loss: simplified loss over per-graph edge counts

    Args:
        original: Original AST as a torch_geometric ``Data``/``Batch`` object
            providing ``x``, ``edge_index`` and ``batch``.
        reconstructed: Reconstructed AST from the decoder containing
            'node_features' of shape [batch_size, num_nodes, feature_dim]
            (a 2-D tensor is treated as a single-graph batch).
        node_weight: Weight for the node-type loss component.
        edge_weight: Weight for the edge-prediction loss component.

    Returns:
        Scalar tensor representing the total reconstruction loss.
    """
    # Extract original data
    original_x = original.x                    # [total_nodes, feature_dim]
    original_edge_index = original.edge_index  # [2, total_edges]
    original_batch = original.batch            # [total_nodes]

    # Normalize the reconstruction to a batched [B, N, F] layout.
    recon_node_features = reconstructed['node_features']
    if recon_node_features.dim() == 2:
        recon_node_features = recon_node_features.unsqueeze(0)

    batch_size = recon_node_features.size(0)
    # (max_nodes / feature_dim were previously computed here but never used.)

    # Node-type term.
    node_loss = compute_node_type_loss(original_x, recon_node_features, original_batch)

    # Simplified edge-structure term (compares edge counts per graph).
    edge_loss = compute_edge_prediction_loss(original_edge_index, original_batch,
                                             reconstructed, batch_size)

    return node_weight * node_loss + edge_weight * edge_loss
+
130
+
def compute_node_type_loss(original_x: torch.Tensor,
                           recon_node_features: torch.Tensor,
                           original_batch: torch.Tensor) -> torch.Tensor:
    """
    Cross-entropy loss for node-type prediction, averaged over all scored nodes.

    Args:
        original_x: One-hot node features, shape [total_nodes, feature_dim].
        recon_node_features: Reconstructed logits, shape
            [batch_size, max_nodes, feature_dim] (2-D input is promoted to a
            single-graph batch).
        original_batch: Per-node graph indices, shape [total_nodes].

    Returns:
        Mean cross-entropy per node, or a zero tensor when nothing is scored.
    """
    # Promote an unbatched [N, F] reconstruction to a batch of one.
    if recon_node_features.dim() == 2:
        recon_node_features = recon_node_features.unsqueeze(0)

    n_graphs = recon_node_features.size(0)
    slot_limit = recon_node_features.size(1)  # decoder's max node capacity

    loss_sum = 0.0
    counted = 0

    for g in range(n_graphs):
        in_graph = original_batch == g
        if not in_graph.any():
            continue

        targets_onehot = original_x[in_graph]
        # Only score as many nodes as the decoder actually produced.
        n = min(targets_onehot.size(0), slot_limit)
        logits = recon_node_features[g, :n, :]

        # Numerical safety: scrub NaN/Inf from the logits before cross_entropy.
        if torch.isnan(logits).any() or torch.isinf(logits).any():
            logits = torch.where(torch.isnan(logits), torch.zeros_like(logits), logits)
            logits = torch.clamp(logits, min=-100, max=100)

        # One-hot targets -> class indices.
        classes = torch.argmax(targets_onehot[:n, :], dim=1)

        loss_sum = loss_sum + F.cross_entropy(logits, classes, reduction='sum')
        counted += n

    if counted == 0:
        return torch.tensor(0.0, device=original_x.device, requires_grad=True)
    return loss_sum / counted
+
196
+
def compute_edge_prediction_loss(original_edge_index: torch.Tensor,
                                 original_batch: torch.Tensor,
                                 reconstructed: Dict[str, Any],
                                 batch_size: int) -> torch.Tensor:
    """
    Simplified edge loss: MSE between per-graph edge counts and a tree-shaped
    estimate (nodes - 1) rather than exact edge-to-edge matching.

    Args:
        original_edge_index: Original edges, shape [2, total_edges].
        original_batch: Per-node graph indices, shape [total_nodes].
        reconstructed: Decoder output dict (currently unused; kept for API parity).
        batch_size: Number of graphs in the batch.

    Returns:
        Scalar MSE over edge counts; zero tensor when there are no edges.
    """
    # No edges at all: nothing to compare.
    if original_edge_index.size(1) == 0:
        return torch.tensor(0.0, device=original_edge_index.device, requires_grad=True)

    # Fully vectorized: map each edge to its graph via its source node, then
    # count edges per graph with bincount (C-speed, no Python loop).
    source_graphs = original_batch[original_edge_index[0]]
    edges_per_graph = torch.bincount(source_graphs, minlength=batch_size).float()

    # Estimate the reconstruction's edge count as nodes - 1 (tree assumption).
    nodes_per_graph = torch.bincount(original_batch, minlength=batch_size).float()
    predicted_edges = torch.clamp(nodes_per_graph - 1, min=0)

    return F.mse_loss(predicted_edges, edges_per_graph)
+
241
+
def ast_reconstruction_loss_improved(original: Data, reconstructed: Dict[str, Any],
                                     type_weight: float = 1.0,
                                     parent_weight: float = 1.0) -> torch.Tensor:
    """
    Improved AST reconstruction loss with explicit parent prediction for batches.

    Combines node-type cross-entropy with per-node parent prediction across an
    entire batch of graphs, using graph-local parent indices.

    Args:
        original: A `torch_geometric.data.Batch` of original ASTs (provides
            ``x``, ``edge_index``, ``batch`` and ``num_nodes``).
        reconstructed: Decoder output with batched 'node_features'
            [total_nodes, feature_dim] and 'parent_logits' [total_nodes, max_nodes].
        type_weight: Weight for the node type prediction loss.
        parent_weight: Weight for the parent prediction loss.

    Returns:
        Scalar tensor: type_weight * type_loss + parent_weight * parent_loss.
    """
    # --- Component 1: Node Type Loss (Batched) ---
    recon_node_logits = reconstructed['node_features']  # Shape: [total_nodes, feature_dim]
    true_node_types = original.x.argmax(dim=1)  # one-hot features -> class indices

    # Guard against a size mismatch between prediction and ground truth;
    # only the overlapping prefix of nodes is scored.
    num_nodes = min(recon_node_logits.size(0), true_node_types.size(0))
    if num_nodes == 0:
        return torch.tensor(0.0, device=original.x.device, requires_grad=True)

    type_loss = F.cross_entropy(
        recon_node_logits[:num_nodes],
        true_node_types[:num_nodes]
    )

    # --- Component 2: Parent Prediction Loss (Batched) ---
    recon_parent_logits = reconstructed['parent_logits']  # Shape: [total_nodes, max_nodes]
    max_nodes = recon_parent_logits.size(1)

    # Ground-truth parent labels for the whole batch; nodes that never appear
    # as a child (e.g. each graph's root) keep the ignore_index and contribute
    # nothing to the loss.
    num_true_nodes = original.num_nodes
    ignore_index = -100
    true_parents = torch.full((num_true_nodes,), ignore_index, dtype=torch.long, device=original.x.device)

    # Parent logits are graph-local (logit j = "node j within this graph"),
    # while edge_index uses global batch-wide node ids. Compute each graph's
    # starting offset so globals can be converted to locals.
    # NOTE: bincount(original.batch) assumes graph ids are contiguous from 0,
    # which is how torch_geometric's Batch numbers them.
    num_nodes_per_graph = torch.bincount(original.batch)
    node_offsets = torch.cumsum(num_nodes_per_graph, dim=0) - num_nodes_per_graph

    # Edge convention here: edge_index[0] = parent, edge_index[1] = child.
    children = original.edge_index[1]
    parents = original.edge_index[0]

    # Global parent id minus the offset of its own graph -> local parent index.
    local_parents = parents - node_offsets[original.batch[parents]]

    # Clamp into the predictable range [0, max_nodes-1].
    # NOTE(review): a parent beyond max_nodes is silently relabeled to
    # max_nodes-1 rather than ignored — presumably acceptable because graphs
    # are expected to fit the decoder capacity; confirm against the decoder.
    valid_parents = torch.clamp(local_parents, 0, max_nodes - 1)
    true_parents[children] = valid_parents

    # Only compute the parent term if at least one child/parent pair exists.
    if (true_parents != ignore_index).any():
        parent_loss = F.cross_entropy(
            recon_parent_logits,
            true_parents,
            ignore_index=ignore_index
        )
    else:
        # All-roots batch: zero parent term (gradient still flows via type_loss).
        parent_loss = torch.tensor(0.0, device=original.x.device)

    # --- Total Loss ---
    total_loss = (type_weight * type_loss) + (parent_weight * parent_loss)
    return total_loss
+
319
+
320
+ def _compute_role_loss(original: Data, reconstructed: Dict[str, Any]) -> torch.Tensor:
321
+ """
322
+ Compute role loss component for improved AST reconstruction.
323
+
324
+ This function computes a loss that encourages the model to understand the
325
+ functional role of identifiers (e.g., method argument, local variable).
326
+
327
+ For backward compatibility with current one-hot node features, this implements
328
+ a simplified role-aware loss based on node types and graph structure.
329
+ In the future, this will use dedicated role embeddings.
330
+
331
+ Args:
332
+ original: Original AST data
333
+ reconstructed: Reconstructed AST data
334
+
335
+ Returns:
336
+ Scalar tensor representing the role loss
337
+ """
338
+ recon_node_features = reconstructed['node_features']
339
+ batch_size = recon_node_features.size(0)
340
+
341
+ # For backward compatibility, derive role information from node types and graph structure
342
+ # This is a simplified approach until dedicated role features are implemented
343
+
344
+ total_loss = 0.0
345
+ total_nodes = 0
346
+
347
+ for batch_idx in range(batch_size):
348
+ # Get original nodes for this graph
349
+ mask = (original.batch == batch_idx)
350
+ if not mask.any():
351
+ continue
352
+
353
+ original_nodes = original.x[mask] # [num_nodes_in_graph, feature_dim]
354
+ num_original_nodes = original_nodes.size(0)
355
+
356
+ # Get node types for role inference
357
+ original_node_types = torch.argmax(original_nodes, dim=1)
358
+
359
+ # Simple role-based loss: encourage consistency in how similar node types are handled
360
+ # This approximates role understanding until full role features are available
361
+ if num_original_nodes > 1:
362
+ # Create a simple role similarity matrix based on node types
363
+ type_similarity = (original_node_types.unsqueeze(0) == original_node_types.unsqueeze(1)).float()
364
+
365
+ # Get reconstructed features for this batch
366
+ max_nodes = min(num_original_nodes, recon_node_features.size(1))
367
+ recon_features = recon_node_features[batch_idx, :max_nodes, :]
368
+
369
+ # Compute pairwise similarities in reconstructed space
370
+ recon_normalized = F.normalize(recon_features, p=2, dim=1)
371
+ recon_similarity = torch.matmul(recon_normalized, recon_normalized.t())
372
+
373
+ # Encourage similar node types to have similar representations (role consistency)
374
+ role_consistency_loss = F.mse_loss(recon_similarity, type_similarity[:max_nodes, :max_nodes])
375
+ total_loss += role_consistency_loss
376
+ total_nodes += 1
377
+
378
+ # Return average loss
379
+ if total_nodes > 0:
380
+ avg_loss = total_loss / total_nodes
381
+ if isinstance(avg_loss, torch.Tensor):
382
+ return avg_loss.requires_grad_(True)
383
+ else:
384
+ return torch.tensor(avg_loss, device=original.x.device, requires_grad=True)
385
+ else:
386
+ return torch.tensor(0.0, device=original.x.device, requires_grad=True)
387
+
388
+
389
+ def _compute_name_loss(original: Data, reconstructed: Dict[str, Any]) -> torch.Tensor:
390
+ """
391
+ Compute name loss component for improved AST reconstruction.
392
+
393
+ This function computes a loss that lightly encourages the model to use
394
+ appropriate names while not penalizing heavily for choosing different
395
+ but valid names.
396
+
397
+ For backward compatibility with current features, this implements a
398
+ placeholder loss that encourages semantic consistency.
399
+ In the future, this will use dedicated name embeddings.
400
+
401
+ Args:
402
+ original: Original AST data
403
+ reconstructed: Reconstructed AST data
404
+
405
+ Returns:
406
+ Scalar tensor representing the name loss
407
+ """
408
+ recon_node_features = reconstructed['node_features']
409
+ batch_size = recon_node_features.size(0)
410
+
411
+ # For backward compatibility, implement a lightweight semantic consistency loss
412
+ # This will be replaced with proper name embedding loss in the future
413
+
414
+ total_loss = 0.0
415
+ total_nodes = 0
416
+
417
+ for batch_idx in range(batch_size):
418
+ # Get original nodes for this graph
419
+ mask = (original.batch == batch_idx)
420
+ if not mask.any():
421
+ continue
422
+
423
+ original_nodes = original.x[mask]
424
+ num_original_nodes = original_nodes.size(0)
425
+
426
+ # Get reconstructed features
427
+ max_nodes = min(num_original_nodes, recon_node_features.size(1))
428
+ recon_features = recon_node_features[batch_idx, :max_nodes, :]
429
+
430
+ # Lightweight semantic consistency: encourage reconstructed features to maintain
431
+ # relative relationships present in original (approximates name consistency)
432
+ if max_nodes > 1:
433
+ # Compute cosine similarities in both spaces
434
+ orig_normalized = F.normalize(original_nodes[:max_nodes], p=2, dim=1)
435
+ recon_normalized = F.normalize(recon_features, p=2, dim=1)
436
+
437
+ orig_similarities = torch.matmul(orig_normalized, orig_normalized.t())
438
+ recon_similarities = torch.matmul(recon_normalized, recon_normalized.t())
439
+
440
+ # Light penalty for changing semantic relationships (low weight applied externally)
441
+ semantic_consistency_loss = F.mse_loss(recon_similarities, orig_similarities)
442
+ total_loss += semantic_consistency_loss
443
+ total_nodes += 1
444
+
445
+ # Return average loss
446
+ if total_nodes > 0:
447
+ avg_loss = total_loss / total_nodes
448
+ if isinstance(avg_loss, torch.Tensor):
449
+ return avg_loss.requires_grad_(True)
450
+ else:
451
+ return torch.tensor(avg_loss, device=original.x.device, requires_grad=True)
452
+ else:
453
+ return torch.tensor(0.0, device=original.x.device, requires_grad=True)
454
+
455
+
def ast_reconstruction_loss_simple(original: Data, reconstructed: Dict[str, Any]) -> torch.Tensor:
    """
    Simplified AST reconstruction loss: node-type prediction only.

    Delegates entirely to :func:`compute_node_type_loss`, which is the core
    (and easiest to debug) component of AST reconstruction.

    Args:
        original: Original AST as a torch_geometric ``Data`` object.
        reconstructed: Decoder output containing 'node_features'.

    Returns:
        Scalar node-type reconstruction loss.
    """
    node_logits = reconstructed['node_features']
    return compute_node_type_loss(original.x, node_logits, original.batch)
+
472
+
473
+ # ============================================================================
474
+ # Contrastive Loss Functions for Code-Text Alignment (Phase 5)
475
+ # ============================================================================
476
+
def info_nce_loss(code_embeddings: torch.Tensor, text_embeddings: torch.Tensor,
                  temperature: float = 0.07) -> torch.Tensor:
    """
    Symmetric InfoNCE loss for code/text contrastive alignment.

    Row i of each tensor is assumed to form a positive pair; every other
    combination in the batch acts as a negative. Both retrieval directions
    (code->text and text->code) are averaged.

    Args:
        code_embeddings: [batch_size, embedding_dim] code embeddings.
        text_embeddings: [batch_size, embedding_dim] text embeddings.
        temperature: Softmax temperature applied to the similarity logits.

    Returns:
        Scalar InfoNCE loss.
    """
    # Unit-normalize so the dot products below are cosine similarities.
    code_unit = F.normalize(code_embeddings, p=2, dim=1)
    text_unit = F.normalize(text_embeddings, p=2, dim=1)

    # logits[i, j] = cos(code_i, text_j) / T; the correct match sits on the diagonal.
    logits = torch.matmul(code_unit, text_unit.t()) / temperature
    targets = torch.arange(code_unit.size(0), device=code_unit.device)

    # Cross-entropy per direction: each code row should pick its own text, and vice versa.
    code_to_text = F.cross_entropy(logits, targets)
    text_to_code = F.cross_entropy(logits.t(), targets)

    return (code_to_text + text_to_code) / 2.0
+
521
+
def cosine_embedding_loss(code_embeddings: torch.Tensor, text_embeddings: torch.Tensor,
                          margin: float = 0.2) -> torch.Tensor:
    """
    Margin-based cosine embedding loss for contrastive code/text alignment.

    Positive (diagonal) pairs are pulled toward cosine similarity 1 via MSE;
    negative (off-diagonal) pairs are pushed below ``margin`` via a hinge.

    Args:
        code_embeddings: [batch_size, embedding_dim] code embeddings.
        text_embeddings: [batch_size, embedding_dim] text embeddings.
        margin: Negative-pair similarities above this value are penalized.

    Returns:
        Scalar loss (positive term + negative term).
    """
    n = code_embeddings.size(0)

    # Unit-normalize so the matmul below yields cosine similarities.
    code_unit = F.normalize(code_embeddings, p=2, dim=1)
    text_unit = F.normalize(text_embeddings, p=2, dim=1)
    sims = torch.matmul(code_unit, text_unit.t())

    # Positive pairs live on the diagonal; drive them toward similarity 1.
    pos_sims = torch.diag(sims)
    pos_loss = F.mse_loss(pos_sims, torch.ones_like(pos_sims))

    if n > 1:
        # Off-diagonal entries are negatives; hinge-penalize anything above margin.
        eye = torch.eye(n, device=code_embeddings.device).bool()
        neg_sims = sims[~eye]
        neg_loss = F.relu(neg_sims - margin).mean()
    else:
        # A batch of one has no negatives.
        neg_loss = torch.tensor(0.0, device=code_embeddings.device)

    return pos_loss + neg_loss
+
569
+
def simple_contrastive_loss(code_embeddings: torch.Tensor, text_embeddings: torch.Tensor,
                            temperature: float = 0.1) -> torch.Tensor:
    """
    Minimal contrastive loss: negative mean cosine similarity of paired rows.

    Only the i-th code / i-th text pairs are considered (no in-batch
    negatives); maximizing their similarity minimizes this loss. The result is
    scaled by 1/temperature for stronger gradients.

    Args:
        code_embeddings: [batch_size, embedding_dim] code embeddings.
        text_embeddings: [batch_size, embedding_dim] text embeddings.
        temperature: Scaling factor applied to the (negated) mean similarity.

    Returns:
        Scalar loss in [-1/temperature, 1/temperature].
    """
    # Normalization keeps cosine_similarity numerically stable.
    code_unit = F.normalize(code_embeddings, p=2, dim=1)
    text_unit = F.normalize(text_embeddings, p=2, dim=1)

    # Row-wise similarity of each positive pair.
    pair_sims = F.cosine_similarity(code_unit, text_unit, dim=1)

    # Negate (we maximize similarity) and scale by temperature.
    return -pair_sims.mean() / temperature