from typing import Dict, Sequence, Tuple
import re
import numpy as np
import torch


def postprocess_classification_generation(predictions: str) -> str:
    """Return only the text generated before the first "Prompt" or "Completion" marker."""
    return re.split("Prompt|Completion", predictions, maxsplit=1)[0]


def compute_classification_accuracy(predictions: Sequence[Dict[str, str]]) -> float:
    """Compute the accuracy of a sequence of predictions."""

    def _preprocess_fn(s):
        """Function to preprocess both targets and predictions."""
        return s.lower()

    is_correct = [
        _preprocess_fn(x["prediction"]) == _preprocess_fn(x["class_label"])
        for x in predictions
    ]

    return np.mean(is_correct).item()


def compute_shifted_logits_and_labels(
    logits: torch.Tensor, encodings, tokenizer, eoc_token_id
) -> Tuple[torch.Tensor, torch.Tensor]:
| """Helper function to compute shifted logits and labels. |
| |
| This allows for straightforward computation of the loss on shift_logits |
| and shift_labels such that the nth element of logits computes the n-1th |
| element of the original labels (in the outputs, the nth element of logits |
| corresponds to the nth element of the labels). |
| |
| Elements in shift_labels that correspond to inputs are masked with values |
| of -100 (by default in hf, loss is only computed on token IDs >= 0). |
| |
| Returns: tuple containing two elements: |
| shift_logits: a float Tensor of shape [batch_size, seq_len - 1]. |
| shift_labels: an integer Tensor of shape [batch_size, seq_len - 1] |
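
    Illustrative example (made-up token IDs, with pad=0, eos=1, eoc=2): for an
    encoded sequence [5, 6, 1, 7, 8, 2, 0], the labels become
    [-100, -100, -100, 7, 8, -100, -100], so after shifting only the two
    target tokens 7 and 8 are scored.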
| """ |
|
|
| labels = encodings["input_ids"].clone() |
|
|
| |
| labels[labels == tokenizer.pad_token_id] = -100 |
| labels[labels == eoc_token_id] = -100 |
|
|
| |
| |
| for idx in range(len(labels)): |
| |
| |
| |
| |
| end_of_prefix = -labels[idx].tolist()[::-1].index(tokenizer.eos_token_id) - 1 |
| labels[idx, : end_of_prefix + 1] = -100 |
|
|
| |
| |
| shift_logits = logits[..., :-1, :].contiguous() |
| shift_labels = labels[..., 1:].contiguous() |
|
|
| return shift_logits, shift_labels |
|
|
|
|
| def compute_per_sample_probs( |
| encodings, tokenizer, logits: torch.Tensor, eoc_token_id |
| ) -> torch.Tensor: |
| """Helper function to compute per-sample probability of the input sequence. |
| |
| Assumes <eos token> is used to separate inputs from targets in the |
| prompt text |
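
    Illustrative shapes: for a batch of 4 candidate prompts encoded to length
    32 over a vocabulary of 50,000 tokens, `logits` has shape [4, 32, 50000]
    and the returned tensor has shape [4], one probability per sample.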
| """ |
| shift_logits, shift_labels = compute_shifted_logits_and_labels( |
| logits, encodings, tokenizer, eoc_token_id |
| ) |
|
|
| |
| |
| |
| unmasked_indices = torch.nonzero(shift_labels != -100, as_tuple=True) |
| |
| |
| unmasked_token_ids = shift_labels[unmasked_indices] |
|
|
| |
| target_idxs = torch.column_stack([*unmasked_indices, unmasked_token_ids]) |
| target_idxs = target_idxs.to(shift_logits.device) |
|
|
| |
| |
| assert torch.all( |
| torch.bincount(target_idxs[:, 0]) != 0 |
| ), "At least one element in batch has no unmasked target tokens." |
|
|
| |
| |
| shift_probs = torch.nn.functional.softmax(shift_logits, 2) |
|
|
| |
| |
| target_probs = torch.ones(len(shift_labels), device=shift_logits.device) |
| for i, j, k in target_idxs: |
| target_probs[i] *= shift_probs[i, j, k] |
|
|
| return target_probs |
|
|
|
|
| def compute_per_sample_loss(encodings, tokenizer, logits, eoc_token_id) -> torch.Tensor: |
| """Helper function to compute per-sample classification loss. |
| |
| Assumes <eos token> is used to separate inputs from targets in the |
| prompt text |
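
    The returned tensor has shape [batch_size] and holds, for each sample, the
    mean cross-entropy over its unmasked (target) tokens. For classification,
    one would typically score one prompt per candidate class and pick the class
    with the lowest loss (a toy sketch follows at the end of this module).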
| """ |
| shift_logits, shift_labels = compute_shifted_logits_and_labels( |
| logits, encodings, tokenizer, eoc_token_id |
| ) |
|
|
| device = shift_logits.device |
|
|
| |
| |
| |
| |
| |
| loss = torch.nn.functional.cross_entropy( |
| shift_logits.view(-1, shift_logits.size(-1)), |
| shift_labels.view(-1).to(device), |
| reduction="none", |
| ) |
|
|
| |
| loss = loss.view(shift_logits.size(0), shift_logits.size(1)).cpu() |
|
|
| |
| |
| loss_mask = (shift_labels != -100).int().cpu() |
|
|
| loss *= loss_mask |
|
|
| |
| |
| |
| loss = loss.sum(dim=1) / (shift_labels != -100).sum(dim=1).float() |
| return loss |
|
|
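

if __name__ == "__main__":
    # Minimal illustrative sketch, not part of the evaluation pipeline: it builds
    # a toy batch with made-up token IDs (pad=0, eos=1, eoc=2) and random logits
    # just to show how the helpers above compose. In real use, `logits` come from
    # a causal LM forward pass over the encoded candidate prompts and the token
    # IDs come from that model's tokenizer.
    from types import SimpleNamespace

    toy_tokenizer = SimpleNamespace(pad_token_id=0, eos_token_id=1)
    toy_eoc_token_id = 2
    vocab_size = 10

    # Two candidate prompts laid out as <prefix> <eos> <target> <eoc> <pad ...>.
    input_ids = torch.tensor(
        [
            [5, 6, 1, 7, 8, 2, 0],
            [5, 6, 1, 9, 2, 0, 0],
        ]
    )
    encodings = {"input_ids": input_ids}
    logits = torch.randn(len(input_ids), input_ids.shape[1], vocab_size)

    per_sample_loss = compute_per_sample_loss(
        encodings, toy_tokenizer, logits, toy_eoc_token_id
    )
    per_sample_probs = compute_per_sample_probs(
        encodings, toy_tokenizer, logits, toy_eoc_token_id
    )
    # The candidate with the lowest loss (equivalently, the highest target
    # probability) would be the predicted class.
    print(per_sample_loss, per_sample_probs)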