"""Sample a replacement word for a [MASK] token, restricted to the permissible vocabulary half."""

import random

import torch

from vocabulary_split import split_vocabulary, filter_logits
from masking_methods import tokenizer

# Deterministically split the vocabulary; only the "permissible" half may ever be sampled.
permissible, _ = split_vocabulary(seed=42)

# Boolean mask over the full tokenizer vocabulary: True where the token id
# belongs to the permissible half.
permissible_indices = torch.tensor([i in permissible.values() for i in range(len(tokenizer))])
def sample_word(sentence, words, logits, sampling_technique='inverse_transform', temperature=1.0):
    """Fill the [MASK] slot in `sentence` with a token sampled from the permissible vocabulary.

    `words` is currently unused and is kept only for interface compatibility.
    """
    # Suppress non-permissible tokens before any sampling takes place.
    filtered_logits = filter_logits(torch.tensor(logits), permissible_indices)

    if sampling_technique == 'inverse_transform':
        # Inverse-transform sampling: invert the CDF at a uniform random point.
        probs = torch.softmax(filtered_logits / temperature, dim=-1)
        cumulative_probs = torch.cumsum(probs, dim=-1)
        random_prob = random.random()
        sampled_index = torch.where(cumulative_probs >= random_prob)[0][0].item()
    elif sampling_technique == 'exponential_minimum':
        # Exponential-race sampling: draw E_i ~ Exp(1) via -log(U_i) and pick
        # argmin(E_i / p_i), which selects index i with probability p_i.
        probs = torch.softmax(filtered_logits / temperature, dim=-1)
        exp_samples = -torch.log(torch.rand_like(probs)) / probs
        sampled_index = torch.argmin(exp_samples).item()
    elif sampling_technique == 'temperature':
        # Plain temperature sampling via a multinomial draw.
        probs = torch.softmax(filtered_logits / temperature, dim=-1)
        sampled_index = torch.multinomial(probs, 1).item()
    elif sampling_technique == 'greedy':
        # Greedy decoding: take the highest-scoring permissible token.
        sampled_index = torch.argmax(filtered_logits).item()
    else:
        raise ValueError("Invalid sampling technique. Choose 'inverse_transform', "
                         "'exponential_minimum', 'temperature', or 'greedy'.")

    # Decode the sampled token id back to a word string.
    sampled_word = tokenizer.decode([sampled_index])

    # Substitute the sampled word for the mask placeholder.
    filled_sentence = sentence.replace('[MASK]', sampled_word)
    return filled_sentence
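

if __name__ == '__main__':
    # Minimal usage sketch, assuming the `tokenizer` exported by masking_methods
    # is a HuggingFace-style tokenizer with a [MASK] token. The random logits
    # below are purely illustrative; in practice they would come from a
    # masked-LM forward pass over the masked sentence.
    demo_sentence = "The cat sat on the [MASK]."
    demo_logits = torch.randn(len(tokenizer)).tolist()
    for technique in ('inverse_transform', 'exponential_minimum', 'temperature', 'greedy'):
        print(technique, '->', sample_word(demo_sentence, words=None, logits=demo_logits,
                                           sampling_technique=technique))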