from dataclasses import dataclass
from typing import Any, Dict, List
import torch


@dataclass
class MyCollator:
pad_token_id: int
attention_pad_value: int = 0
label_pad_value: int = -100

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
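        """Collate a list of feature dicts into a batch of padded tensors plus index lists."""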
# * extract data from features, and format them from dict to list
input_ids = [f["input_ids"] for f in features] # List[List[int]]
placeholder_indices = [f["placeholder_indices"] for f in features] # List[List[int]]
super_input_ids = [f["super_input_ids"] for f in features] # List[List[List[int]]]
super_token_indices = [f["super_token_indices"] for f in features] # List[List[List[int]]]
labels = [f["labels"] for f in features] if "labels" in features[0] else None # List[List[int]]
# * process model input
(
input_ids,
attention_mask,
placeholder_indices,
labels,
) = self.process_model_inputs(
input_ids,
placeholder_indices,
labels,
)
# * process super_tokenizer input
(
super_input_ids,
super_attention_mask,
super_token_indices,
) = self.process_super_tokenizer_inputs(
super_input_ids,
super_token_indices,
)
# * to torch tensor
input_ids = torch.tensor(input_ids)
attention_mask = torch.tensor(attention_mask)
super_input_ids = torch.tensor(super_input_ids)
super_attention_mask = torch.tensor(super_attention_mask)
labels = torch.tensor(labels) if labels else None
# * format
res = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"super_input_ids": super_input_ids,
"super_attention_mask": super_attention_mask,
"placeholder_indices": placeholder_indices,
"super_token_indices": super_token_indices,
"labels": labels,
}
return res

    def process_model_inputs(self, input_ids, placeholder_indices, labels):
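        """Left-pad input_ids/attention_mask/labels and shift placeholder_indices to match the left padding."""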
# * get attention mask
max_len = get_max_length_in_nested_lists(input_ids)
attention_mask = get_attention_mask_from_nested_lists(input_ids)
# * get new placeholder_indices since padding side is left
placeholder_indices = [
[idx + max_len - len(input_ids[i]) for idx in placeholder_indices[i]]
for i in range(len(placeholder_indices))
]
# * pad
input_ids = pad_nested_lists(input_ids, max_len, self.pad_token_id, "left")
attention_mask = pad_nested_lists(attention_mask, max_len, self.attention_pad_value, "left")
if labels:
labels = pad_nested_lists(labels, max_len, self.label_pad_value, "left")
return input_ids, attention_mask, placeholder_indices, labels

    def process_super_tokenizer_inputs(self, input_ids, super_token_indices):
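        """Flatten per-sample super inputs from 3D to 2D, drop items without super tokens, then right-pad."""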
# * 3D -> 2D
input_ids = sum(input_ids, []) # List[List[int]]
super_token_indices = sum(super_token_indices, []) # List[List[int]]
# * filter empty item
new_input_ids = []
new_super_token_indices = []
for i in range(len(input_ids)):
if len(super_token_indices[i]) != 0:
new_input_ids.append(input_ids[i])
new_super_token_indices.append(super_token_indices[i])
input_ids = new_input_ids
super_token_indices = new_super_token_indices
if len(input_ids) == 0:
return [], [], []
# * get attention mask and pad
max_len = get_max_length_in_nested_lists(input_ids)
attention_mask = get_attention_mask_from_nested_lists(input_ids)
input_ids = pad_nested_lists(input_ids, max_len, self.pad_token_id)
attention_mask = pad_nested_lists(attention_mask, max_len, self.attention_pad_value)
return input_ids, attention_mask, super_token_indices


def get_max_length_in_nested_lists(lst):
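    """Recursively find the length of the longest innermost list in `lst`."""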
if isinstance(lst[0], list):
lengths = []
for elem in lst:
length = get_max_length_in_nested_lists(elem)
lengths.append(length)
max_length = max(lengths)
return max_length
else:
return len(lst)


def get_attention_mask_from_nested_lists(lst):
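    """Build a mask of 1s that mirrors the nesting structure of `lst` (before padding)."""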
if isinstance(lst[0], list):
attention_mask = []
for elem in lst:
mask = get_attention_mask_from_nested_lists(elem)
attention_mask.append(mask)
return attention_mask
else:
return [1] * len(lst)


def pad_nested_lists(lst, max_length, padding_value, padding_side="right"):
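    """Pad every innermost list to `max_length` with `padding_value` on the given side (in place for nested lists)."""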
if isinstance(lst, list) and len(lst) and isinstance(lst[0], list):
for i, elem in enumerate(lst):
lst[i] = pad_nested_lists(elem, max_length, padding_value, padding_side)
return lst
elif isinstance(lst, list):
if padding_side == "right":
return lst + [padding_value for _ in range(max_length - len(lst))]
else:
return [padding_value for _ in range(max_length - len(lst))] + lst
else:
        raise NotImplementedError(f"Unrecognized type {type(lst)}: {lst!r}")
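

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original pipeline). The toy ids,
# indices, and pad_token_id=0 below are illustrative assumptions rather than
# values produced by any real tokenizer; the sketch only demonstrates the
# expected padding behavior and output shapes.
if __name__ == "__main__":
    collator = MyCollator(pad_token_id=0)
    toy_features = [
        {
            "input_ids": [101, 7, 8, 102],
            "placeholder_indices": [1],
            "super_input_ids": [[201, 5, 6]],
            "super_token_indices": [[0, 1]],
            "labels": [-100, 7, 8, -100],
        },
        {
            "input_ids": [101, 9, 102],
            "placeholder_indices": [1],
            "super_input_ids": [[201, 4]],
            "super_token_indices": [[0]],
            "labels": [-100, 9, -100],
        },
    ]
    batch = collator(toy_features)
    # input_ids/attention_mask/labels are left-padded to the batch max length;
    # super_input_ids/super_attention_mask are flattened to 2D and right-padded.
    for key, value in batch.items():
        print(key, tuple(value.shape) if isinstance(value, torch.Tensor) else value)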