import transformers
from dataclasses import dataclass
from typing import Dict, Optional, List, Sequence, Any
import torch
from transformers.trainer_pt_utils import LabelSmoother



@dataclass
class DataCollatorForSupervisedDataset:
    """Collate examples for supervised fine-tuning.

    Pads the per-example ``input_ids``/``labels`` tensors of a batch to a
    common length, caps that length at ``max_seq_len``, and derives an
    attention mask from the pad positions.
    """

    tokenizer: transformers.PreTrainedTokenizer
    max_seq_len: int = 4096  # hard cap on the padded sequence length

    def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
        """Batch ``instances`` into padded ``input_ids``, ``labels``, ``attention_mask``.

        Each instance must provide 1-D integer tensors under the keys
        ``"input_ids"`` and ``"labels"``.
        """
        # Truncate each example *before* padding: identical result to the
        # pad-then-slice order, but never allocates beyond max_seq_len.
        input_ids = [instance["input_ids"][: self.max_seq_len] for instance in instances]
        labels = [instance["labels"][: self.max_seq_len] for instance in instances]

        # Some tokenizers (e.g. LLaMA-family) ship without a pad token;
        # fall back to EOS so pad_sequence never sees padding_value=None.
        pad_token_id = self.tokenizer.pad_token_id
        if pad_token_id is None:
            pad_token_id = self.tokenizer.eos_token_id

        input_ids = torch.nn.utils.rnn.pad_sequence(
            input_ids, batch_first=True, padding_value=pad_token_id
        ).long()
        # Pad labels with the loss-ignore index (-100) so that padded
        # positions never contribute to the training loss.
        labels = torch.nn.utils.rnn.pad_sequence(
            labels, batch_first=True, padding_value=LabelSmoother.ignore_index
        ).long()

        return dict(
            input_ids=input_ids,
            labels=labels,
            # NOTE(review): real tokens equal to pad_token_id (e.g. a
            # trailing EOS under the fallback above) are masked out here
            # too — confirm this is acceptable for the caller.
            attention_mask=input_ids.ne(pad_token_id),
        )
