from .base import *


class EncoderDecoderData(BaseData):
    """Batch-collation helpers for an encoder-decoder (seq2seq) model.

    Each ``*_collate`` method turns a list of example dicts into one padded
    tensor batch via ``self.tokenizer``.  Source texts are handled
    identically for every split (see ``_encode_src``); target handling
    differs per split and is documented on each method.
    """

    def _encode_src(self, batch):
        """Tokenize the ``'src'`` field of every example in *batch*.

        Sources are padded to the longest in the batch, truncated to
        ``self.args.max_source_length``, and returned as PyTorch tensors
        with an attention mask (token type ids suppressed).

        Returns the tokenizer's BatchEncoding (dict-like), which callers
        extend with split-specific keys.
        """
        src = [x['src'] for x in batch]
        return self.tokenizer(src,
                              padding=True,
                              return_tensors='pt',
                              max_length=self.args.max_source_length,
                              return_attention_mask=True,
                              return_token_type_ids=False,
                              truncation='longest_first')

    def train_collate(self, batch):
        """Collate a training batch.

        Targets are padded, truncated to ``self.args.max_target_length``,
        and attached as ``decoder_input_ids`` / ``decoder_attention_mask``.
        """
        res = self._encode_src(batch)
        tgt = [x['tgt'] for x in batch]
        target_features = self.tokenizer(tgt,
                                         padding=True,
                                         return_tensors='pt',
                                         max_length=self.args.max_target_length,
                                         return_attention_mask=True,
                                         return_token_type_ids=False,
                                         truncation='longest_first')
        res['decoder_input_ids'] = target_features['input_ids']
        res['decoder_attention_mask'] = target_features['attention_mask']
        return res

    def dev_collate(self, batch):
        """Collate a validation batch.

        Unlike training, targets are padded but NOT truncated (no
        ``max_length``/``truncation`` passed) and no decoder attention
        mask is built — presumably so evaluation sees the full reference
        sequence; preserved as-is from the original.  NOTE(review): an
        unusually long target could exceed the model's maximum length —
        confirm upstream data is bounded.
        """
        res = self._encode_src(batch)
        tgt = [x['tgt'] for x in batch]
        target_features = self.tokenizer(tgt,
                                         padding=True,
                                         return_tensors='pt',
                                         return_attention_mask=False,
                                         return_token_type_ids=False)
        res['decoder_input_ids'] = target_features['input_ids']
        return res

    def predict_collate(self, batch):
        """Collate an inference batch.

        No targets are tokenized; instead each example's ``'ids'`` field
        is converted to int and attached as a 1-D ``'id'`` tensor —
        presumably for joining predictions back to inputs (verify against
        the prediction writer).
        """
        res = self._encode_src(batch)
        ids = [x['ids'] for x in batch]
        res['id'] = torch.tensor(list(map(int, ids)))
        return res
