import torch

class TransformBase:
    """Base class for sentence transforms.

    Calling an instance applies :meth:`transform` to the sentence; a
    ``None`` input short-circuits to ``None`` so transforms can be
    chained safely.  Subclasses override :meth:`transform`.
    """

    def __call__(self, sentence):
        # Fix: compare to the None singleton with `is`, not `==` --
        # `==` may be overridden by the sentence's type (PEP 8).
        if sentence is None:
            return None
        return self.transform(sentence)

    def transform(self, sentence):
        """Map one sentence to a transformed sentence, or None to drop it."""
        raise NotImplementedError

class SequenceTransform(TransformBase):
    """Compose several transforms, applying them left to right.

    Bug fix: the transform list used to be assigned inside ``__call__``,
    which shadowed ``TransformBase.__call__`` (losing the None guard and
    the dispatch to ``transform``) and made ``SequenceTransform(ts)``
    raise TypeError.  The list now belongs in ``__init__``, matching the
    construction pattern of the other transforms in this file.
    """

    def __init__(self, transforms):
        # transforms: iterable of callables, each sentence -> sentence | None.
        self.transforms = transforms

    def transform(self, sentence):
        # Thread the sentence through each transform in order; stop early
        # once any transform drops it (returns None), so later transforms
        # never receive None.
        for transform in self.transforms:
            sentence = transform(sentence)
            if sentence is None:
                return None
        return sentence

class FilterTransform(TransformBase):
    """Keep a sentence only when it satisfies a predicate.

    A sentence accepted by the predicate passes through unchanged; a
    rejected one is dropped by returning None.
    """

    def __init__(self, filter):
        # filter: predicate, sentence -> bool.  (Name kept for caller
        # compatibility even though it shadows the builtin.)
        self.filter = filter

    def transform(self, sentence):
        return sentence if self.filter(sentence) else None

class IndexMappingTransform(TransformBase):
    """Tokenize a raw sentence into subword ids and map word-level NP spans
    to subword-level index lists.

    Expects ``sentence`` to be a dict with keys ``"sentence"`` (sequence of
    raw word tokens), ``"file_id"``, ``"sent_id"`` and ``"np_list"``
    (word-level ``[start, end)`` spans).  Returns ``None`` for sentences
    that are filtered out, otherwise the enriched dict.
    """

    def __init__(self, device, tokenizer):
        # device: retained for tensor placement; the tensor conversion in
        #   transform() is currently commented out, so it is unused here.
        # tokenizer: HuggingFace-style tokenizer with encode() and
        #   convert_ids_to_tokens().  NOTE(review): the hard-coded ids
        #   101/102 below imply a BERT vocabulary -- confirm the tokenizer
        #   matches.
        super().__init__()
        self.device = device
        self.tokenizer = tokenizer

    def transform(self, sentence):
        # { "sentence", "file_id", "sent_id", "np_list" }
        # Drop sentences with too few (<= 2) or too many (>= 30) NP spans.
        if len(sentence["np_list"]) <= 2 or 30 <= len(sentence["np_list"]):
            return None
        tokens = [ 101, ] # [CLS]
        # index_mapping[i] is the half-open subword span (start, end) that
        # word i occupies in `tokens`; start/end begin at 1 to skip [CLS].
        index_mapping = []
        start, end = 1, 1
        for raw_token in sentence["sentence"]:
            encoded = self.tokenizer.encode(raw_token, add_special_tokens=False)
            tokens.extend(encoded)
            end += len(encoded)
            index_mapping.append((start, end))
            start += len(encoded)
        tokens.append(102)    # [SEP]s
        l = len(tokens)
        if l <= 8 or 128 <= l: # ignore doc that is too long or too short
            return None

        # tokens = torch.tensor(tokens, device=self.device)
        # raw_tokens: parsed into subword but not yet converted to ids
        raw_tokens = self.tokenizer.convert_ids_to_tokens(tokens)
        
        # Map a word index to its first / one-past-last subword index.
        index_as_start = lambda x: index_mapping[x][0]
        index_as_end = lambda x: index_mapping[x][1]

        # Rewrite each word-level NP span [x, y) as a subword-level span:
        # from the start of word x to the end of word y-1 (still half-open).
        np_list = [ [index_as_start(x), index_as_end(y-1)] 
                            for x, y in sentence["np_list"] ]
        
        def interval_to_indices(interval, padding):
            # Expand a half-open interval into explicit indices plus a 0/1
            # mask, right-padded with index 0 / mask 0 up to `padding`.
            indices = list(range(*interval))
            mask = [1,] * len(indices)
            if len(indices) > padding:
                print("Warning: padding not used.")
                return (indices, mask)
            while len(indices) < padding:
                indices.append(0)
                mask.append(0)
            return (indices, mask)

        # Pad every NP to the longest NP in this sentence.
        np_max_length = max([y-x for x, y in np_list])
        
        np_list = [ interval_to_indices(x, np_max_length) for x in np_list]
        
        sentence["np_list"] = np_list
        
        # this is the format of a sentence.
        transformed = {
            "tokens": tokens,
            "raw": raw_tokens
        }
        transformed.update(sentence)
        return transformed