from typing import List
import torch
from torchvision import transforms
from language import AbstractBaseVocabulary
from transformers import GPT2Tokenizer, GPT2Model





class ToIds:
    """Callable transform that maps raw text to a list of token ids.

    Thin adapter so a vocabulary's ``convert_text_to_ids`` can be used
    inside a ``transforms.Compose`` pipeline.
    """

    def __init__(self, vocabulary: AbstractBaseVocabulary):
        # Vocabulary object providing convert_text_to_ids(text) -> List[int].
        self.vocabulary = vocabulary

    def __call__(self, text: str) -> List[int]:
        """Tokenize *text* into vocabulary ids."""
        ids = self.vocabulary.convert_text_to_ids(text)
        return ids


class ToLongTensor:
    """Callable transform that wraps a list of ids in a ``torch.LongTensor``."""

    def __call__(self, ids: List[int]) -> torch.LongTensor:
        """Convert *ids* (a list of ints) to a 1-D int64 tensor."""
        tensor = torch.LongTensor(ids)
        return tensor


class BertEmb:
    """Callable transform that embeds text with a pretrained GPT-2 model.

    NOTE(review): despite the name, this wraps GPT-2, not BERT. The class
    name is kept unchanged so existing callers keep working.
    """

    def __init__(self):
        # The original called bare `super()` here, which only constructs the
        # proxy and calls nothing — a no-op removed as dead code.
        self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        self.model = GPT2Model.from_pretrained('gpt2')

    def text_to_embedding(self, text):
        """Return the model's last hidden states for *text*.

        Returns a detached tensor of shape (seq_len, hidden_dim) after
        squeezing the batch axis. NOTE(review): ``squeeze()`` would also
        drop the sequence axis if seq_len == 1 — confirm callers handle it.
        """
        encoded_input = self.tokenizer(text, return_tensors='pt')
        # Pure inference: skip building the autograd graph entirely instead
        # of creating it and then detaching.
        with torch.no_grad():
            output = self.model(**encoded_input)
        return output['last_hidden_state'].squeeze().detach()

    def __call__(self, text, *args, **kwargs):
        """Allow the instance to be used directly in a transform pipeline."""
        return self.text_to_embedding(text)


def text_transform_factory(config: dict):
    """Build train/val text pipelines: text -> token ids -> LongTensor.

    Expects ``config['vocabulary']`` to provide ``convert_text_to_ids``.
    Returns a dict with identical 'train' and 'val' Compose pipelines.
    """
    vocab = config['vocabulary']
    return {
        split: transforms.Compose([ToIds(vocab), ToLongTensor()])
        for split in ('train', 'val')
    }


def text_transform_bert_factory():
    """Build train/val text pipelines that embed text with GPT-2 (``BertEmb``).

    Returns a dict with 'train' and 'val' Compose pipelines sharing one
    ``BertEmb`` instance: the original constructed two, which loaded the
    pretrained tokenizer and model from disk twice for no behavioral gain
    (the embedder is stateless after construction).
    """
    embedder = BertEmb()
    return {
        'train': transforms.Compose([embedder]),
        'val': transforms.Compose([embedder])
    }
