import os

import nltk
import numpy as np
import paddle
from paddle import io
from paddle.vision import transforms as T
from PIL import Image
from pycocotools.coco import COCO

from utils import collate_fn

class COCODataset(io.Dataset):
    """COCO caption dataset yielding one (image_tensor, caption_tensor) pair per annotation.

    Args:
        root: Directory containing the COCO image files.
        json: Path to the COCO annotations JSON file (passed to pycocotools.COCO).
        vocab: Callable mapping a token string to an integer id; must also
            handle the special '<start>' and '<end>' tokens.
        preprocess_fn: Optional callable applied to the loaded PIL image;
            expected to return a paddle-compatible tensor. If None, the raw
            RGB image is converted to a tensor without preprocessing.
    """

    def __init__(self, root, json, vocab, preprocess_fn=None):
        self.root = root
        self.coco = COCO(json)
        # One sample per caption annotation, not per image — an image with
        # several captions appears several times.
        self.ids = list(self.coco.anns.keys())
        self.vocab = vocab
        self.preprocess_fn = preprocess_fn

    def __getitem__(self, index):
        """Return the (image_tensor, caption_token_tensor) pair at `index`."""
        ann_id = self.ids[index]
        caption = self.coco.anns[ann_id]['caption']

        # Resolve the image this caption annotates and load it as RGB.
        img_id = self.coco.anns[ann_id]['image_id']
        img_name = self.coco.loadImgs(img_id)[0]['file_name']
        image = Image.open(os.path.join(self.root, img_name)).convert('RGB')

        if self.preprocess_fn is not None:
            image_tensor = self.preprocess_fn(image)
        else:
            # paddle.to_tensor does not accept PIL images directly, so go
            # through an (H, W, C) uint8 numpy array first.
            image_tensor = paddle.to_tensor(np.asarray(image))

        # Tokenize the caption and wrap it with <start>/<end> markers.
        tokens = nltk.tokenize.word_tokenize(str(caption).lower())
        target = [self.vocab('<start>')]
        target.extend(self.vocab(token) for token in tokens)
        target.append(self.vocab('<end>'))
        target_tensor = paddle.to_tensor(target)

        return image_tensor, target_tensor

    def __len__(self):
        """Number of caption annotations (one sample per caption)."""
        return len(self.ids)


def getDataLoader(
        root,
        json,
        vocab,
        preprocess_fn,
        batch_size,
        shuffle,
        num_workers
    ):
    """Build a paddle DataLoader over the COCO caption dataset.

    Args:
        root: Directory containing the COCO image files.
        json: Path to the COCO annotations JSON file.
        vocab: Callable mapping a token string to an integer id.
        preprocess_fn: Optional image preprocessing callable (see COCODataset).
        batch_size: Number of samples per batch.
        shuffle: Whether to reshuffle the data every epoch.
        num_workers: Number of subprocesses used for data loading.

    Returns:
        A paddle.io.DataLoader yielding batches assembled by `collate_fn`
        (captions are variable-length, so a custom collate is required).
    """
    dataset = COCODataset(root=root, json=json, vocab=vocab,
                          preprocess_fn=preprocess_fn)
    data_loader = io.DataLoader(dataset=dataset,
                                batch_size=batch_size,
                                shuffle=shuffle,
                                num_workers=num_workers,
                                collate_fn=collate_fn)
    return data_loader
