import paddle
# from paddle.vision import transforms
from paddle.io import Dataset, DataLoader
from paddle.vision.transforms import ToTensor
from PIL import Image
import os
import pickle
import joblib
import numpy as np
import nltk
from pycocotools.coco import COCO
import pdb

class COCODataset(Dataset):
    """COCO image-captioning dataset.

    Each sample is one *annotation* (caption), so an image with several
    captions appears once per caption.  ``__getitem__`` returns a
    ``(image, target)`` pair where ``target`` is a 1-D int tensor of
    vocabulary ids wrapped with ``<start>``/``<end>`` markers.
    """
    def __init__(self, root, json, vocab, transform=None):
        """Set the paths for images, captions and the vocabulary wrapper.

        Args:
            root: image directory.
            json: COCO annotation file path.
            vocab: vocabulary wrapper; callable mapping a token to an id.
            transform: optional image transformer applied to the PIL image.
        """
        self.root = root
        self.coco = COCO(json)
        self.ids = list(self.coco.anns.keys())
        self.vocab = vocab
        self.transform = transform
        # Hoisted out of __getitem__: ToTensor is stateless, so one shared
        # instance avoids re-constructing it for every sample.
        self._to_tensor = ToTensor()

    # NOTE: DataLoader loads batches asynchronously with worker threads,
    # which makes failures hard to debug; debug the dataset locally before
    # production use.  Running under `python -i` surfaces more detailed
    # error messages.
    def __getitem__(self, index):
        """Return the ``(image, caption)`` pair for annotation *index*."""
        ann = self.coco.anns[self.ids[index]]
        caption_text = ann['caption']
        img_id = ann['image_id']
        path = self.coco.loadImgs(img_id)[0]['file_name']

        image = Image.open(os.path.join(self.root, path)).convert('RGB')
        if self.transform is not None:
            image = self.transform(image)
            # NOTE(review): ToTensor runs only when a transform is given;
            # with transform=None a raw PIL image is returned — confirm this
            # asymmetry is intended.
            image = self._to_tensor(image)

        # Convert the caption string to a tensor of word ids,
        # bracketed by the <start> and <end> markers.
        tokens = nltk.tokenize.word_tokenize(str(caption_text).lower())
        word_ids = [self.vocab('<start>')]
        word_ids.extend(self.vocab(token) for token in tokens)
        word_ids.append(self.vocab('<end>'))
        target = paddle.to_tensor(word_ids)
        return image, target

    def __len__(self):
        """Number of annotations (one sample per caption)."""
        return len(self.ids)

def collate_fn(data: list):
    """Create mini-batch tensors from a list of ``(image, caption)`` tuples.

    A custom collate_fn is needed because the default one cannot merge
    variable-length captions (padding is required).

    Args:
        data (list(tuple)): image, caption.
            - image: tensor of shape (3, 256, 256).
            - caption: 1-D tensor of variable length.

    Returns:
        images: tensor of shape (batch_size, 3, 256, 256).
        targets: int64 tensor of shape (batch_size, padded_length),
            zero-padded on the right.
        lengths (list): valid (unpadded) length of each caption, in
            descending order.
    """
    # Sort by caption length, longest first.  Use sorted() rather than
    # list.sort() so the caller's list is not mutated as a side effect.
    data = sorted(data, key=lambda pair: len(pair[1]), reverse=True)
    images, captions = zip(*data)

    # Merge images from a tuple of 3-D tensors into one 4-D tensor.
    images = paddle.stack(images, axis=0)

    # Merge captions from a tuple of 1-D tensors into one padded 2-D tensor.
    # Build the padded matrix in numpy and convert once — cheaper than
    # per-row slice assignment into a live paddle tensor.
    lengths = [len(cap) for cap in captions]
    padded = np.zeros((len(captions), max(lengths)), dtype='int64')
    for row, cap in enumerate(captions):
        padded[row, :lengths[row]] = cap.numpy()
    targets = paddle.to_tensor(padded)
    return images, targets, lengths


def getDataLoader(
        root,
        json,
        vocab,
        transform,
        batch_size,
        shuffle,
        num_workers
    ):
    """Build a DataLoader over the COCO captioning dataset.

    Args:
        root: image directory.
        json: COCO annotation file path.
        vocab: vocabulary wrapper passed through to COCODataset.
        transform: optional image transformer.
        batch_size: samples per mini-batch.
        shuffle: whether to reshuffle each epoch.
        num_workers: number of loader worker processes.

    Returns:
        paddle.io.DataLoader yielding (images, targets, lengths) batches
        via the custom collate_fn above.
    """
    dataset = COCODataset(root=root, json=json, vocab=vocab, transform=transform)
    # collate_fn pads the variable-length captions; the default collation
    # cannot batch them.  (A leftover debug `print(collate_fn)` was removed.)
    return DataLoader(dataset=dataset,
                      batch_size=batch_size,
                      shuffle=shuffle,
                      num_workers=num_workers,
                      collate_fn=collate_fn)
