import torch
import torch.utils.data as data
from PIL import Image
import PIL
import os
import os.path
import pickle
import random
import numpy as np
import pandas as pd


class TextDataset(data.Dataset):
    """Dataset pairing images with precomputed text embeddings.

    Expects the following layout under ``data_dir``:
        <data_dir>/<split>/filenames.pkl   -- pickled list of image file names
        <data_dir>/<split>/embeddings.pkl  -- pickled array-like, one embedding row per image
        <data_dir>/<split>/image/<name>    -- the image files themselves
    Filenames and embedding rows are aligned by index.
    """

    def __init__(self, data_dir, split='train', imsize=64, transform=None):
        """
        Args:
            data_dir: root directory of the dataset.
            split: sub-directory name ('train', 'test', ...).
            imsize: target crop size; images are loaded slightly larger
                (imsize * 76 / 64) so a later RandomCrop has room to move.
            transform: optional callable applied to each PIL image
                (typically a torchvision transforms.Compose).
        """
        self.transform = transform
        self.imsize: int = imsize
        self.data: list = []  # kept for backward compatibility; not used internally
        self.data_dir: str = data_dir
        split_dir: str = os.path.join(data_dir, split)
        self.split_dir = split_dir
        self.filenames: list = self.load_filenames(split_dir)
        # NOTE(review): rows are assumed to align with self.filenames by
        # index (e.g. image_num x 384) — verify against the pickle producer.
        self.embeddings: np.ndarray = self.load_embedding(split_dir)

    def get_img(self, img_path):
        """Load an image, upscale it for later random cropping, and transform it.

        Returns the transformed image (a torch.Tensor when ``transform``
        ends in ToTensor; otherwise a PIL image).
        """
        img = Image.open(img_path).convert('RGB')
        # Enlarge so a downstream RandomCrop(imsize) samples a sub-window
        # rather than always seeing the full frame.
        load_size = int(self.imsize * 76 / 64)
        img = img.resize((load_size, load_size), PIL.Image.BILINEAR)
        if self.transform is not None:
            img: torch.Tensor = self.transform(img)
        return img

    def load_embedding(self, data_dir):
        """Unpickle ``embeddings.pkl`` from ``data_dir`` and return it as an ndarray."""
        embedding_path = os.path.join(data_dir, 'embeddings.pkl')
        with open(embedding_path, 'rb') as f:
            embeddings = pickle.load(f)
            embeddings = np.array(embeddings)
            print('embeddings: ', embeddings.shape)
        return embeddings

    def load_filenames(self, data_dir):
        """Unpickle and return the list of image file names from ``data_dir``."""
        filepath = os.path.join(data_dir, 'filenames.pkl')
        with open(filepath, 'rb') as f:
            filenames = pickle.load(f)
        print('Load filenames from: %s (%d)' % (filepath, len(filenames)))
        return filenames

    def __getitem__(self, index):
        """Return the (image, embedding) pair at ``index``."""
        key = self.filenames[index]
        data_dir = self.split_dir
        embedding: np.ndarray = self.embeddings[index, :]  # 1-D row for this image
        img_name = '%s/image/%s' % (data_dir, key)
        # Debug print removed: it ran once per sample inside the DataLoader
        # loop and flooded stdout.
        img: torch.Tensor = self.get_img(img_name)
        return img, embedding

    def __len__(self):
        return len(self.filenames)


"""
下面是测试用的，实际程序中要把下面删掉
"""

# import torchvision.transforms as transforms

# data_dir = 'D:/NLP_txt2img'
# image_transform = transforms.Compose([
#     transforms.RandomCrop(64),
#     transforms.RandomHorizontalFlip(),
#     transforms.ToTensor(),
#     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# dataset = TextDataset(data_dir, 'train', imsize=64, transform=image_transform)
# img_, embedding_ = dataset.__getitem__(2)

# print(type(dataset.embeddings), np.shape(dataset.embeddings))
# print(type(dataset.filenames), np.shape(dataset.filenames))

# print(type(img_), np.shape(img_))
# print(img_)
# print(type(embedding_), np.shape(embedding_))
# print(embedding_)
