import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image, ImageOps

class SJTUDataset(Dataset):
    """Captcha-style image dataset: PNG images plus per-directory labels.txt.

    Recursively walks ``data_path``; every ``*.png`` file becomes a sample
    and every ``labels.txt`` contributes one label per line.

    NOTE(review): image/label pairing assumes ``os.walk`` yields the images
    in the same order as the corresponding ``labels.txt`` lines — confirm
    this against the on-disk layout, since ``os.walk`` file order is
    OS-dependent.
    """

    def __init__(self, data_path, label_path, transform=None):
        """
        Args:
            data_path: root directory scanned for images and labels.txt files.
            label_path: kept for interface compatibility; labels are actually
                discovered during the directory walk, not read from this path.
            transform: optional torchvision transform applied to each padded
                image; defaults to ``transforms.ToTensor()`` so ``__getitem__``
                always returns a tensor (the old code crashed when None).

        Raises:
            ValueError: if the number of images and labels differ.
        """
        self.data_path = data_path
        self.label_path = label_path
        self.image_files = []
        self.labels = []
        # Fall back to ToTensor() so __getitem__ never calls None.
        self.transform = transform if transform is not None else transforms.ToTensor()

        # Collect image paths and label lines in a single walk.
        for root, dirs, files in os.walk(data_path):
            for file in files:
                if file.endswith('.png'):
                    self.image_files.append(os.path.join(root, file))
                if file.endswith('labels.txt'):
                    with open(os.path.join(root, file), 'r') as f:
                        self.labels.extend(f.read().splitlines())

        if len(self.image_files) != len(self.labels):
            print(f'number of labels: {len(self.labels)}')
            print(f'number of images: {len(self.image_files)}')
            # ValueError is more specific than the old bare Exception;
            # callers catching Exception still catch it.
            raise ValueError('Number of photos and labels do not match')

    def __len__(self):
        """Number of samples (one per collected PNG)."""
        return len(self.image_files)

    def __getitem__(self, idx):
        """Return ``(image_tensor, label_tensor)`` for sample ``idx``.

        The image is white-padded to a square before the transform; the
        label tensor is the (5, 26) one-hot encoding as float32.
        """
        img_name = self.image_files[idx]
        image = Image.open(img_name).convert('RGB')
        # Pad to a square with white so the aspect ratio is preserved.
        image = self.fill_white(image)
        image_tensor = self.transform(image)

        label = self.labels[idx]
        label = label_to_onehot(label)
        label_tensor = torch.tensor(label, dtype=torch.float32)
        return image_tensor, label_tensor

    @staticmethod
    def fill_white(img: Image):
        """Pad ``img`` with white pixels to a centered square canvas.

        The shorter dimension is expanded symmetrically (extra pixel goes to
        the bottom/right on odd deltas) until width == height.
        """
        width, height = img.size

        max_dim = max(width, height)
        delta_w = max_dim - width
        delta_h = max_dim - height

        top_pad = delta_h // 2
        bottom_pad = delta_h - top_pad
        left_pad = delta_w // 2
        right_pad = delta_w - left_pad

        new_img = ImageOps.expand(img, border=(left_pad, top_pad, right_pad, bottom_pad), fill='white')
        return new_img

def get_datasets(data_path, transform=transforms.ToTensor()):
    """Build the (train, test) SJTUDataset pair rooted at ``data_path``.

    Expects ``data_path/train`` and ``data_path/test`` subdirectories, each
    containing images and a ``labels.txt``.
    """
    datasets = []
    for split in ('train', 'test'):
        split_dir = os.path.join(data_path, split)
        labels_file = os.path.join(split_dir, 'labels.txt')
        datasets.append(SJTUDataset(split_dir, labels_file, transform=transform))
    return datasets[0], datasets[1]

def label_to_onehot(label: str) -> np.ndarray:
    """One-hot encode a 4- or 5-letter label.

    Args:
        label: string of letters a-z (uppercase accepted and case-folded).

    Returns:
        np.ndarray of shape (5, 26); row i one-hot encodes label[i], and
        trailing rows stay all-zero for labels shorter than 5 characters.
        (The old comment claimed [26, 5] — the actual shape is (5, 26).)

    Raises:
        ValueError: if the label is longer than 5 characters or contains a
            character outside A-Z/a-z.  The old code ASCII-shifted ANY
            non-lowercase character (so '5' silently wrote a wrong index)
            and raised a raw IndexError for over-long labels.
    """
    char_set_size = 26
    max_label_length = 5
    if len(label) > max_label_length:
        raise ValueError(f'label longer than {max_label_length} chars: {label!r}')

    one_hot_label = np.zeros((max_label_length, char_set_size))
    for i, char in enumerate(label.lower()):
        index = ord(char) - ord('a')
        if not 0 <= index < char_set_size:
            raise ValueError(f'invalid label character: {char!r}')
        one_hot_label[i, index] = 1
    return one_hot_label

def onehot_to_label(onehot: np.ndarray) -> str:
    """Decode a (5, 26) one-hot array back into its string label.

    Accepts a numpy array or a torch.Tensor (detached and moved to CPU, so
    CUDA tensors work — the old ``.numpy()`` without ``.cpu()`` did not).
    Decoding stops at the first all-zero row, so 4-letter labels decode
    without a bogus trailing character; the old code only treated row 4 as
    padding, and an all-zero earlier row silently decoded as 'a'.
    """
    if isinstance(onehot, torch.Tensor):  # isinstance, not type() ==
        onehot = onehot.detach().cpu().numpy()
    chars = []
    for row in onehot:
        if np.sum(row) == 0:
            break  # padding row: the label has ended
        chars.append(chr(int(np.argmax(row)) + ord('a')))
    return ''.join(chars)

if __name__ == '__main__':
    DATA_PATH = './data/'
    transform = transforms.Compose([
        transforms.ToTensor(),
    ])
    train_dataset, test_dataset = get_datasets(DATA_PATH, transform=transform)
    img, label = train_dataset[0]
    # Bug fix: onehot_to_label is a module-level function, not a method of
    # SJTUDataset — the old `SJTUDataset.onehot_to_label(label)` call raised
    # AttributeError before printing anything.
    label = onehot_to_label(label)
    print('label:', label)