import os
import json
from glob import glob
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
import random

class DigitsDataset(Dataset):
    """Dataset of digit images loaded from PNG files.

    For ``train``/``val`` modes, a JSON label file maps each image basename
    to a dict whose ``'label'`` key holds a list of digit classes; targets
    are padded/truncated to exactly 4 digits using class 10 as the "blank"
    padding class. For ``test`` mode there are no labels and the target is
    the image file path.

    Args:
        data_dir: dict of paths. Test mode reads ``data_dir['test_data']``
            (image directory); other modes read ``data_dir[f'{mode}_data']``
            and ``data_dir[f'{mode}_label']`` (JSON label file).
        mode: 'train', 'val' or 'test'.
        size: kept for interface compatibility.
            NOTE(review): never read anywhere in this class — confirm
            before removing.
        aug: when True, apply color/grayscale/affine augmentation.
    """

    def __init__(self, data_dir, mode='train', size=(128, 256), aug=True):
        super(DigitsDataset, self).__init__()

        self.aug = aug
        self.size = size
        self.mode = mode
        # Crop width; mutated by collect_fn during training (multi-scale).
        self.width = 224
        self.batch_count = 0
        self.data_dir = data_dir

        if mode == 'test':
            # Test set has no labels; keep bare file paths.
            self.imgs = glob(os.path.join(data_dir['test_data'], '*.png'))
            self.labels = None
        else:
            # Use a context manager so the label file handle is closed
            # promptly (the original leaked it).
            with open(data_dir[f'{mode}_label'], 'r') as fp:
                labels = json.load(fp)
            imgs = glob(os.path.join(data_dir[f'{mode}_data'], '*.png'))
            # Keep only images that actually have an entry in the label file.
            self.imgs = [(img, labels[os.path.split(img)[-1]]) for img in imgs
                         if os.path.split(img)[-1] in labels]

    def __getitem__(self, idx):
        """Return ``(image_tensor, target)``.

        Train/val: target is a length-4 LongTensor of digit classes padded
        with class 10. Test: target is the image file path (str).
        """
        if self.mode != 'test':
            path, label = self.imgs[idx]
        else:
            path = self.imgs[idx]
            label = None

        # Open under a context manager so the file handle is released
        # (Image.open is lazy and the original leaked the handle).
        # convert('RGB') copies the pixel data — keeping the image usable
        # after close — and guards against grayscale/RGBA PNGs breaking
        # the 3-channel Normalize below.
        with Image.open(path) as im:
            img = im.convert('RGB')

        # Transforms must be rebuilt per item: self.width is mutated by
        # collect_fn for multi-scale training, so the pipeline cannot be
        # hoisted to __init__.
        # Geometry / resizing transforms.
        img_transforms = [
            transforms.Resize(128),
            transforms.CenterCrop((128, self.width)),
        ]

        # Data augmentation (train-time only, controlled by `aug`).
        if self.aug:
            img_transforms.extend([
                transforms.ColorJitter(0.1, 0.1, 0.1),
                transforms.RandomGrayscale(0.1),
                transforms.RandomAffine(15, translate=(0.05, 0.1), shear=5),
            ])

        # Base tensor conversion + ImageNet normalization.
        img_transforms.extend([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])

        img = transforms.Compose(img_transforms)(img)

        if self.mode == 'test':
            return img, self.imgs[idx]

        # Pad to 4 digits with the blank class (10); labels longer than 4
        # are truncated to their first 4 digits.
        digits = label['label'][:4] + (4 - len(label['label'])) * [10]
        return img, torch.tensor(digits).long()

    def __len__(self):
        return len(self.imgs)

    def collect_fn(self, batch):
        """Collate a batch; pass this as the DataLoader's ``collate_fn``.

        (Name kept as ``collect_fn`` for backward compatibility with
        existing callers; the conventional name is ``collate_fn``.)

        Every 10 training batches the crop width is re-sampled so the
        model sees multiple input scales.
        """
        imgs, labels = zip(*batch)
        if self.mode == 'train':
            if self.batch_count > 0 and self.batch_count % 10 == 0:
                # NOTE(review): range(224, 256, 16) yields only {224, 240};
                # confirm whether 256 was meant to be a possible choice.
                self.width = random.choice(range(224, 256, 16))

        self.batch_count += 1
        imgs = torch.stack(imgs).float()
        if self.mode == 'test':
            # Test targets are file paths (strings) and cannot be stacked
            # into a tensor — the original torch.stack(labels) crashed here.
            return imgs, labels
        return imgs, torch.stack(labels)