'''
 * @Author: Benjay·Shaw
 * @Date: 2024-10-31 17:07:50
 * @LastEditors: Benjay·Shaw
 * @LastEditTime: 2024-10-31 22:30:19
 * @Description: Data processing (HDF5 dataset preparation and loading)
'''
import os
import paddle
import glob
import random
import cv2
import h5py
import numpy as np
from tqdm import tqdm
from utils.common_function import *


def s_normalize(img, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    """Normalize a channel-first (C, H, W) image with per-channel statistics.

    Each channel is scaled from [0, 255] to [0, 1] and then standardized
    with the ImageNet mean/std defaults.  NOTE: the input is modified
    in place and also returned; callers should pass a float array —
    assigning float results into an integer array would truncate
    (presumably callers already do; verify at call sites).

    Args:
        img: array-like image of shape (3, H, W).
        mean: per-channel means, in the channel order of ``img``.
        std: per-channel standard deviations.

    Returns:
        The same ``img`` object, normalized in place.
    """
    for ch in range(3):
        img[ch, :, :] = (img[ch, :, :] / 255.0 - mean[ch]) / std[ch]
    return img


def normalize(data):
    """Scale pixel values from the [0, 255] range into [0, 1]."""
    scaled = data / 255
    return scaled


def data_process(args, aug_times, data_type, count):
    """Convert one image split into an HDF5 archive ``<data_type><count>.h5``.

    Paired images are read from the ``A`` and ``B`` sub-directories and the
    change masks from ``OUT``.  The test split lives directly under
    ``args.dataset_dir``; train/val splits live in a sub-directory named
    after ``data_type``.  Each image pair is resized and normalized with the
    paddle transform pipeline; masks are reduced to a single binary channel.
    For non-test splits every sample is additionally stored ``aug_times``
    times with a random augmentation applied consistently to both images
    and the mask.

    Args:
        args: namespace providing ``dataset_dir``, ``image_size``,
            ``image_type`` and ``mask_type``.
        aug_times (int): number of random augmentations per training sample.
        data_type (str): one of 'train', 'val', 'test'.
        count (str): suffix distinguishing multiple generated archives.

    Raises:
        ValueError: if an image or mask file cannot be decoded.
    """
    trans = paddle.vision.transforms.Compose([
        paddle.vision.transforms.ToPILImage(),
        paddle.vision.transforms.Resize(args.image_size),
        paddle.vision.transforms.ToTensor(),
        paddle.vision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                           std=[0.229, 0.224, 0.225]),
    ])
    # Test data sits at the top level; train/val in per-split sub-dirs.
    if data_type == 'test':
        split_dir = args.dataset_dir
    else:
        split_dir = os.path.join(args.dataset_dir, data_type)
    files_i1 = glob.glob(os.path.join(split_dir, 'A', '*' + args.image_type))
    files_i2 = glob.glob(os.path.join(split_dir, 'B', '*' + args.image_type))
    files_l = glob.glob(os.path.join(split_dir, 'OUT', '*' + args.mask_type))
    # BUG FIX: the original called paddle.sort/paddle.argsort on Python
    # lists of path strings and discarded the result, so A/B/OUT were never
    # actually aligned.  Sort the lists in place instead.
    files_i1.sort()
    files_i2.sort()
    files_l.sort()
    # 'with' guarantees the archive is closed even if processing raises.
    with h5py.File(data_type + count + '.h5', 'w') as h5f:
        h5f_image1 = h5f.create_group('image1')
        h5f_image2 = h5f.create_group('image2')
        h5f_label = h5f.create_group('label')
        if data_type == 'test':
            h5f_label_name = h5f.create_group('label_name')
        data_num = 0
        loop = tqdm(range(len(files_i1)))
        for i in loop:
            img1 = cv2.imread(files_i1[i])
            img2 = cv2.imread(files_i2[i])
            label = cv2.imread(files_l[i])
            if img1 is None or img2 is None or label is None:
                raise ValueError('unreadable image or mask: %s / %s / %s' %
                                 (files_i1[i], files_i2[i], files_l[i]))
            data1 = trans(img1)
            data2 = trans(img2)
            # Masks are assumed to be 0/255; dividing by 255 and truncating
            # to int maps them to 0/1.  Keep a single channel, shape (1,H,W).
            data_label = np.expand_dims(np.int_(normalize(label))[:, :, 0], 0)
            h5f_image1.create_dataset(str(data_num), data=data1)
            h5f_image2.create_dataset(str(data_num), data=data2)
            h5f_label.create_dataset(str(data_num), data=data_label)
            if data_type == 'test':
                # PORTABILITY FIX: use basename instead of splitting on '/'.
                label_name = os.path.basename(files_l[i]).split(
                    args.mask_type)[0]
                h5f_label_name.create_dataset(
                    str(data_num), dtype=h5py.special_dtype(vlen=str),
                    data=np.array(label_name, dtype=object))
            data_num += 1
            if data_type != 'test':
                for m in range(aug_times):
                    # Use the same random transform index for both images
                    # and the mask so the pair stays aligned.
                    aug_int = np.random.randint(1, 8)
                    aug_key = str(data_num) + '_aug_%d' % (m + 1)
                    h5f_image1.create_dataset(
                        aug_key, data=data_augmentation(data1, aug_int))
                    h5f_image2.create_dataset(
                        aug_key, data=data_augmentation(data2, aug_int))
                    h5f_label.create_dataset(
                        aug_key, data=data_augmentation(data_label, aug_int))
                    data_num += 1
            loop.set_postfix(num=data_num)
    print(data_type + ' set, # samples %d\n' % data_num)


def prepare_data(args, aug_times=0, is_train=True, count='1'):
    """Build the HDF5 archives for either the train+val or the test split.

    Args:
        args: configuration namespace forwarded to :func:`data_process`.
        aug_times (int): random augmentations per training sample.
        is_train (bool): True builds 'train' and 'val'; False builds 'test'.
        count (str): suffix distinguishing multiple generated archives.
    """
    if not is_train:
        print('\nprocess test data')
        data_process(args, aug_times, 'test', count)
        return
    print('process training data')
    data_process(args, aug_times, 'train', count)
    print('\nprocess validation data')
    data_process(args, aug_times, 'val', count)


class Dataset(paddle.io.Dataset):
    """Paddle dataset backed by an HDF5 archive built by ``data_process``.

    Keys are read once (and shuffled) at construction; the archive is
    reopened per ``__getitem__`` call so the dataset stays picklable for
    multi-worker loading.
    """

    def __init__(self, data_type='train', count='1'):
        """
        Args:
            data_type (str): 'train', 'val' or anything else for 'test'.
            count (str): archive-name suffix used by ``data_process``.
        """
        super(Dataset, self).__init__()
        self.data_type = data_type
        self.count = count
        # 'with' guarantees the handle is released even if the read raises.
        with h5py.File(self._h5_path(), 'r') as h5f:
            self.keys = list(h5f['image1'].keys())
        random.shuffle(self.keys)

    def _h5_path(self):
        # Single source of truth for the archive name (was duplicated in
        # __init__ and __getitem__). Any data_type other than train/val
        # falls back to the test archive, matching the original behavior.
        prefix = self.data_type if self.data_type in ('train', 'val') else 'test'
        return prefix + self.count + '.h5'

    def __len__(self):
        return len(self.keys)

    def __getitem__(self, index):
        """Return (image1, image2, label, label_name) for one sample.

        ``label_name`` is the mask file stem for the test split and an
        empty string otherwise.
        """
        key = self.keys[index]
        with h5py.File(self._h5_path(), 'r') as h5f:
            data1 = np.array(h5f['image1'][key])
            data2 = np.array(h5f['image2'][key])
            label = np.array(h5f['label'][key])
            label_name = ''
            if self.data_type == 'test':
                label_name = str(h5f['label_name'][key][()])
        return (paddle.to_tensor(data=data1), paddle.to_tensor(data=data2),
                paddle.to_tensor(data=label), label_name)
