import os
import glob

# NOTE: the original used backslash paths ('data\Train400', '\*.png'), which
# only work on Windows ('\T' merely happens not to be an escape sequence).
# Forward slashes / os.path.join are portable and equivalent on Windows.
data_dir = 'data/Train400'
dir_name = os.path.dirname(data_dir)    # -> 'data'
basename = os.path.basename(data_dir)   # -> 'Train400'
# Cache file lives next to the image folder: data/Train400.pkl
pkl_file = os.path.join(dir_name, basename + '.pkl')
file_list = glob.glob(os.path.join(data_dir, '*.png'))


import cv2
import numpy as np
import matplotlib.pyplot as plt
# Patch-extraction hyperparameters (DnCNN-style training data).
patch_size, stride = 40, 10  # 40x40 patches, window slides every 10 px
aug_times = 1  # number of randomly-augmented copies kept per patch
scales = [1, 0.9, 0.8, 0.7]  # multiscale factors applied to each source image
batch_size = 128  # dataset length is later trimmed to a multiple of this

def data_aug(img, mode=0):
    """Apply one of eight flip/rotate augmentations to a 2-D image.

    The eight modes form the dihedral group of the square: modes 0/2/4/6 are
    counter-clockwise rotations by 0/90/180/270 degrees, and the odd mode
    following each rotation additionally flips the result upside-down.

    Args:
        img: 2-D numpy array.
        mode: integer in [0, 7] selecting the transform.

    Returns:
        The transformed array (a view/new array; `img` is not modified).

    Raises:
        Exception: if mode is outside [0, 7].
    """
    if mode not in range(8):
        raise Exception("Invalid mode!", mode)
    # mode // 2 picks the quarter-turn count, the low bit picks the flip.
    out = np.rot90(img, k=mode // 2)
    if mode % 2:
        out = np.flipud(out)
    return out

def gen_patches(file_name):
    """Extract multiscale, randomly-augmented patches from a single image.

    The image is read as grayscale, rescaled by each factor in `scales`, and
    a `patch_size` x `patch_size` window is slid over each scale with step
    `stride`. Each patch is augmented `aug_times` times with a random
    flip/rotation from `data_aug`.

    Args:
        file_name: path to an image file readable by cv2.imread.

    Returns:
        list of 2-D uint8 numpy arrays, each (patch_size, patch_size).
    """
    img = cv2.imread(file_name, 0)  # flag 0 -> grayscale
    h, w = img.shape
    patches = []
    for s in scales:
        h_scaled, w_scaled = int(h * s), int(w * s)
        # BUG FIX: cv2.resize expects dsize as (width, height). The original
        # passed (h_scaled, w_scaled), swapping the axes for any non-square
        # image (invisible on square training images, wrong otherwise).
        img_scaled = cv2.resize(img, (w_scaled, h_scaled),
                                interpolation=cv2.INTER_CUBIC)
        # Slide the patch window over the scaled image.
        for i in range(0, h_scaled - patch_size + 1, stride):
            for j in range(0, w_scaled - patch_size + 1, stride):
                x = img_scaled[i:i + patch_size, j:j + patch_size]
                for _ in range(aug_times):
                    # Random dihedral transform keeps the patch
                    # distribution orientation-symmetric.
                    patches.append(data_aug(x, mode=np.random.randint(0, 8)))
    return patches
import pickle
def generate_save_patches(data_dir='data/Train400', verbose=False):
    """Generate training patches from every PNG in data_dir, caching to .pkl.

    On the first call the patches are extracted, normalized, and dumped to
    `<data_dir>.pkl`; subsequent calls load that cache directly.

    Args:
        data_dir: folder containing the training .png images.
        verbose: if True, print per-image progress.

    Returns:
        float32 numpy array of shape (N, 1, patch_size, patch_size) in
        [0, 1], with N trimmed to an exact multiple of batch_size.
    """
    dir_name, basename = os.path.dirname(data_dir), os.path.basename(data_dir)
    pkl_file = os.path.join(dir_name, basename + '.pkl')
    if os.path.exists(pkl_file):
        print('The .pkl file is prepared, loading...')
        # Context manager: the original opened the file without closing it.
        with open(pkl_file, 'rb') as f:
            return pickle.load(f)

    file_list = glob.glob(os.path.join(data_dir, '/*.png'.lstrip('/')))
    data = []
    # NOTE: the original loop contained a debug `if i > 0: break` that
    # silently limited training data to the first image; removed.
    for i, file in enumerate(file_list):
        data.extend(gen_patches(file))
        if verbose:
            print(str(i + 1) + '/' + str(len(file_list)) + ' is done ^_^')
    data = np.array(data, dtype='uint8')
    data = np.expand_dims(data, axis=3)  # -> (N, ps, ps, 1)
    # Trim the tail so N is a multiple of batch_size (required because of
    # batch normalization during training).
    discard_n = len(data) - len(data) // batch_size * batch_size
    data = np.delete(data, range(discard_n), axis=0)
    # Normalize to [0, 1] and move channels first: (N, 1, ps, ps) float32.
    data = data.astype('float32') / 255.0
    data = data.transpose((0, 3, 1, 2))
    # Persist the cache (the original left this commented out, which made
    # the load branch above unreachable).
    with open(pkl_file, 'wb') as f:
        pickle.dump(data, f)
    print('^_^-training data finished-^_^')
    return data

class DenoisingDataset:
    """Dataset of clean patches yielding (noisy_patch, noise) training pairs.

    Each access draws fresh additive white Gaussian noise, so the same index
    produces a different noisy sample every epoch (residual learning: the
    network input is the noisy patch, the target is the noise itself).

    Arguments:
        data_dir: folder of training PNGs, forwarded to generate_save_patches.
        sigma: noise standard deviation on the [0, 255] scale, e.g., 25.
    """
    def __init__(self, data_dir, sigma):
        super(DenoisingDataset, self).__init__()
        # (N, 1, patch_size, patch_size) float32 patches in [0, 1].
        self.xs = generate_save_patches(data_dir)
        self.sigma = sigma

    def __getitem__(self, index):
        # NOTE: removed the debug print that fired on every sample fetch.
        batch_x = self.xs[index, ...]
        # Scale sigma into the normalized [0, 1] pixel range.
        noise = np.random.standard_normal(size=batch_x.shape) * (self.sigma / 255.0)
        batch_y = batch_x + noise
        return batch_y, noise

    def __len__(self):
        return len(self.xs)

import mindspore.dataset as ds
# ---------------------------------------------------------------------------
# Development scratchpad: commented-out experiments from debugging the
# dataset pipeline and Windows/POSIX path handling. Kept verbatim; consider
# deleting before release.
# ---------------------------------------------------------------------------
# x = gen_patches("data/Train400/test_001.png")
# print(x[200])
# print(type(x[200]))
# print(x[200].shape)
# print("this is a single patch!")
# # plt.show()
# y = np.array(x,dtype=np.uint8)
# # print(y)
# z = y.astype('float32')/255.0
# # print(z)
# data_set = DenoisingDataset('data/Train400',25)
# # print(data_set)
# train_dataset = ds.GeneratorDataset(data_set, ["noised_img", "noise"], shuffle=True)
# print(train_dataset)
# i = 0
# for data in train_dataset.create_dict_iterator():
#     if i>5:
#         break
#     print('noised_img','{}'.format(data["noised_img"]),'noise', '{}'.format(data["noise"]))
# #     i += 1
# train_dataset = train_dataset.batch(128, drop_remainder=True)
# train_data_size = train_dataset.get_dataset_size()
# print(train_data_size)
# save_dir = os.path.join('models', 'DnCNN'+'_' + 'sigma'+str(25))
# print(save_dir)
# if not os.path.exists(save_dir):
#     os.makedirs(save_dir)
# s = os.path.join(save_dir, 'ckpt' + str(2) + '/')
# print(s)
# if not os.path.exists(s):
#     os.makedirs(s)
# file_source = glob.glob(os.path.join('data/Test/Set68', '*png'))
# print(file_source)
# test_data_path = 'data/Test/Set68'
# noiseL = 25
# x = os.path.join('./', test_data_path.split('/')[-1] + '_L%s_result'%noiseL)
# print(x)
# if not os.path.exists(x):
#     os.makedirs(x)
# f = 'data/Test/Set68\\test001.png'
# print(f.split('/'))
# y = os.path.join(x, f.split('/')[-1])
# if not os.path.exists(y):
#     os.makedirs(y)
# print(y)

# test1 = os.path.join('test1','ljc')
# test2 = os.path.join('./','test','ljc')
# if not os.path.exists(test1):
#     os.makedirs(test1)
# if not os.path.exists(test2):
#     os.makedirs(test2)
# if os.path.exists('test\ljc'):
#     print("exist")
# else:
#     print("not exist")



# (translated) In OpenCV and os.path, once the path points to a concrete file,
# the last separator before the file name must be '/'; earlier separators may
# freely mix '\' and '/'. NOTE(review): this is a Windows-only observation --
# on POSIX only '/' works at all.
# (translated) When the path points to a directory, '\' and '/' may be mixed.
dir_false_in_cv2 = 'data\Train400\test_001.png'
dir_true_in_cv2 = 'data/Train400/test_001.png'
# (translated) Both os.makedirs(os.path.join('xxx','xxx')) and
# os.makedirs(os.path.join('./','xxx')) create the folder under the current
# working directory.
# if os.path.exists('data\Train400/test_001.png'):
#     print('exist')
# else:
#     print('not exist')
# img = cv2.imread('.\data\Train400/test_001.png')
# cv2.imshow('test',img)
# cv2.waitKey(0)
# new_path = os.path.join('./data','hello')
# print(new_path)
# if not os.path.exists(new_path):
#     os.makedirs(new_path)