import numpy as np
import cv2, random
from pathlib import Path
from torch.utils.data import Dataset

class ImgDataset(Dataset):
    def __init__(self, 
                 dataset_dict:dict, 
                 label_set:dict, 
                 mode = 'train', 
                 img_zize:int|list[int] = 256,
                 shuffle=False, 
                 seed:None|int=None) -> None:
        super().__init__()
        self.mode = mode # 定义数据集类型
        self.dataset_dict = dataset_dict # 数据集
        self.labelset = label_set # 标签集，必须与数据集的key一一对应
        self.data_label = [] # 数据与标签[data, onehot],二维列表
        self.__shuffle = shuffle # 是否打乱数据序列
        self.__seed = seed # 随机种子
        self.__img_size = img_zize
        self.__data2list()

    def __data2list(self):
        # 将数据与标签合成序列
        for key, value in self.dataset_dict.items():
            for element in value:
                self.data_label.append([element, self.labelset[key]])
        if self.__shuffle:
            if self.__seed != None:
                random.seed(self.__seed)
            random.shuffle(self.data_label)
        
    def __len__(self):
        return len(self.data_label) # 返回数据集长度
    
    def __getitem__(self, index):
        x = self.data_label[index] # 获取图片地址，避免地址中文
        img = cv2.imread(x[0])
        img = self.img_cut(img, self.__img_size) # type: ignore
        return img, x[1]
    
    def img_cut(self, img:np.ndarray, size):
        # 裁切图片或调整图片尺寸比
        if type(size) == int:
            h, w = size, size
        else:
            h, w = size
        if len(img.shape) == 2: # 灰度图片提升通道
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        img_h, img_w = img.shape[0], img.shape[1] # 获取图片的shape
        # if max(img_h/img_w, img_w/img_h) <= max(h/w, w/h) * 1.5: # 考虑图像旋转不变性时的判断依据
        if img_h/img_w <= h/w * 1.5 or img_w/img_h <= w/h * 1.5:
            img = self.__resize(img, (img_h, img_w), (h, w))
        elif img_h/img_w > h/w * 1.5:
            # 单独裁剪h
            ratio = h / w * 1.5
            modify_h = int(ratio * img_w) # 照片裁剪高度
            h_start = int(img_h / 2 - modify_h / 2)
            h_end = int(img_h / 2 + modify_h / 2)
            img = img[h_start:h_end, :] # 裁剪图片
            img = self.__resize(img, (h_end-h_start, img_w), (h, w))
        elif img_w/img_h > w/h * 1.5:
            # 单独裁剪w
            ratio = w / h * 1.5
            modify_w = int(ratio * img_h)
            w_start = int(img_w / 2 - modify_w / 2)
            w_end = int(img_w / 2 + modify_w / 2)
            img = img[:, w_start:w_end]
            img = self.__resize(img, (img_h, w_end-w_start), (h, w))
        return img

    def __resize(self, img:np.ndarray, shape1:tuple, shape2:tuple):
        # 缩放图片尺寸
        if shape1[0] >= shape2[0] or shape1[1] >= shape2[1]:# 缩小图像
            img = cv2.resize(img, shape2, interpolation=cv2.INTER_AREA)
        elif shape1[0] < shape2[0] or shape1[1] < shape2[1]: # 放大图像
            img = cv2.resize(img, shape2, interpolation=cv2.INTER_CUBIC)
        else:
            img = cv2.resize(img, shape2, interpolation=cv2.INTER_LINEAR)
        return img

class GetData(object):
    def __init__(self, point:str, 
                 valid_test_size:int|float|list[int]|list[float]|None = None, 
                 seed:None|int|bool=None) -> None:
        """
        获取所有数据集的文件地址，并按训练集、验证集、测试集(若不定义，则忽略)划分
            Argm:
            point: str, 数据集文件的根目录
            valid_test_size: int|float|list[int]|list[float]|None = None, 
                定义单组标签的验证集、测试机样本数量，若为None，则不定义验证集、测试集
                非列表时，为仅选取验证集，不定义测试集
                int: 表示从每个标签中选取int个样本
                float: 表示从每个标签中选取len * float个样本, 范围(0, 0.5)
                列表时，定义如下
                [valid_size, test_size]
            seed: None|int|bool=None, 定义数据是否随机选择, 不指定int时为随机选择
        """
        self.__point = Path(point) # get root point of data
        self.__data_dic ={} # 返回所有数据的标签和地址
        self.data_dic_len = {} # 返回所有数据的样本大小
        self.__valid_test_size = valid_test_size # 定义单组标签的验证集、测试机样本数量
        self.__seed = seed
        self.test_dataset = {}
        self.train_dataset = {}
        self.valid_dataset = {}
        self.lebel_onehot = {}
        self.__get_point__() # 获取数据的标签和地址
        self.__get_onehot__()
        self.__split__()
    
    def __get_point__(self):
        """
        遍历获取所有文件的地址及标签，并以字典的形式返回
        """
        for mkdir in self.__point.iterdir():
            files = []
            if mkdir.is_dir():
                for file in mkdir.iterdir():
                    files.append(file)
                else:
                    self.__data_dic[mkdir.stem] = files
                    self.data_dic_len[mkdir.stem] = len(files)
    
    def __split__(self):
        """
        将所有数据集切分成训练、验证、测试集
        """
        if self.__valid_test_size == None or self.__valid_test_size == 0:
            self.train_dataset = self.__data_dic
        elif type(self.__valid_test_size) == list:
            is_float = False
            if self.__valid_test_size[0] < 1: # 判断列表元素是否为float
                is_float = True
            for label, p in self.__data_dic.items(): # 遍历数据集
                if is_float:
                    num_v, num_t = int(len(p) * self.__valid_test_size[0]), \
                        int(len(p) * self.__valid_test_size[1]) # 计算验证、测试集数量
                else:
                    num_v, num_t = self.__valid_test_size
                # 先切分验证集
                self.__split_per_set__(num_v, label, p) # type: ignore
                # 再切分验证集
                self.__split_per_set__(num_t, label, p, t='tset') # type: ignore
        else:
            for label, p in self.__data_dic.items(): # 遍历数据集
                if type(self.__valid_test_size) == float: # 判断列表元素是否为float
                    num_v = int(len(p) * self.__valid_test_size)
                else:
                    num_v = self.__valid_test_size
                self.__split_per_set__(num_v, label, p) # type: ignore

    def __split_per_set__(self, num:int, name:str, data_set:list, t='valid'):
        if self.__seed == None or self.__seed == False: # 不执行随机切分
            if t == 'valid':
                self.valid_dataset[name] = data_set[:num]
                self.train_dataset[name] = data_set[num:]
            elif t == 'test':
                self.test_dataset[name] = data_set[-num:]
                self.train_dataset[name] = self.train_dataset[name][:-num] # 根据已经创建的trainset进一步裁去testset部分
        else:
            if type(self.__seed) == int:
                random.seed(name+str(self.__seed)) # 定义随机种子
            random.shuffle(data_set) # 将list随机打乱
            if t == "valid":
                self.valid_dataset[name] = data_set[:num]
                self.train_dataset[name] = data_set[num:]
            elif t == 'test':
                self.test_dataset[name] = self.train_dataset[name][-num:]
                self.train_dataset[name] = self.train_dataset[name][:-num]

    def __get_onehot__(self):
        # 对载入的标签制作onehot标签
        label_set = self.__data_dic.keys()
        label_list = [0 for i in range(len(label_set))]
        for index, element in enumerate(label_set):
            k = label_list.copy()
            k[index] = 1
            self.lebel_onehot[element] = k

    def onehot2label(self, onehot:list):
        onehot = list(map(int, onehot))
        return [key for key, val in self.__data_dic.items() if val == onehot]


if __name__ == '__main__':
    # Manual smoke test: requires a local copy of the Indoor CVPR'09 dataset
    # at the hard-coded (machine-specific) path below.
    from torch.utils.data import DataLoader
    p = r'C:\Users\QinHJ\Downloads\indoorCVPR_09'  # dataset root
    p1 = Path(p)  # NOTE(review): unused — likely leftover from experimentation
    c = GetData(p, [5, 5], 3)  # 5 validation + 5 test samples per label, seed 3
    d = ImgDataset(c.train_dataset,c.lebel_onehot, shuffle=True, seed= 5)
    loader = DataLoader(d, 4)  # batch size 4
    for i, e in enumerate(loader):
        print(e)
    print(c.valid_dataset)
# Dead code preserved as a no-op raw-string literal: an ad-hoc OpenCV
# grayscale experiment. NOTE(review): candidate for deletion.
r"""
    img = cv2.imread(r"C:\Users\QinHJ\Downloads\indoorCVPR_09\tv_studio\grimshaw_photo7_91_.jpg")
    img1 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    d = img[:,:,0]
    e = img1-d
    f = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
    print(type(img))
    cv2.imshow("W", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    """
