import torch
from torch.utils.data import Dataset
from PIL import Image
import torchvision
import os
import numpy as np
import cv2

from collections import Counter

# Image transform: HWC uint8 array -> CHW float tensor scaled to [0, 1].
trans = torchvision.transforms.Compose(
    [torchvision.transforms.ToTensor()]
)
# Per-class pixel frequency (fraction of all pixels) keyed by the VOC palette
# RGB colour. Precomputed offline — apparently via get_segmentation_tag plus
# normalisation, per the commented-out code under __main__. Values sum to ~1.0;
# usable e.g. for class-balanced loss weighting.
cls_prob = {(0, 0, 0): 0.7800574040425421, (224, 224, 192): 0.03926594434563652, (128, 0, 0): 0.0050915152248193285,
            (192, 128, 128): 0.03435511475310695, (0, 64, 128): 0.005664106976450117,
            (128, 192, 0): 0.010862638510532435, (0, 0, 128): 0.003934940252514563, (192, 0, 0): 0.0068382955679378524,
            (64, 0, 128): 0.012891464919828139, (128, 128, 0): 0.006025784744118027, (0, 128, 0): 0.0021594695709781435,
            (128, 0, 128): 0.004726694082428714, (128, 64, 0): 0.005755204367462841,
            (192, 128, 0): 0.008585360422891795, (192, 0, 128): 0.006770111688076068,
            (64, 128, 128): 0.007526617662824611, (0, 192, 0): 0.009746886176023487, (64, 128, 0): 0.006910433749975947,
            (128, 128, 128): 0.009742404083775679, (64, 0, 0): 0.01714419664578414, (0, 128, 128): 0.011952760039694211,
            (0, 64, 0): 0.003992652172598349}
# The 22 VOC palette colours (RGB tuples). Index i in this list is the channel
# index used for class i in the one-hot target masks built by UNET_dataloader.
cls_list = [(0, 0, 0), (224, 224, 192), (128, 0, 0), (192, 128, 128), (0, 64, 128), (128, 192, 0),
            (0, 0, 128), (192, 0, 0), (64, 0, 128), (128, 128, 0), (0, 128, 0), (128, 0, 128),
            (128, 64, 0), (192, 128, 0), (192, 0, 128), (64, 128, 128), (0, 192, 0),
            (64, 128, 0), (128, 128, 128), (64, 0, 0), (0, 128, 128), (0, 64, 0)]


# Build a tally of how many pixels belong to each class colour across a
# directory of segmentation masks.
def get_segmentation_tag(root):
    """Count pixels per class colour over every mask image under ``root``.

    Each mask is conceptually padded with black up to 500x500 before
    counting, so the totals match the centre-padded targets produced by
    ``UNET_dataloader``.

    Args:
        root: directory containing the segmentation mask images.

    Returns:
        dict mapping RGB tuple -> total pixel count across all images.
    """
    tag_item = Counter()

    for i, path in enumerate(os.listdir(root)):
        img = cv2.imread(fr"{root}/{path}")
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # Flatten HWC -> (N, 3) and tally each distinct RGB triple.
        pixels = img.reshape(-1, 3)
        tag_item.update(map(tuple, pixels.tolist()))

        # Images get centre-padded with black up to 500x500 downstream;
        # account for those padding pixels here. Done unconditionally —
        # the previous version only added this when (0, 0, 0) already
        # appeared in the image, silently dropping the padding count
        # for images without any black pixels.
        tag_item[(0, 0, 0)] += 500 * 500 - len(pixels)

        print("handle image number:", i)
    # Return a plain dict, matching the original return type.
    return dict(tag_item)


class UNET_dataloader(Dataset):
    """VOC2012 segmentation dataset for U-Net training.

    Each sample is a pair:
      * image: float tensor, shape (3, 256, 256), values in [0, 1]
      * mask:  float tensor, shape (clsnum, 256, 256), one channel per class
        with 1.0 where the mask pixel colour equals that class's palette colour
    Both image and mask are centre-pasted onto a black 500x500 canvas and
    resized to 256x256 before conversion.
    """

    def __init__(self, root, clsnum=22):
        """Collect sample ids from ``root/SegmentationClass``.

        Args:
            root: VOC root directory containing JPEGImages/ and SegmentationClass/.
            clsnum: number of classes, i.e. channel count of the target mask.
        """
        super(UNET_dataloader, self).__init__()
        self.root = root
        self.clsnum = clsnum
        # Sample ids are the mask file names with the extension stripped.
        self.datapaths = [p.split('.')[0]
                          for p in os.listdir(fr"{root}/SegmentationClass")]

    def __len__(self):
        """Number of samples (one per mask file found at init time)."""
        return len(self.datapaths)

    @staticmethod
    def _pad_square(img):
        """Centre-paste ``img`` (HWC uint8, <=500 in each dim) onto a black
        500x500 canvas and resize the result to 256x256."""
        h, w = img.shape[0], img.shape[1]
        canvas = np.zeros((500, 500, 3), dtype=np.uint8)
        top, left = (500 - h) // 2, (500 - w) // 2
        canvas[top:top + h, left:left + w] = img
        return cv2.resize(canvas, (256, 256))

    def __getitem__(self, item):
        """Return ``(image, mask)`` tensors for sample ``item`` (see class doc)."""
        # Input image: read BGR, convert to RGB, pad+resize, then to tensor.
        src_image = cv2.imread(fr"{self.root}/JPEGImages/{self.datapaths[item]}.jpg")
        src_image = cv2.cvtColor(src_image, cv2.COLOR_BGR2RGB)
        paste_image = trans(self._pad_square(src_image))

        # Label mask: same geometry pipeline as the input image.
        tag_image = cv2.imread(fr"{self.root}/SegmentationClass/{self.datapaths[item]}.png")
        tag_image = cv2.cvtColor(tag_image, cv2.COLOR_BGR2RGB)
        paste_mask = self._pad_square(tag_image)

        # One channel per class: 1 where the pixel colour equals cls_list[i].
        # NOTE(review): cv2.resize may interpolate colours at class boundaries,
        # leaving off-palette pixels that end up all-zero across channels —
        # confirm whether nearest-neighbour resizing was intended for the mask.
        tag_mask = np.zeros((self.clsnum, 256, 256), dtype=np.float32)
        for i in range(self.clsnum):
            tag_mask[i] = np.all(paste_mask == cls_list[i], axis=2)
        return paste_image, torch.from_numpy(tag_mask)

if __name__ == '__main__':
    # cls_prob (defined at module top) was derived offline like this:
    #   counts = get_segmentation_tag(r"e:\YOLO\VOCdevkit\VOC2012\SegmentationClass")
    #   total = sum(counts.values())
    #   cls_prob = {rgb: n / total for rgb, n in counts.items()}
    # Smoke test: build the dataset and fetch one sample.
    u_dataset = UNET_dataloader(r"e:\YOLO\VOCdevkit\VOC2012")
    item = u_dataset[0]
