import json
import os
import cv2
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from torchvision import transforms
from config import project_path, index_dict


class UnetDataset(torch.utils.data.Dataset):
    """Dataset of binarized .bmp images paired with 14 landmark targets.

    Each item is ``(image_tensor, mask)`` where ``image_tensor`` is a
    1-channel binarized tensor resized to ``img_size`` and ``mask`` is a
    float32 tensor of 28 values ``[x1, y1, x2, y2, ..., x14, y14]``
    rescaled from the original image size to ``img_size``.
    """

    def __init__(self, dataset_path, split="train", img_size=(512, 512)):
        """
        :param dataset_path: directory containing .bmp images and paired .json label files
        :param split: "train" or "test"
        :param img_size: network input size as (width, height) — passed to cv2.resize
        """
        self.dataset_path = dataset_path
        self.split = split
        self.img_size = img_size
        self.masks = list()

        # Kept around for visualization / debugging:
        # original (unresized) images
        self.image_base = list()
        # file path of each image
        self.image_path = list()

        # model-input tensors (resized + binarized)
        self.images = list()
        # original (width, height) of each image
        self.images_window_size = list()

        self.splitTrainAndTest()
        self.initImage()
        print(f"{split}, size:{len(self)}")

    def __getitem__(self, index):
        # Return the (input tensor, landmark tensor) pair at `index`.
        return self.images[index], self.masks[index]

    def __len__(self):
        # Number of successfully loaded samples.
        return len(self.images)

    def getToVisualization(self, index):
        """
        Fetch a sample together with the raw image, for visualization or other use.
        :param index: sample index
        :return: model input (resized + binarized), landmark values
                 [x1, y1, x2, y2, ..., x14, y14], original image,
                 original file path, original (width, height)
        """
        return self.images[index], self.masks[index], self.image_base[index], self.image_path[index], self.images_window_size[index]

    def splitTrainAndTest(self, test_size=0.2):
        """Create the train/test split files once and cache them on disk.

        :param test_size: fraction of images assigned to the test split
        """
        mid_dir = os.path.join(project_path, "midData")
        train_file_path = os.path.join(mid_dir, "train_image_path.json")
        test_file_path = os.path.join(mid_dir, "test_image_path.json")
        # Fix: regenerate when EITHER file is missing — the original only
        # checked the train file, so a deleted test file went unnoticed and
        # initImage("test") crashed later.
        if os.path.exists(train_file_path) and os.path.exists(test_file_path):
            return

        # makedirs(..., exist_ok=True) is race-free, unlike exists()+mkdir().
        os.makedirs(mid_dir, exist_ok=True)

        all_image_path_list = [
            filename
            for filename in os.listdir(self.dataset_path)
            if filename.endswith(".bmp")
        ]
        # Fixed random_state keeps the split reproducible across runs.
        train_list, test_list = train_test_split(all_image_path_list, test_size=test_size, random_state=42)
        print("训练集数量:", len(train_list))
        print("测试集数量:", len(test_list))
        with open(train_file_path, 'w', encoding='utf-8') as f:
            json.dump(train_list, f)
        with open(test_file_path, 'w', encoding='utf-8') as f:
            json.dump(test_list, f)

    def initImage(self):
        """Load every image of the current split; build model inputs and
        landmark targets.

        :raises ValueError: if ``self.split`` is neither "train" nor "test"
            (the original code died with an obscure NameError instead).
        """
        mid_dir = os.path.join(project_path, "midData")
        if self.split == "train":
            split_file = os.path.join(mid_dir, "train_image_path.json")
        elif self.split == "test":
            split_file = os.path.join(mid_dir, "test_image_path.json")
        else:
            # Fix: fail loudly on an unknown split.
            raise ValueError(f"unknown split: {self.split!r} (expected 'train' or 'test')")

        with open(split_file, 'r', encoding='utf-8') as f:
            image_path_list = json.load(f)

        # cv2.resize takes dsize as (width, height).
        out_w, out_h = self.img_size

        for filename in image_path_list:
            if filename == ".DS_Store":
                continue
            full_path = os.path.join(self.dataset_path, filename)
            img = cv2.imread(full_path)
            if img is None:
                # Fix: cv2.imread returns None for missing/unreadable files;
                # the original crashed on img.shape. Skip with a warning.
                print(f"warning: could not read image, skipping: {full_path}")
                continue
            self.image_base.append(img)
            self.image_path.append(full_path)
            # Original image size, needed to rescale the landmark coordinates.
            height, width = img.shape[:2]

            img = cv2.resize(img, self.img_size)
            # Grayscale, then binarize: pixels > 150 become 255.
            gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            _, binary_img = cv2.threshold(gray_img, 150, 255, cv2.THRESH_BINARY)
            self.images.append(transforms.ToTensor()(binary_img))

            with open(os.path.join(self.dataset_path,
                                   filename.replace('.bmp', '.json')), 'r', encoding='utf8') as fp:
                json_data = json.load(fp)
                points = json_data['shapes']

            # 14 landmarks -> 28 flat coordinates, rescaled from the original
            # image size to the network input size.
            landmarks = [0] * 28
            for point in points:
                idx = index_dict[point['label']]
                # Fix: scale by img_size instead of the hard-coded 512 so
                # non-default img_size values produce correct targets.
                landmarks[2 * idx] = point['points'][0][0] / width * out_w
                landmarks[2 * idx + 1] = point['points'][0][1] / height * out_h

            self.masks.append(torch.tensor(np.array(landmarks), dtype=torch.float32))
            self.images_window_size.append((width, height))


# if __name__ == '__main__':
#     from config import dataset_path
#     u = UnetDataset(dataset_path=dataset_path)
#     u = UnetDataset(dataset_path=dataset_path, split="test")
