import torch
from torch.utils.data import Dataset
import os
import cv2
import numpy as np
from typing import Tuple
import torchvision.transforms as transforms


class usher_data_loader(Dataset):
    """Common ancestor of all usher dataset loaders.

    Exists as an extension point: it currently only forwards construction
    to ``torch.utils.data.Dataset``; behaviour shared by the concrete
    loaders can be hung here later.
    """

    def __init__(self) -> None:
        super().__init__()


class usher_pic_data_loader(usher_data_loader):
    """Picture dataset base: pairs each image file with a same-named label file.

    Subclasses must implement ``trans_to_numpy`` (load one image + its labels)
    and ``deal_label_after`` (build the training target from those labels).
    """

    def __init__(self, class_num, pic_dir_path: str, label_dir_path: str, target_size: tuple = (448, 448), is_train: bool = True, grid_count: tuple = (7, 7), loss_mode: str = "mse", mean: tuple = (0.075, 0.076, 0.07), std: tuple = (0.128, 0.128, 0.121)) -> None:
        """
        :param class_num: number of object classes
        :param pic_dir_path: directory containing the image files
        :param label_dir_path: directory containing the label files
        :param target_size: (width, height) every image is resized to
        :param is_train: training-mode flag (stored for subclasses; unused here)
        :param grid_count: (cols, rows) of the detection grid
        :param loss_mode: "mse" or "cross_entropy" target layout
        :param mean: per-channel mean for normalization
        :param std: per-channel std for normalization
        """
        super().__init__()
        self.target_size = target_size
        self.pic_dir_path = pic_dir_path
        self.label_dir_path = label_dir_path
        # keep only the files whose extension is a known image suffix
        image_suffixes = set(self.get_images_suffix())
        self.img_names = [name for name in os.listdir(pic_dir_path) if os.path.splitext(name)[1] in image_suffixes]
        self.is_train = is_train
        self.grid_count = grid_count
        self.loss_mode = loss_mode
        self.class_num = class_num
        self.transform_common = transforms.Compose([
            transforms.ToTensor(),  # height * width * channel -> channel * height * width
            transforms.Normalize(mean=mean, std=std)  # normalized inputs are less prone to exploding gradients
        ])

    def __len__(self) -> int:
        """Number of image files found in ``pic_dir_path``."""
        return len(self.img_names)

    def __getitem__(self, index: int) -> tuple:
        """Return ``(normalized image tensor, (ground_truth, positive_mask))``."""
        img_name = self.img_names[index]
        img_path = os.path.join(self.pic_dir_path, img_name)
        # Swap the image extension for the label extension via splitext; the old
        # blind str.replace could corrupt names containing a suffix mid-string
        # (e.g. "shot.jpg.backup.png").
        label_name = os.path.splitext(img_name)[0] + self.get_label_suffix()
        label_path = os.path.join(self.label_dir_path, label_name)
        img, labels = self.trans_to_numpy(img_path, label_path)
        img = self.resize(img)
        return self.deal_after(img, labels)

    def resize(self, img: np.ndarray) -> np.ndarray:
        """Resize ``img`` to ``target_size`` with bilinear interpolation.

        NOTE: cv2.resize's third *positional* argument is ``dst``, not the
        interpolation mode — the flag must be passed by keyword, otherwise the
        default interpolation is silently used instead.
        """
        return cv2.resize(img, self.target_size, interpolation=cv2.INTER_LINEAR)

    def get_label_suffix(self):
        """File extension of label files."""
        return ".txt"

    def get_images_suffix(self):
        """Accepted image file extensions."""
        return [".png", ".jpg"]

    def trans_to_numpy(self, img_path, label_path) -> tuple:
        """Load one (image array, label list) pair. Implemented by subclasses."""
        pass

    def deal_after(self, img, labels):
        """Package the resized image and its labels into training tensors."""
        ground_truth, ground_mask_positive = self.deal_label_after(img, labels)
        # transform_common already yields a float tensor; the old extra
        # torch.Tensor(...) wrap only produced a needless copy.
        return self.transform_common(img), (torch.Tensor(ground_truth), ground_mask_positive)

    def deal_label_after(self, img, labels) -> Tuple[np.ndarray, torch.Tensor]:
        """Build (target array, positive-cell mask). Implemented by subclasses."""
        pass


class usher_pic_yolo_data_loader(usher_pic_data_loader):
    """YOLO-style loader: parses txt labels and builds a per-grid-cell target."""

    def deal_label_after(self, img, labels) -> Tuple[np.ndarray, torch.Tensor]:
        """Scatter each labelled box into its grid cell.

        :param img: resized image (unused here; kept for the base signature)
        :param labels: list of [class_id, cx, cy, w, h] with fractional coords
        :return: (ground_truth array, BoolTensor mask of positive cells)
        """
        # pixel size of one grid cell (width, height)
        cell_width = self.target_size[0] // self.grid_count[0]
        cell_height = self.target_size[1] // self.grid_count[1]
        # positive-cell mask, initialised all False
        ground_mask_positive = np.full(shape=(self.grid_count[0], self.grid_count[1], 1), fill_value=False, dtype=bool)

        if self.loss_mode == "mse":
            # 5 box values + smoothed one-hot class vector + (col, row) indices
            ground_truth = np.zeros([self.grid_count[0], self.grid_count[1], 5 + self.class_num + 2])
        else:
            # 5 box values + class id + (col, row) indices.
            # BUG FIX: was 5 + 1, which could not hold the 8-element ground_box
            # built below and raised ValueError on assignment.
            ground_truth = np.zeros([self.grid_count[0], self.grid_count[1], 5 + 1 + 2])

        for coord in labels:
            # class id, x/y center as fractions, width/height as fractions
            class_id, center_x_prop, center_y_prop, ground_width_prop, ground_height_prop = coord
            center_x = center_x_prop * self.target_size[0]
            center_y = center_y_prop * self.target_size[1]

            # grid-cell coordinates of the box center; clamp so that a center
            # sitting exactly on the right/bottom edge (prop == 1.0) stays
            # inside the grid instead of indexing out of bounds
            index_col = min(int(center_x / cell_width), self.grid_count[0] - 1)
            index_row = min(int(center_y / cell_height), self.grid_count[1] - 1)

            # class target with label smoothing
            if self.loss_mode == "mse":
                # one-hot smoothed: target class gets 1 - deta, the remaining
                # deta mass is spread evenly over the other classes
                deta = 0.01
                if self.class_num > 1:
                    class_list = np.full(shape=self.class_num, fill_value=deta / (self.class_num - 1), dtype=float)
                else:
                    # single class: nothing to smooth over (avoid div by zero)
                    class_list = np.zeros(shape=1, dtype=float)
                class_list[class_id] = 1.0 - deta
            elif self.loss_mode == "cross_entropy":
                class_list = [class_id]
            else:
                raise Exception("the loss mode can't be support now!")

            # box target: coords + objectness flag, then class info, then cell indices
            ground_box = [center_x_prop, center_y_prop, ground_width_prop, ground_height_prop, 1]
            ground_box.extend(class_list)
            ground_box.extend([index_col, index_row])

            ground_truth[index_row][index_col] = np.array(ground_box)
            ground_mask_positive[index_row][index_col] = True

        return ground_truth, torch.BoolTensor(ground_mask_positive)

    def trans_to_numpy(self, img_path, label_path) -> tuple:
        """Load the image with OpenCV and parse its YOLO txt label file.

        Each label line is "class_id cx cy w h", the last four as fractions.
        :raises FileNotFoundError: when OpenCV cannot read the image
        """
        imread = cv2.imread(img_path)
        if imread is None:
            # cv2.imread silently returns None on failure; fail loudly instead
            raise FileNotFoundError("can't read image: " + img_path)

        result = []
        with open(label_path, 'r') as context:
            for line in context:
                # split() is robust to the trailing newline and extra spaces
                split = line.split()
                if not split:
                    continue  # skip blank lines
                label = [int(split[0]), float(split[1]), float(split[2]), float(split[3]), float(split[4])]
                result.append(label)

        return imread, result


if __name__ == '__main__':
    # Smoke test against hard-coded local paths.
    loader = usher_pic_yolo_data_loader(2, "F:\\data\\wow\\img_data\\images", "F:\\data\\wow\\img_data\\labels")
    # __getitem__ returns (img, (ground_truth, mask)) — a 2-tuple; the old
    # flat 3-way unpack raised ValueError before anything was printed.
    img, (ground_truth, ground_mask_positive) = loader[0]
    print(img)
    print(ground_truth)
    print(ground_mask_positive)
