import math
import os.path

import cv2
import numpy as np
import torch
from torch.utils.data import Dataset
from config import cfg
from util import util
import torch.nn.functional as F


class ODDateset(Dataset):
    """YOLO-style object-detection dataset.

    Each annotation line has the form:
        img_name cls cx cy w h [cls cx cy w h ...]
    ``__getitem__`` returns one label grid per feature-map scale listed in
    ``cfg.ANCHORS_GROUP`` plus the image tensor.
    """

    def __init__(self):
        super().__init__()
        # Load all annotation lines up front; one sample per line.
        with open(cfg.BASE_LABEL_PATH, 'r', encoding='utf-8') as f:
            self.lines = f.readlines()

    def __len__(self):
        return len(self.lines)

    def __getitem__(self, index):
        """
        :param index: sample index
        :return: label grids for the three feature scales, plus the image tensor

        Pipeline:
        1. Read the indexed line: img_name followed by groups of
           (cls, cx, cy, gt_w, gt_h).
        2. Load the image and convert it to a tensor.
        3. For each feature scale allocate an (H, W, 3, 5 + CLASS_NUM) grid.
        4. For each ground-truth box split the scaled centre into the integer
           cell index and the in-cell offsets (tx, ty).
        5. Compute tw, th as log-ratios of box size to anchor size.
        6. One-hot encode the class id.
        7. Write [conf, tx, ty, tw, th, *one_hot] into the matching cell for
           every anchor of that scale.
        """
        infos = self.lines[index].strip().split()
        img_name = infos[0]
        # e.g. '1.jpg'
        img_path = os.path.join(cfg.BASE_IMG_PATH, img_name)
        img = cv2.imread(img_path)
        if img is None:
            # cv2.imread signals failure by returning None, not by raising;
            # fail loudly here instead of crashing later in util.t().
            raise FileNotFoundError(f'cannot read image: {img_path}')
        img_tensor = util.t(img)
        box_info = infos[1:]
        # e.g. ['2', '163', '218', '228', '246', '1', '288', '205', '159', '263']
        # np.float_ was removed in NumPy 2.0; np.float64 is its exact replacement.
        # Guard against annotation lines with no boxes: np.split(..., 0) raises.
        boxes = (np.split(np.array(box_info, dtype=np.float64), len(box_info) // 5)
                 if box_info else [])
        # 0 = {ndarray: (5,)} [  2. 163. 218. 228. 246.]
        # 1 = {ndarray: (5,)} [  1. 288. 205. 159. 263.]
        label_dic = {}
        for feature, anchors in cfg.ANCHORS_GROUP.items():
            # (H, W, anchors, conf + tx + ty + tw + th + classes)
            label = torch.zeros((feature, feature, 3, 5 + cfg.CLASS_NUM))
            # Register the grid immediately so every scale is present even when
            # no box lands on it (the original stored it only inside the anchor
            # loop, which raised KeyError at return for box-less samples).
            label_dic[feature] = label
            scale_factor = cfg.IMG_ORI_SIZE / feature  # e.g. 416 / 13 = 32
            for box in boxes:
                cls, cx, cy, gt_w, gt_h = box
                # modf splits the scaled centre into the fractional in-cell
                # offset and the integer cell index, e.g. -> (0.09375, 5.0).
                offset_x, cx_idx = math.modf(cx / scale_factor)
                offset_y, cy_idx = math.modf(cy / scale_factor)
                for idx, anchor in enumerate(anchors):
                    anchor_w, anchor_h = anchor
                    # log keeps the regressed width/height strictly positive
                    # and speeds up convergence.
                    tw = math.log(gt_w / anchor_w)
                    th = math.log(gt_h / anchor_h)
                    one_hot = F.one_hot(torch.tensor(int(cls), dtype=torch.int64),
                                        num_classes=cfg.CLASS_NUM)
                    # e.g. tensor([0, 0, 1, 0])
                    conf = 1  # objectness target: this cell contains a box
                    label[int(cy_idx), int(cx_idx), idx] = torch.tensor(
                        [conf, offset_x, offset_y, tw, th, *one_hot])
        f1, f2, f3 = cfg.ANCHORS_GROUP.keys()
        # e.g. 13, 26, 52
        return label_dic[f1], label_dic[f2], label_dic[f3], img_tensor


if __name__ == '__main__':
    # Smoke test: build the dataset and dump the first sample.
    ds = ODDateset()
    sample = ds[0]
    print(sample)
