from torch.utils.data import Dataset
import pickle
import cv2
from torchvision import transforms
import torch
import json
import os
import cfg.cfg_voc2012 as cfg2012
import math
import numpy as np


class VOC2012_DATASET(Dataset):
    """VOC2012 dataset producing YOLOv3-style multi-scale label tensors.

    Each sample yields three zero-initialized label tensors for the 13x13,
    26x26 and 52x52 detection grids (3 anchors per cell, ``5 + class_number``
    channels per anchor: conf, cx_offset, cy_offset, log_w, log_h, one-hot
    classes), plus the letterbox-padded, resized image as a CHW float tensor.
    """

    def __init__(self, root, isTrain=True):
        """Load the annotation index for the train or test split.

        Each line of the index file is an image path followed by groups of
        five numbers per ground-truth box: cls, cx, cy, w, h.
        """
        data_filepath = fr"{root}/traindata.txt" if isTrain else fr"{root}/testdata.txt"

        with open(data_filepath, 'r') as fp:
            self.data = fp.readlines()

        # Converts HWC uint8 [0, 255] images to CHW float [0, 1] tensors.
        self.__imghandler = transforms.Compose([transforms.ToTensor()])

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        """Return ``(label_13, label_26, label_52, image_tensor)`` for sample `item`."""
        # One label tensor per detection scale, all zeros until a box is matched.
        labels = {
            size: torch.zeros(size, size, 3, 5 + cfg2012.class_number)
            for size in (13, 26, 52)
        }

        data = self.data[item].split()

        # Rebase the annotation's (Windows-style) image path onto the local folder.
        img_root = r"..//dataset/imgdata/JPEGImages/"
        data[0] = img_root + data[0].split('\\')[-1]

        img = cv2.imread(data[0])
        if img is None:
            # Fail early with a clear message instead of a cryptic cvtColor error.
            raise FileNotFoundError(f"image not found or unreadable: {data[0]}")
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # Letterbox: center-pad the image to a square, then resize to the network
        # input size (img_width x img_width).
        h, w = img.shape[0], img.shape[1]
        max_side = max(h, w)
        new_img = np.zeros((max_side, max_side, 3), dtype=np.uint8)
        start_y = (max_side - h) // 2
        start_x = (max_side - w) // 2
        # Plain assignment: the original `+=` only worked because the buffer is
        # zero-filled, and would silently wrap on uint8 otherwise.
        new_img[start_y:start_y + h, start_x:start_x + w] = img
        new_img = cv2.resize(new_img, (cfg2012.img_width, cfg2012.img_width))
        new_img = self.__imghandler(new_img)

        # Parse ground-truth boxes: flat floats grouped as (cls, cx, cy, w, h).
        # NOTE(review): box coordinates are used as-is and are not rescaled for
        # the letterbox padding above — confirm annotations already account for it.
        raw = np.array([float(n) for n in data[1:]])
        if raw.size == 0:
            # No boxes on this line: return all-zero labels
            # (np.split would raise ZeroDivisionError on 0 sections).
            return labels[13], labels[26], labels[52], new_img
        boxes = np.split(raw, len(raw) // 5)

        for box in boxes:
            cls, cx, cy, w, h = box
            candidates = []  # one entry per (scale, anchor) pairing for this box
            ground_truth_area = w * h
            for feature_size, anchors in cfg2012.anchor_box.items():
                # Grid-cell index and fractional offset of the box center at this scale.
                cx_offset, cx_index = math.modf(cx * feature_size / cfg2012.img_width)
                cy_offset, cy_index = math.modf(cy * feature_size / cfg2012.img_width)
                for i, anchor in enumerate(anchors):
                    anchor_area = anchor[0] * anchor[1]
                    # Crude IoU proxy: ratio of smaller to larger area.
                    iou = min(ground_truth_area, anchor_area) / max(ground_truth_area, anchor_area)
                    cls_ch = [0] * cfg2012.class_number
                    cls_ch[int(cls)] = 1
                    tag = [feature_size, cx_index, cy_index, i,
                           iou, cx_offset, cy_offset,
                           np.log(w / anchor[0]), np.log(h / anchor[1])]
                    tag.extend(cls_ch)
                    candidates.append(tag)

            candidates = np.array(candidates)
            # Keep only the (scale, anchor) pairing with the best area ratio
            # (column 4 holds the iou proxy).
            best = candidates[candidates[:, 4].argmax()]

            size, grid_x, grid_y, i = int(best[0]), int(best[1]), int(best[2]), int(best[3])
            # Write [conf, cx_off, cy_off, log_w, log_h, one-hot...] into the cell;
            # tensor indexing is (row=grid_y, col=grid_x, anchor).
            labels[size][grid_y, grid_x, i] = torch.from_numpy(best[4:])
            # Hard confidence of 1 for the matched anchor instead of the iou proxy.
            labels[size][grid_y, grid_x, i][0] = 1

        return labels[13], labels[26], labels[52], new_img


if __name__ == '__main__':
    # Smoke test: build both splits, then iterate the training split and
    # print the padded image tensor of every sample.
    train_set = VOC2012_DATASET(r"D:\1.课程记录\Prj_yolo\data\voc2012")
    test_set = VOC2012_DATASET(r"D:\1.课程记录\Prj_yolo\data\voc2012", isTrain=False)

    for sample in train_set:
        print(sample[-1])
