# -*- coding: utf-8 -*-
"""
Created on Wed Oct 27 16:11:32 2021

@author: singularity
"""

import torch
from torch.utils.data import Dataset
from torchvision import transforms


import numpy as np
import PIL
import pandas

import matplotlib.pyplot as plt

class VID(Dataset):
    """Tiny-VID single-object detection dataset.

    Each sample is ``(image, label)`` where ``image`` is a float tensor
    of shape [3, 128, 128] and ``label`` is an integer array
    ``(class_id, xmin, ymin, w, h)``.
    """

    def __init__(self, trainSet=True, root: str = './tiny_vid/'):
        """Load every image/label pair of the requested split into memory.

        Parameters
        ----------
        trainSet : bool
            True -> load the training split, False -> the test split.
        root : str
            Dataset root containing one image sub-directory and one
            ``*_gt.txt`` ground-truth file per class.
        """
        super(VID, self).__init__()

        LABEL_FILE_SUFFIX = '_gt.txt'   # ground-truth file suffix
        IMG_FILE_SUFFIX = '.JPEG'       # image file suffix

        self.CLASSES = ['bird', 'car', 'dog', 'lizard', 'turtle']  # class names

        # Per class: the first 150 frames form the training split,
        # the following 30 frames the test split.
        TRAIN_TEST = [150, 30]

        # Image preprocessing: plain tensor conversion (no resize/normalize).
        preprocess = transforms.Compose([
            transforms.ToTensor(),
        ])

        # 1-based frame ids belonging to the requested split.
        if trainSet:
            frameIds = range(1, TRAIN_TEST[0] + 1)
        else:
            frameIds = range(TRAIN_TEST[0] + 1, TRAIN_TEST[0] + TRAIN_TEST[1] + 1)

        self.allData = []   # in-memory samples: (image tensor, label array)
        for classIndex, className in enumerate(self.CLASSES):
            # Each ground-truth row is a single string:
            # "frame_id xmin ymin xmax ymax".
            labelSet = pandas.read_csv(root + className + LABEL_FILE_SUFFIX,
                                       header=None,
                                       nrows=TRAIN_TEST[0] + TRAIN_TEST[1])

            dataPathNow = root + className + '/'

            for frameId in frameIds:
                fileName = ('%06d' % frameId) + IMG_FILE_SUFFIX

                # np.intp is the supported spelling of the alias np.int0,
                # which was deprecated and later removed from NumPy.
                label = np.array(labelSet[0][frameId - 1].split(' '),
                                 dtype=np.intp)
                if label[0] != frameId:
                    raise Exception("数据集标签文件错误！")
                label[0] = classIndex  # replace the frame id with the class id

                # Convert (xmax, ymax) into (w, h).
                label[3] = label[3] - label[1]
                label[4] = label[4] - label[2]

                # Sample layout: (image tensor [3x128x128],
                #                 label (class_id, xmin, ymin, w, h))
                self.allData.append((preprocess(PIL.Image.open(dataPathNow + fileName)),
                                     label))

    def __getitem__(self, index):
        """Return the sample ``(img, label)`` at ``index``."""
        return self.allData[index]

    def __len__(self):
        """Return the number of loaded samples."""
        return len(self.allData)

    def convert2trainData(self, fmap: int, anchor: list, stride: list):
        """Assign every ground-truth box to its best-matching anchor.

        Parameters
        ----------
        fmap : int
            Number of prediction feature maps.
        anchor : list
            Anchors per feature map, shaped [fmap x AnchorNum x 2] (w, h).
        stride : list
            Down-sampling factor of each feature map w.r.t. the input
            image, shaped [fmap x 1].

        Raises
        ------
        Exception
            If ``anchor`` does not provide entries for ``fmap`` maps.

        Side effects
        ------------
        Fills ``self.label2Anchor`` with one record per sample:
        [map id, grid x, grid y, anchor id, x offset, y offset,
        log(w / w_anchor), log(h / h_anchor), class id].
        """
        if fmap != len(anchor):
            raise Exception("Anchor数据长度与预测图数量不符")

        # Every feature map must carry the same number of anchors.
        AnchorNum = len(anchor[0])

        def iou(label, anchor):
            """IOU between a ground-truth box (w, h) and an anchor (w, h),
            both treated as centered on the same point."""
            commonS = min(label[0], anchor[0]) * min(label[1], anchor[1])
            return commonS / (label[0] * label[1] + anchor[0] * anchor[1] - commonS)

        # For each label pick the (feature map, anchor) with the largest IOU.
        self.label2Anchor = []
        for _, label in self.allData:
            anchorResCache = []  # entries: (map id, anchor id, iou)
            for layer in range(fmap):
                for ancNum in range(AnchorNum):
                    anchorResCache.append([layer,
                                           ancNum,
                                           iou((label[3], label[4]),
                                               anchor[layer][ancNum])])
            maxAnchor = max(anchorResCache, key=lambda x: x[2])

            # Project the box center onto the chosen feature map.
            Gx = (label[1] + label[3] / 2) / stride[maxAnchor[0]]  # (x + w/2) / stride
            Gy = (label[2] + label[4] / 2) / stride[maxAnchor[0]]  # (y + h/2) / stride

            # Grid cell, in-cell offset, and log-scale size targets.
            Fx = int(Gx // 1); Fy = int(Gy // 1)
            X = Gx % 1; Y = Gy % 1
            W = label[3] / anchor[maxAnchor[0]][maxAnchor[1]][0]   # w / w_anchor
            H = label[4] / anchor[maxAnchor[0]][maxAnchor[1]][1]   # h / h_anchor
            W = np.log(W); H = np.log(H)

            self.label2Anchor.append([maxAnchor[0],  # feature map id   0
                                      Fx, Fy,        # grid cell        1,2
                                      maxAnchor[1],  # anchor id        3
                                      X, Y,          # center offsets   4,5
                                      W, H,          # size factors     6,7
                                      label[0],      # class id         8
                                      ])
        print("标签数据格式转换为训练格式成功")
        print("每个样本的形式为：[特征图号，格子号，anchor号，中心xy偏移量，长宽比例，类别]")

    def convert2YOLOJ(self, mapSize=(16, 32)):
        """Turn the anchor assignments into dense YOLO target tensors.

        Replaces every sample's label with a pair of tensors
        ``(Tensor[3*(5+C+1), mapSize[0], mapSize[0]],
        Tensor[3*(5+C+1), mapSize[1], mapSize[1]])`` where ``C`` is the
        number of classes; only the map the box was assigned to carries a
        non-zero anchor entry (confidence, x, y, w, h, one-hot class
        scores, objectness indicator).

        Requires ``convert2trainData`` to have been called first.
        """
        anchorBoxLength = 5 + len(self.CLASSES) + 1
        for i, label in enumerate(self.label2Anchor):
            # Blank target tensor for the assigned feature map.
            objMap = torch.zeros((3 * anchorBoxLength,  # channels
                                  mapSize[label[0]],    # X grid
                                  mapSize[label[0]]))   # Y grid

            # Target content for the chosen anchor slot.
            objAnchor = torch.zeros(anchorBoxLength)
            objAnchor[0] = 1                  # confidence
            objAnchor[1] = label[4]           # x offset
            objAnchor[2] = label[5]           # y offset
            objAnchor[3] = label[6]           # w factor
            objAnchor[4] = label[7]           # h factor
            objAnchor[5 + label[8]] = 1       # one-hot class score
            objAnchor[-1] = 1                 # I_obj indicator

            # Write the anchor block into its grid cell.
            anchorHead = label[3] * anchorBoxLength
            anchorTail = anchorHead + anchorBoxLength
            objMap[anchorHead:anchorTail, label[1], label[2]] = objAnchor

            # All-zero map for the other scale.  The original hard-coded
            # 16/32 here; deriving it from mapSize keeps custom sizes
            # consistent while preserving the default behavior.
            emptyMap = torch.zeros((3 * anchorBoxLength,
                                    mapSize[1 - label[0]],
                                    mapSize[1 - label[0]]))
            if label[0] == 0:
                self.allData[i] = (self.allData[i][0], (objMap, emptyMap))
            elif label[0] == 1:
                self.allData[i] = (self.allData[i][0], (emptyMap, objMap))


def bboxDataCheck(img, gridSize, grid, anchorSize, cf, sz, classid):
    '''
    Visual sanity check: draw a converted anchor target back onto its image.

    Parameters
    ----------
    img : tensor
        Image data.
    gridSize : int
        Pixel size of one feature-map cell (depends on the feature map).
    grid : (int, int)
        Grid cell the box belongs to. Predicted quantity.
    anchorSize : (int, int)
        Anchor size used during conversion. Preset quantity.
    cf : (float, float)
        Center offsets within the cell. Predicted quantity.
    sz : (float, float)
        Log-scale width/height factors. Predicted quantity.
    classid : str
        Detected class, shown as the figure title. Predicted quantity.

    Returns
    -------
    None.

    '''
    plt.figure()
    plt.imshow(img.numpy().transpose((1, 2, 0)))
    plt.title(classid)

    axes = plt.gca()

    # Undo the log-scale encoding to recover absolute box size.
    boxW = anchorSize[0] * np.exp(sz[0])
    boxH = anchorSize[1] * np.exp(sz[1])

    # Cell index + offset gives the center; shift by half the size for the corner.
    xMin = gridSize * (grid[0] + cf[0]) - boxW / 2
    yMin = gridSize * (grid[1] + cf[1]) - boxH / 2

    axes.add_patch(plt.Rectangle((xMin, yMin),
                                 boxW,
                                 boxH,
                                 color="blue", fill=False, linewidth=1))
        
    
if __name__ == "__main__":
    # Smoke test: load the test split, show a few ground-truth boxes,
    # then run the YOLO target conversion.
    BATCH_SIZE_N = 8
    trainset = VID(False)   # False -> test split

    AnchorSzList = [[(50,44), (64,82), (82,48)],
               [(64,114), (108,66), (104,102)]]
    StrideSzList = [8,4]

    className = trainset.CLASSES

    trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE_N,
                                              shuffle=True, num_workers=2)

    dataIter = iter(trainloader)

    # Plot a few samples with their ground-truth boxes.
    for i in range(4):
        # Built-in next(): the iterator's .next() method was removed
        # from modern PyTorch DataLoader iterators.
        samp = next(dataIter)
        img = samp[0][0]
        label = samp[1][0]

        plt.figure(i)
        plt.imshow(img.numpy().transpose((1,2,0)))
        plt.title(trainset.CLASSES[label[0]])
        ax = plt.gca()
        ax.add_patch(plt.Rectangle((label[1], label[2]),    # top-left corner (x, y)
                                   label[3], label[4],      # width, height
                                   color="blue", fill=False, linewidth=1))
    # Without show() nothing appears on non-interactive backends.
    plt.show()

    trainset.convert2trainData(2, AnchorSzList, StrideSzList)
    trainset.convert2YOLOJ()
    img,label = trainset.allData[0]