import numpy as np
import torch
from Fast_RCNN_Extractor import Fast_RCNN_Extractor
import selective_search
from torch.utils.data import DataLoader, Dataset  # 数据加载器
import torch.nn.functional as F
from trian_data_download import Train_data  # 载入数据集
from Encoding_labels import encoding


# 得到当前设备
def get_device():
    """Return the active compute device.

    :return: ``torch.device('cuda:0')`` when a CUDA GPU is available,
        otherwise ``torch.device('cpu')``.
    """
    if torch.cuda.is_available():
        return torch.device('cuda:0')
    return torch.device('cpu')


device = get_device()


class Fast_RCNN_Dataset(Dataset):
    """Wrap (PIL image, VOC-style annotation) pairs as Fast R-CNN samples.

    For each image, ``__getitem__`` extracts a VGG16 feature map, runs
    selective search, projects both the proposals and the ground-truth
    boxes into feature-map coordinates, RoI-pools every proposal to 7x7,
    and labels proposals positive/negative by IoU against the ground truth.
    """

    def __init__(self, data):
        """
        :param data: indexable dataset where item[0] is a PIL image and
            item[1] is a VOC-style annotation dict (presumably
            torchvision VOCDetection output — TODO confirm).
        """
        self.Data = data

    def __len__(self):
        """Number of images in the wrapped dataset."""
        return len(self.Data)

    def __getitem__(self, index):
        """Build one training sample from the image at ``index``.

        :param index: image index.
        :return: dict with two keys:
            ``'Model_INPUT'`` — list of (pooled 512x7x7 feature crop,
            encoded class id, ground-truth box tensor) triples;
            ``'CT_Boxes'`` — the matching proposal boxes in feature-map
            coordinates. Positives plus negatives are capped at 200.
        """
        dictionary = {}
        # One record: index 0 is the PIL image, index 1 the annotation dict.
        image = self.Data[index][0]
        annotation = self.Data[index][1]['annotation']

        full_image_array = np.asarray(image)  # PIL image -> numpy array (assumed H, W, C — TODO confirm)
        objects = annotation['object']  # all labelled objects in this image
        if type(objects) is not list:  # a single object comes unwrapped; normalize to a list
            objects = [objects]
        print('目标对象个数:', len(objects))
        # Image size dict, e.g. {'width': '500', 'height': '442', 'depth': '3'}
        size_tuple = annotation['size']
        # Reorder into (C, H, W)
        original_size = (int(size_tuple['depth']), int(size_tuple['height']), int(size_tuple['width']))

        # Ground-truth boxes and their class names.
        ground_truths = []
        name = []

        # Collect each object's class name and bounding box.
        # NOTE: boxes use this file's (x_max, x_min, y_max, y_min) order.
        for i in range(len(objects)):
            bndbox = objects[i]['bndbox']
            x_min = int(bndbox['xmin'])
            y_min = int(bndbox['ymin'])
            x_max = int(bndbox['xmax'])
            y_max = int(bndbox['ymax'])
            ground_truth = (x_max, x_min, y_max, y_min)
            obj_name = objects[i]['name']

            ground_truths.append(ground_truth)
            name.append(obj_name)

        # Feature map from the backbone.
        # NOTE(review): view() only reshapes; it does NOT transpose an HWC
        # numpy layout into CHW — verify the extractor tolerates this.
        input_tensor = torch.Tensor(full_image_array).to(device)
        input_view = input_tensor.view(-1, 3, original_size[1], original_size[2])  # reshape to (1, 3, H, W)
        feature_map = Fast_RCNN_Extractor(input_view)  # VGG16 features: 512 channels, reduced H/W

        # Selective-search region proposals (on the order of 2000 boxes).
        regions = selective_search.selective_search(image)
        # Proposals projected onto the feature map
        regions_of_interests = []
        _, original_height, original_width = original_size  # original image H/W
        _, conv_height, conv_width = feature_map.squeeze(0).shape  # feature-map H/W after extraction

        # Scale every proposal from image space to feature-map space.
        # Assumes each candidate is (x, y, w, h) — TODO confirm against the
        # selective_search package's actual return format.
        for candidate in regions:
            left = (candidate[0] * conv_width) / original_width
            bottom = (candidate[1] * conv_height) / original_height
            right = left + (candidate[2] * conv_width) / original_width
            top = bottom + (candidate[3] * conv_height) / original_height

            roi = (int(right), int(left), int(top), int(bottom))
            if int(right) > int(left) and int(top) > int(bottom):  # drop degenerate boxes
                regions_of_interests.append(roi)

        # RoI-pool every surviving proposal: torch.Size([N, 512, 7, 7]).
        model_ids = roi_pooling(feature_map, regions_of_interests, (7, 7))

        t_input = []  # positive samples
        f_input = []  # negative samples
        t_boxs = []  # positive proposal boxes
        f_boxs = []  # negative proposal boxes

        # Match every proposal against every ground-truth box.
        for i in range(len(ground_truths)):
            x_max, x_min, y_max, y_min = ground_truths[i]
            # Project the ground-truth box onto feature-map coordinates.
            resized_gt = (int((x_max * conv_width) / original_width),
                          int((x_min * conv_width) / original_width),
                          int((y_max * conv_height) / original_height),
                          int((y_min * conv_height) / original_height)
                          )
            # Label each proposal by its IoU with this ground truth.
            for bi, RoI in enumerate(regions_of_interests):
                if get_iou(RoI, resized_gt) > 0.3:  # IoU > 0.3 -> positive
                    model_label = encoding(name[i])  # map class name to an integer id
                    t_input.append((model_ids[bi], model_label, torch.Tensor(resized_gt)))  # (pooled features, class id for classification, gt box for regression)
                    t_boxs.append(RoI)  # proposal box in feature-map coordinates
                elif get_iou(RoI, resized_gt) < 0.2:  # IoU < 0.2 -> negative (background)
                    model_label = encoding(None)
                    f_input.append((model_ids[bi], model_label, torch.Tensor(resized_gt)))  # (pooled features, background id, gt box)
                    f_boxs.append(RoI)  # proposal box in feature-map coordinates

        # Cap total positives + negatives at 200 by truncating negatives.
        # NOTE(review): the original comment claimed 40, but the code uses 200.
        f_num = 200 - len(t_boxs)
        f_boxs = f_boxs[:f_num]
        ct_boxs = t_boxs + f_boxs

        f_num = 200  - len(t_input)
        f_input = f_input[:f_num]
        model_input = t_input + f_input

        dictionary['Model_INPUT'] = model_input
        dictionary['CT_Boxes'] = ct_boxs

        return dictionary


def get_iou(rect, box):
    """Compute intersection-over-union of two axis-aligned boxes.

    Both arguments use this file's box convention: (x_max, x_min, y_max, y_min).

    :param rect: first box as (x_max, x_min, y_max, y_min).
    :param box: second box as (x_max, x_min, y_max, y_min).
    :return: IoU in [0, 1]; 0 when the boxes do not overlap.
    """
    ax2, ax1, ay2, ay1 = rect
    bx2, bx1, by2, by1 = box

    # Intersection edges: largest left/bottom edge, smallest right/top edge.
    inter_x1 = max(ax1, bx1)
    inter_y1 = max(ay1, by1)
    inter_x2 = min(ax2, bx2)
    inter_y2 = min(ay2, by2)

    # BUG FIX: the original computed max(inter_x1 - inter_x2, 0), which is 0
    # for overlapping boxes and positive for disjoint ones — i.e. the IoU was
    # inverted. The overlap width/height is (max-edge minus min-edge).
    inter_area = max(inter_x2 - inter_x1, 0) * max(inter_y2 - inter_y1, 0)

    a_area = (ax2 - ax1) * (ay2 - ay1)
    b_area = (bx2 - bx1) * (by2 - by1)
    return inter_area / (a_area + b_area - inter_area)


def get_area(rect):
    """Return the area of a box given as (x_max, x_min, y_max, y_min)."""
    x_max, x_min, y_max, y_min = rect
    return (x_max - x_min) * (y_max - y_min)


def roi_pooling(feature_map, rois, size):
    """Crop each RoI from the feature map and pool it to a fixed size.

    :param feature_map: tensor of shape (1, C, H, W).
    :param rois: iterable of (right, left, top, bottom) integer boxes in
        feature-map coordinates.
    :param size: (h, w) output size for every pooled region.
    :return: tensor of shape (len(rois), C, h, w).
    """
    pooled = [
        # Slice out the region, then adaptive-max-pool it to `size`.
        F.adaptive_max_pool2d(feature_map[:, :, bottom:top, left:right], size)
        for right, left, top, bottom in rois
    ]
    return torch.cat(pooled)


Train_dataloader = DataLoader(Fast_RCNN_Dataset(Train_data),batch_size=1,shuffle=True,drop_last=True)


if __name__ == '__main__' :
    # Smoke test for roi_pooling:
    # feature_map = torch.randn((1, 512, 30, 20), dtype=torch.float32)
    # rois = [[7, 5, 4, 1], [17, 12, 14, 7], [20, 11, 12, 3],[20, 11, 12, 3]]
    # size = (7, 7)
    # print(roi_pooling(feature_map, rois, size).size())

    # Fetch a single sample and print each RoI's feature size, class id,
    # proposal box, and matched ground-truth box.
    one = Fast_RCNN_Dataset(Train_data).__getitem__(1)
    for v in range(len(one['Model_INPUT'])):
        # if one['Model_INPUT'][v][1]  == 20 : continue
        print('特征图的size为:{} \n 分类映射数字为 {} \n 候选框映射坐标为 {} \n 对应的正例为坐标{}'.format(one['Model_INPUT'][v][0].size(),
                                                                           one['Model_INPUT'][v][1], one['CT_Boxes'][v],
                                                                           one['Model_INPUT'][v][2]))
        print('=====================')