# -*- encoding:utf-8 -*-
"""
txt文件: img_name.jpg x1 y1 x2 y2 c x1 y1 x2 y2 c ... 两张图
"""
import os
import os.path as osp

import random
import numpy as np

import torch
import torch.utils.data as data
import torchvision.transforms as transforms

import cv2


class YOLODataset(data.Dataset):
    """VOC-style detection dataset producing YOLO v1 training targets.

    Each line of the annotation list file has the layout::

        img_name.jpg x1 y1 x2 y2 c x1 y1 x2 y2 c ...

    i.e. an image file name followed by one ``(x1, y1, x2, y2, class)``
    group per ground-truth object, in absolute pixel coordinates.
    """

    def __init__(self, root, list_file, is_train, transform,
                 img_size=224, grid_size=7, num_boxes=2, num_classes=20):
        """
        Args:
            root (str): directory containing the image files.
            list_file (str or list[str]): annotation file path(s); multiple
                files (e.g. voc2007 + voc2012) are concatenated.
            is_train (bool): apply random data augmentation when True.
            transform (iterable): callables applied to the final image.
            img_size (int): side length the network input is resized to.
            grid_size (int): S, grid cells per image side.
            num_boxes (int): B, boxes predicted per cell.
            num_classes (int): C, number of object classes.
        """
        self.root = root
        self.is_train = is_train
        self.transform = transform
        self.img_size = img_size
        self.grid_size = grid_size
        self.num_boxes = num_boxes
        self.num_classes = num_classes
        self.fnames = []
        self.bboxes = []
        self.labels = []
        self.mean = (123, 117, 104)  # RGB order (images stay BGR until bgr2rgb)

        # Accept a single list file or several; several files are simply
        # concatenated. Reading them in Python replaces the previous
        # os.system('cat ...') call, which was shell-dependent, unsafe with
        # untrusted paths, and left a stray cat_list_file.txt behind.
        if not isinstance(list_file, list):
            list_file = [list_file]
        lines = []
        for lf in list_file:
            with open(lf) as f:
                lines.extend(f.readlines())

        for line in lines:
            split_str = line.strip().split()
            self.fnames.append(split_str[0])
            num_bboxes = (len(split_str) - 1) // 5  # 5 numbers per object
            boxes = []
            labels = []
            for i in range(num_bboxes):
                x1 = float(split_str[1 + 5 * i])
                y1 = float(split_str[2 + 5 * i])
                x2 = float(split_str[3 + 5 * i])
                y2 = float(split_str[4 + 5 * i])
                cls = int(split_str[5 + 5 * i])
                boxes.append([x1, y1, x2, y2])
                labels.append(cls)
            self.bboxes.append(torch.Tensor(boxes))
            self.labels.append(torch.LongTensor(labels))
        self.num_samples = len(self.bboxes)

    def __getitem__(self, idx):
        """Load one image, augment it (train only), and return (img, target).

        Returns:
            img: the transformed image (whatever ``self.transform`` yields).
            label: encoded target tensor of shape [S, S, 5*B + C].
        """
        fname = self.fnames[idx]
        path = os.path.join(self.root, fname)
        _img = cv2.imread(path)
        if _img is None:
            # cv2.imread returns None instead of raising on a bad path;
            # fail fast with a clear error instead of a later AttributeError.
            raise FileNotFoundError('cannot read image: %s' % path)

        _box = self.bboxes[idx].clone()
        _label = self.labels[idx].clone()

        if self.is_train:
            _img, _box = self.random_flip(_img, _box)
            _img, _box = self.random_scale(_img, _box)
            _img = self.random_blur(_img)
            _img = self.random_brightness(_img)
            _img = self.random_hue(_img)
            _img = self.random_saturate(_img)
            _img, _box, _label = self.random_shift(_img, _box, _label)
            _img, _box, _label = self.random_crop(_img, _box, _label)

        h, w, _ = _img.shape
        # Normalize box coordinates to [0, 1]. NOTE(review): assumes every
        # annotation line has at least one box — an empty line would fail here.
        _box /= torch.Tensor([w, h, w, h]).expand_as(_box)
        _img = self.bgr2rgb(_img)   # pytorch pretrained models use RGB
        #  _img = self.sub_mean(_img, self.mean)  # optional mean subtraction
        img = cv2.resize(_img, (self.img_size, self.img_size))
        label = self.encoder(_box, _label)
        for trans in self.transform:
            img = trans(img)
        return img, label

    def __len__(self):
        """Number of samples (annotation lines)."""
        return self.num_samples

    def encoder(self, _box, _label):
        """Encode normalized boxes and class ids into a YOLO target tensor.

        Args:
            _box: (tensor) [[x1, y1, x2, y2], ...] normalized to [0.0, 1.0].
            _label: (tensor) [c_obj1, c_obj2, ...] class index per box.

        Returns:
            Tensor sized [S, S, 5*B + C]; each 5-tuple is (x, y, w, h, conf),
            where (x, y) is the center offset within its grid cell (in cell
            units) and (w, h) is the box size relative to the whole image.
        """
        s, b, c = self.grid_size, self.num_boxes, self.num_classes
        n = 5 * b + c
        label = torch.zeros((s, s, n))
        cell_size = 1. / s
        _boxes_wh = _box[:, 2:] - _box[:, :2]
        _boxes_cx_cy = (_box[:, 2:] + _box[:, :2]) / 2
        for idx in range(_boxes_cx_cy.size()[0]):
            cx_cy, wh, label_ = _boxes_cx_cy[idx], _boxes_wh[idx], int(_label[idx])

            # Cell containing the box center. NOTE(review): a center exactly
            # at coordinate 0.0 would produce index -1 here — assumed not to
            # occur in the data; confirm if annotations can touch the border.
            ij = (cx_cy / cell_size).ceil() - 1
            j, i = int(ij[1]), int(ij[0])    # j: row (y index), i: column (x index)
            x_y = ij * cell_size             # left-top corner of the cell
            delta_x_y = (cx_cy - x_y) / cell_size

            # All B box slots of the responsible cell get the same ground truth.
            for k in range(b):
                stride = 5 * k
                label[j, i, stride:stride + 2] = delta_x_y
                label[j, i, stride + 2:stride + 4] = wh
                label[j, i, stride + 4] = 1.0
            label[j, i, 5 * b + label_] = 1.0
        return label

    def bgr2rgb(self, img):
        """BGR -> RGB (torchvision pretrained models expect RGB input)."""
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    def bgr2hsv(self, img):
        """BGR -> HSV. For uint8 images OpenCV stores hue in [0, 179]."""
        return cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    def hsv2bgr(self, img):
        """HSV -> BGR."""
        return cv2.cvtColor(img, cv2.COLOR_HSV2BGR)

    def random_blur(self, bgr_img):
        """With p=0.5, apply a 5x5 box blur."""
        if random.random() < 0.5:
            bgr_img = cv2.blur(bgr_img, (5, 5))
        return bgr_img

    def _random_hsv_jitter(self, bgr_img, channel, max_value):
        """With p=0.5, scale one HSV channel by 0.5 or 1.5 and clip it.

        Args:
            bgr_img: input image in BGR order.
            channel: HSV channel index (0 = hue, 1 = saturation, 2 = value).
            max_value: valid ceiling for the channel (179 for uint8 hue,
                255 for saturation/value).
        """
        if random.random() >= 0.5:
            return bgr_img
        hsv_img = self.bgr2hsv(bgr_img)
        channels = list(cv2.split(hsv_img))
        alpha = random.choice([0.5, 1.5])
        channels[channel] = np.clip(channels[channel] * alpha,
                                    0, max_value).astype(hsv_img.dtype)
        return self.hsv2bgr(cv2.merge(channels))

    def random_brightness(self, bgr_img):
        """Randomly scale the HSV value (brightness) channel."""
        return self._random_hsv_jitter(bgr_img, 2, 255)

    def random_saturate(self, bgr_img):
        """Randomly scale the HSV saturation channel."""
        return self._random_hsv_jitter(bgr_img, 1, 255)

    def random_hue(self, bgr_img):
        """Randomly scale the HSV hue channel.

        Clips to 179 rather than 255: OpenCV stores uint8 hue in [0, 179],
        so the previous clip to 255 could feed out-of-range hues to
        HSV2BGR and produce corrupted colors.
        """
        return self._random_hsv_jitter(bgr_img, 0, 179)

    def random_scale(self, bgr_img, boxes):
        """With p=0.5, rescale the image WIDTH by a factor in [0.8, 1.2].

        Only the horizontal dimension is scaled (deliberate aspect-ratio
        jitter); box x-coordinates are scaled to match.
        """
        if random.random() < 0.5:
            return bgr_img, boxes

        scale = random.uniform(0.8, 1.2)
        h, w, _ = bgr_img.shape
        bgr_img = cv2.resize(bgr_img, (int(w * scale), h),
                             interpolation=cv2.INTER_LINEAR)
        scale_tensor = torch.FloatTensor([[scale, 1.0, scale, 1.0]]).expand_as(boxes)
        return bgr_img, boxes * scale_tensor

    def sub_mean(self, bgr_img, mean):
        """Subtract a per-channel mean (currently disabled in __getitem__)."""
        mean = np.array(mean, dtype=np.float32)
        return bgr_img - mean

    def random_flip(self, bgr_img, boxes):
        """With p=0.5, flip horizontally and mirror box x-coordinates."""
        if random.random() < 0.5:
            bgr_img_lr = np.fliplr(bgr_img).copy()
            h, w, _ = bgr_img_lr.shape
            x_min = w - boxes[:, 2]
            x_max = w - boxes[:, 0]
            boxes[:, 0] = x_min
            boxes[:, 2] = x_max
            return bgr_img_lr, boxes
        return bgr_img, boxes

    def random_bright(self, bgr_img, delta=16):
        """Alternative brightness jitter (not called by __getitem__, which
        uses random_brightness; kept for backward compatibility)."""
        alpha = random.random()
        if alpha > 0.3:
            bgr_img = bgr_img * alpha + random.randrange(-delta, delta)
            bgr_img = bgr_img.clip(min=0, max=255).astype(np.uint8)
        return bgr_img

    def random_shift(self, bgr_img, boxes, labels):
        """With p=0.5, translate the image by up to 20% per dimension.

        Vacated pixels are filled with the dataset mean (reversed to BGR).
        Boxes whose centers move outside the image are dropped; if none
        survive, the original sample is returned unchanged.
        """
        shift_ratio = 0.2
        center = (boxes[:, 2:] + boxes[:, :2]) / 2
        if random.random() < 0.5:
            h, w, c = bgr_img.shape
            shift_img = np.zeros((h, w, c), dtype=bgr_img.dtype)
            shift_img[:, :, :] = self.mean[::-1]  # RGB mean reversed to BGR fill
            shift_x = int(random.uniform(-w * shift_ratio, w * shift_ratio))
            shift_y = int(random.uniform(-h * shift_ratio, h * shift_ratio))

            # Copy the overlapping region for each sign combination of the shift.
            if shift_x >= 0 and shift_y >= 0:
                shift_img[shift_y:, shift_x:, :] = bgr_img[:h - shift_y, :w - shift_x, :]
            elif shift_x >= 0 and shift_y < 0:
                shift_img[:h + shift_y, shift_x:, :] = bgr_img[-shift_y:, :w - shift_x, :]
            elif shift_x < 0 and shift_y >= 0:
                shift_img[shift_y:, :w + shift_x, :] = bgr_img[:h - shift_y, -shift_x:, :]
            else:
                shift_img[:h + shift_y, :w + shift_x, :] = bgr_img[-shift_y:, -shift_x:, :]

            shift_xy = torch.FloatTensor([[shift_x, shift_y]]).expand_as(center)
            center = center + shift_xy
            mask_1 = (center[:, 0] > 0) & (center[:, 0] < w)
            mask_2 = (center[:, 1] > 0) & (center[:, 1] < h)
            mask = (mask_1 & mask_2).view(-1, 1)
            shift_boxes = boxes[mask.expand_as(boxes)].view(-1, 4)
            if len(shift_boxes) == 0:
                return bgr_img, boxes, labels
            box_shift = torch.FloatTensor([[shift_x, shift_y, shift_x, shift_y]]).expand_as(shift_boxes)
            shift_boxes = shift_boxes + box_shift
            shift_labels = labels[mask.view(-1)]
            return shift_img, shift_boxes, shift_labels
        return bgr_img, boxes, labels

    def random_crop(self, bgr_img, boxes, labels):
        """With p=0.5, take a random crop covering 20%-100% per dimension.

        Boxes whose centers fall outside the crop are dropped and the
        survivors are clamped to the crop window; if none survive, the
        original sample is returned unchanged.
        """
        if random.random() < 0.5:
            center = (boxes[:, :2] + boxes[:, 2:]) / 2
            h, w, c = bgr_img.shape
            crop_ratio = 0.2
            h_ = random.uniform(crop_ratio * h, h)
            w_ = random.uniform(crop_ratio * w, w)
            x = random.uniform(0, w - w_)
            y = random.uniform(0, h - h_)
            x, y, w, h = int(x), int(y), int(w_), int(h_)

            center = center - torch.FloatTensor([[x, y]]).expand_as(center)
            mask_1 = (center[:, 0] > 0) & (center[:, 0] < w)
            mask_2 = (center[:, 1] > 0) & (center[:, 1] < h)
            mask = (mask_1 & mask_2).view(-1, 1)

            crop_boxes = boxes[mask.expand_as(boxes)].view(-1, 4)
            if len(crop_boxes) == 0:
                return bgr_img, boxes, labels
            box_shift = torch.FloatTensor([[x, y, x, y]]).expand_as(crop_boxes)

            crop_boxes = crop_boxes - box_shift
            # Clamp surviving boxes to the crop window.
            crop_boxes[:, 0] = crop_boxes[:, 0].clamp_(min=0, max=w)
            crop_boxes[:, 2] = crop_boxes[:, 2].clamp_(min=0, max=w)
            crop_boxes[:, 1] = crop_boxes[:, 1].clamp_(min=0, max=h)
            crop_boxes[:, 3] = crop_boxes[:, 3].clamp_(min=0, max=h)

            crop_labels = labels[mask.view(-1)]
            crop_img = bgr_img[y:y + h, x:x + w, :]
            return crop_img, crop_boxes, crop_labels
        return bgr_img, boxes, labels


def decode(img, label, fp_size=7, bbox_num=2):
    """Decode an encoded YOLO target/prediction tensor back into boxes.

    A cell's box is kept when its confidence exceeds 0.99 or equals the
    global maximum confidence.

    Args:
        img: unused; kept for signature compatibility with callers.
        label: tensor of shape [1, S, S, 5*B + C] or [S, S, 5*B + C].
        fp_size: S, the grid size.
        bbox_num: B, boxes per cell. NOTE(review): the confidence channels
            are hard-coded at indices 4 and 9, so this assumes B == 2.

    Returns:
        list of (1, 4) numpy arrays [x1, y1, x2, y2], normalized to [0, 1].
    """
    boxes = []
    label = label.squeeze(0)
    contain1 = label[:, :, 4].unsqueeze(2)
    contain2 = label[:, :, 9].unsqueeze(2)
    contain = torch.cat((contain1, contain2), 2)
    # Logical-or of the two keep criteria (`|` instead of the old bool `+`).
    mask = (contain > 0.99) | (contain == contain.max())
    cell_size = 1. / fp_size
    for j in range(fp_size):
        for i in range(fp_size):
            for b in range(bbox_num):
                if mask[i, j, b] != 1:
                    continue
                # Clone: the slice is a view into `label`, and the in-place
                # update below previously mutated the caller's tensor,
                # corrupting it for any second decode() call.
                box = label[i, j, b * 5:b * 5 + 4].clone()
                xy = torch.FloatTensor([j, i]) * cell_size
                box[:2] = box[:2] * cell_size + xy  # cell offset -> absolute center
                box_xy = torch.FloatTensor(box.size())
                box_xy[:2] = box[:2] - 0.5 * box[2:]  # center/size -> corners
                box_xy[2:] = box[:2] + 0.5 * box[2:]
                boxes.append(box_xy.view(1, 4).numpy())
    return boxes


def test():
    """Visual smoke test: load samples, draw decoded boxes, dump JPEGs to ./test/."""
    from torch.utils.data import DataLoader

    file_root = '/media/tcl3/bokeh/lipeng/dataset/VOC2007/JPEGImages/'
    train_dataset = YOLODataset(root=file_root, list_file='voc2007.txt', is_train=False, transform=[transforms.ToTensor()])
    train_loader = DataLoader(train_dataset, batch_size=1, shuffle=False, num_workers=0)
    train_iter = iter(train_loader)
    os.makedirs('./test', exist_ok=True)  # cv2.imwrite silently fails if the dir is missing
    for idx in range(10):
        img, label = next(train_iter)
        print(img.size(), label.size())
        # CHW -> HWC is transpose (1, 2, 0); the previous (2, 1, 0) swapped H and W.
        img_np = img.numpy().squeeze(0).transpose((1, 2, 0))

        img_np = (img_np * 255).astype(np.uint8)
        h, w, _ = img_np.shape

        # `visualize` did not exist (NameError); decode is the intended call.
        boxes = decode(img, label, 7, 2)
        print(boxes)
        img_ = np.ones((224, 224, 3))
        img_[:, :, :] = img_np[:, :, :]
        for i in range(len(boxes)):
            box = boxes[i]
            x1, y1, x2, y2 = box[0][0], box[0][1], box[0][2], box[0][3]
            # Scale normalized coords to pixels, clamped inside the image.
            x1 = int(x1 * w) if x1 * w <= w - 1 else w - 1
            y1 = int(y1 * h) if y1 * h <= h - 1 else h - 1
            x2 = int(x2 * w) if x2 * w <= w - 1 else w - 1
            y2 = int(y2 * h) if y2 * h <= h - 1 else h - 1
            print(x1, y1, x2, y2)
            cv2.rectangle(img_, (x1, y1), (x2, y2), (0, 0, 255), 1)
        cv2.imwrite('./test/' + str(idx) + '.jpg', img_)


# Run the visual smoke test when executed as a script.
if __name__ == '__main__':
    test()

