import os
import torch
from scipy.ndimage import io
from torch.utils import data
import numpy as np
import cv2
from torch.utils.data import DataLoader
from torchvision.transforms import transforms


class CarDataset(data.Dataset):
    """Dataset of vehicle images with optional bounding-box targets.

    Each line of the annotation file is ``<image path> [x1 y1 x2 y2 ...]``
    (zero or more flat 4-value boxes after the image path).

    Args:
        anno_file: annotation file name, relative to ``root_dir``.
        root_dir: directory holding the annotation file and the images.
        train: kept for API compatibility; not used inside this class.
        transform: optional callable applied to the sample dict.
        rgb: if True keep the 3-channel image as loaded; if False convert
            to single-channel grayscale of shape H x W x 1.
    """

    def __init__(self, anno_file, root_dir, train=True, transform=None, rgb=False):
        self.root_dir = root_dir
        self.rgb = rgb
        # One annotation line per sample.
        with open(os.path.join(self.root_dir, anno_file), 'r') as f:
            self.annos_frame = f.readlines()
        self.transform = transform
        self.train = train

    def __len__(self):
        return len(self.annos_frame)

    def __getitem__(self, idx):
        annotation = self.annos_frame[idx].strip().split(' ')
        img_name = annotation[0]
        # BUGFIX: the original called ``io.imread`` from
        # ``from scipy.ndimage import io``, which does not exist in SciPy.
        # cv2 (already a file-level dependency) loads the image as BGR,
        # which is what the BGR2GRAY conversion below expects.
        # NOTE(review): with rgb=True the image stays in BGR channel order
        # as loaded by cv2 — confirm downstream consumers expect that.
        image = cv2.imread(os.path.join(self.root_dir, img_name))
        if not self.rgb:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            # Keep an explicit channel axis: H x W x 1.
            image = image[:, :, np.newaxis]
        # Label defaults to 1, meaning "vehicle".
        label = np.array(1).astype(int)

        # A line with anything other than exactly one trailing token is
        # treated as carrying boxes (flat floats, rounded up, grouped by 4).
        if len(annotation[1:]) != 1:
            bbox_target = np.ceil(np.array(annotation[1:]).astype(float)).astype(int)
            bbox_target = bbox_target.reshape((-1, 4))
            sample = {'image': image, 'label': label, 'bbox_target': bbox_target}
        else:
            sample = {'image': image, 'label': label}
        if self.transform:
            sample = self.transform(sample)
        return sample

class IMG_Dataset(data.Dataset):
    """Dataset yielding bare images listed in an annotation file.

    Only the first whitespace-separated token of each line (the image
    path, relative to ``prefix``) is used; any trailing tokens are ignored.

    Args:
        anno_file: annotation file name, relative to ``prefix``.
        prefix: directory prepended to the annotation file and image paths.
        train: kept for API compatibility; not used inside this class.
        transform: optional callable applied to the loaded image.
    """

    def __init__(self, anno_file, prefix='', train=True, transform=None):
        self.prefix = prefix
        with open(os.path.join(self.prefix, anno_file), 'r') as f:
            lines = f.readlines()
        # Keep only the image path (first token) from each line.
        self.annos_frame = [line.strip().split(' ')[0] for line in lines]
        self.transform = transform
        self.train = train

    def __len__(self):
        return len(self.annos_frame)

    def __getitem__(self, idx):
        img_name = os.path.join(self.prefix, self.annos_frame[idx])
        # BUGFIX: the original called ``io.imread`` from
        # ``from scipy.ndimage import io``, which does not exist in SciPy.
        # cv2 is already a file-level dependency.
        # NOTE(review): cv2.imread returns BGR (H x W x C) — confirm the
        # transform pipeline expects that channel order.
        image = cv2.imread(img_name)
        if self.transform:
            image = self.transform(image)
        return image

class Img2Tensor(object):
    """Convert an H x W x C ndarray image to a float32 C x H x W tensor."""

    def __call__(self, image):
        # torch convention puts the channel axis first, so move it from
        # the numpy layout (H x W x C) to C x H x W before wrapping.
        chw = np.transpose(image, (2, 0, 1))
        tensor = torch.from_numpy(chw)
        return tensor.float()

class ToTensor(object):
    """Convert a sample dict of ndarrays to tensors.

    The image is moved from H x W x C to C x H x W and scaled into [0, 1]
    by dividing by 255; the label (and bounding boxes, when the sample
    carries a third entry) become float tensors.
    """

    def __call__(self, sample):
        # numpy image layout is H x W x C; torch expects C x H x W.
        chw = sample['image'].transpose((2, 0, 1))
        result = {
            'image': torch.from_numpy(chw / 255).float(),
            'label': torch.from_numpy(sample['label']).float(),
        }
        # A two-entry sample is image+label only; anything else also
        # carries a bbox_target to convert.
        if len(sample) != 2:
            result['bbox_target'] = torch.from_numpy(sample['bbox_target']).float()
        return result
###
# Test code: exercises the classes above to check they work end to end.
###
# transform=transforms.Compose([ToTensor()])
#
# test_datasets = CarDataset(anno_file='E://workspace//UA-DETRAC//train_gt.txt',
#                            root_dir='E://workspace//UA-DETRAC//DETRAC-train-data//Insight-MVT_Annotation_Train//',
#                            transform=transform)
# dataloader = DataLoader(test_datasets, batch_size=20,
#                         shuffle=True)  # adding num_workers=4 crashes — a known PyTorch bug on Windows
#
# for i_batch, sample_batched in enumerate(dataloader):
#     print(i_batch, sample_batched['image'].size(),
#           sample_batched['bbox_target'].size())
