import argparse
import os
import warnings
import numpy as np
from skimage.io import imread
from skimage.transform import resize
import xml.etree.ElementTree as ET

import chainer
from chainercv.visualizations import vis_bbox
import matplotlib.pyplot as plt


# Pose class names. The position of a name in this tuple is the integer
# label assigned by PoseBboxDataset.get_example (via label_names.index).
label_names = (
    'sitting',
    'standing',
    'lying',
)


class PoseBboxDataset(chainer.dataset.DatasetMixin):

    """Bounding box dataset for the Depth dataset.

    Modified from :class:`chainercv.datasets.VOCBboxDataset`. Each example
    is a tuple `img, bbox, label` of an image, bounding boxes and labels.

    The bounding boxes are packed into a two dimensional tensor of shape
    :math:`(R, 4)`, where :math:`R` is the number of bounding boxes in
    the image. The second axis represents attributes of the bounding box.
    They are :math:`(y_{min}, x_{min}, y_{max}, x_{max})`, where the
    four attributes are coordinates of the top left and the bottom right
    vertices.

    The labels are packed into a one dimensional tensor of shape :math:`(R,)`.
    :math:`R` is the number of bounding boxes in the image.

    The types of the bounding boxes and the labels are as follows
    (the image dtype is whatever :func:`skimage.io.imread` returns for
    the stored PNG).

    * :obj:`bbox.dtype == numpy.float32`
    * :obj:`label.dtype == numpy.int32`

    Args:
        data_dir (string): Path to the root of the data
            (default: :obj:`./data/Depth`).
        split ({'trainval', 'test', 'all-marked'}): Select a split of the
            dataset.
        return_id (bool): If :obj:`True`, :meth:`get_example` additionally
            returns the id of the image.
        has_anno (bool): Whether an image has corresponding Annotations.
            If :obj:`False`, :obj:`bbox` and :obj:`label` are :obj:`None`.
    """

    def __init__(self, data_dir='./data/Depth', split='trainval',
                 return_id=False, has_anno=True):
        self.data_dir = data_dir
        self.split = split
        if self.split not in ['trainval', 'test', 'all-marked']:
            warnings.warn(
                'only [trainval, test, all-marked] are available.'
            )
        id_list_file = os.path.join(
            self.data_dir, 'ImageSets/Main/{0}.txt'.format(self.split))
        # Use a context manager so the id-list file handle is closed
        # deterministically (the original open() inside the comprehension
        # leaked it).
        with open(id_list_file) as f:
            self.ids = [id_.strip() for id_ in f]
        self.return_id = return_id
        self.has_anno = has_anno

    def __len__(self):
        """Return the number of images in the selected split."""
        return len(self.ids)

    def get_example(self, i):
        """Returns the i-th example.

        Returns an image and bounding boxes. The image is returned exactly
        as read by :func:`skimage.io.imread` (HWC layout for color PNGs).

        Args:
            i (int): The index of the example.

        Returns:
            tuple of an image, bounding boxes and labels, plus the image id
            when :obj:`return_id` is :obj:`True`. :obj:`bbox` and
            :obj:`label` are :obj:`None` when :obj:`has_anno` is
            :obj:`False`.

        """
        id_ = self.ids[i]
        if self.has_anno:
            anno = ET.parse(
                os.path.join(self.data_dir, 'Annotations', id_ + '.xml'))
            bbox = list()
            label = list()

            for obj in anno.findall('object'):
                bndbox_anno = obj.find('bndbox')
                # subtract 1 to make pixel indexes 0-based
                bbox.append([
                    int(bndbox_anno.find(tag).text) - 1
                    for tag in ('ymin', 'xmin', 'ymax', 'xmax')])
                name = obj.find('name').text.lower().strip()
                label.append(label_names.index(name))
            if bbox:
                bbox = np.stack(bbox).astype(np.float32)
                label = np.stack(label).astype(np.int32)
            else:
                # np.stack raises on an empty list; return well-typed
                # empty arrays for images without any annotated object.
                bbox = np.zeros((0, 4), dtype=np.float32)
                label = np.zeros((0,), dtype=np.int32)
        else:
            bbox = None
            label = None
        # Load an image (depth frames are stored as PNG despite the
        # VOC-style 'JPEGImages' directory name).
        img_file = os.path.join(self.data_dir, 'JPEGImages', id_ + '.png')
        img = imread(img_file)
        if self.return_id:
            return img, bbox, label, id_
        return img, bbox, label


def add_white_space(img, white_space=5):
    """Pad an image with a white border on all four sides.

    Args:
        img: ndarray of shape (rows, cols) or (rows, cols, channels),
            dtype uint8.
        white_space: border width in pixels added on every side.

    Returns:
        A new uint8 ndarray with the original image centered and the
        surrounding border filled with 255 (white).
    """
    rows, cols = img.shape[:2]
    # Keep any trailing channel axis; the border thickness only affects
    # the two spatial dimensions.
    padded_shape = (rows + 2 * white_space, cols + 2 * white_space) + img.shape[2:]
    padded = np.full(padded_shape, 255, dtype=np.uint8)
    padded[white_space:white_space + rows,
           white_space:white_space + cols] = img
    return padded


def patch_to_img(patches, disp_rows, disp_cols, direction='horizontal'):
    """Tile a flat sequence of equally-sized patches into one image.

    Args:
        patches: sequence of patches, each (rows, cols) or
            (rows, cols, channels), all with identical shapes. Patch k
            belongs to logical grid cell (k // disp_cols, k % disp_cols).
        disp_rows: number of patches per grid row.
        disp_cols: number of patches per grid column.
        direction: 'horizontal' lays the grid out as-is; any other value
            transposes the grid (patches are placed column-major).

    Returns:
        uint8 ndarray containing the tiled image.
    """
    first = patches[0]
    patch_rows, patch_cols = first.shape[:2]
    channels = first.shape[2] if len(first.shape) == 3 else 1

    horizontal = direction == "horizontal"
    # Transposing the grid swaps the roles of rows and columns.
    grid_rows, grid_cols = ((disp_rows, disp_cols) if horizontal
                            else (disp_cols, disp_rows))

    canvas_shape = (patch_rows * grid_rows, patch_cols * grid_cols)
    if channels == 3:
        canvas_shape += (3,)
    canvas = np.zeros(canvas_shape, "uint8")

    for idx in range(disp_rows * disp_cols):
        i, j = divmod(idx, disp_cols)
        row, col = (i, j) if horizontal else (j, i)
        canvas[row * patch_rows:(row + 1) * patch_rows,
               col * patch_cols:(col + 1) * patch_cols] = patches[idx]
    return canvas


def get_data(split='trainval', img_rows=56, img_cols=56):
    """Crop every annotated bounding box and resize it to a fixed size.

    Args:
        split: dataset split name passed to :class:`PoseBboxDataset`.
        img_rows: height of each resized crop.
        img_cols: width of each resized crop.

    Returns:
        tuple ``(X_data, y_data)`` where ``X_data`` has shape
        ``(N, img_rows, img_cols, 1)`` (resize outputs float images) and
        ``y_data`` has shape ``(N,)`` with integer class labels.
    """
    dataset = PoseBboxDataset(split=split)
    X_data, y_data = [], []
    for i in range(len(dataset)):
        img, bbox, label = dataset[i]
        for idx in range(len(bbox)):
            ymin, xmin, ymax, xmax = [int(x) for x in bbox[idx]]
            extract_img = img[ymin:ymax, xmin:xmax]
            # skimage.transform.resize takes output_shape as (rows, cols);
            # the original passed (img_cols, img_rows), which only worked
            # because the defaults are square.
            resize_img = resize(extract_img, (img_rows, img_cols), mode='edge')
            X_data.append(np.expand_dims(resize_img, axis=-1))
            y_data.append(label[idx])

    X_data = np.array(X_data)
    y_data = np.array(y_data)
    return X_data, y_data


def show_hist(y_data, split='trainval'):
    """Save a bar chart of label counts to ``data/<split>_label_hist.png``.

    Args:
        y_data: 1-D array-like of integer labels (0=sitting, 1=standing,
            2=lying, matching ``label_names``).
        split: split name used in the plot title and output filename.
    """
    names = ('Sitting', 'Standing', 'Lying')
    values = (1, 2, 3)
    # Three bins covering labels 0, 1 and 2 (the last edge is inclusive).
    hist, _ = np.histogram(y_data, bins=[0, 1, 2, 3])
    labels = ['{}\n({})'.format(name, count)
              for name, count in zip(names, hist)]
    fig = plt.figure(figsize=(20, 10))
    plt.bar(values, hist, width=0.5)
    plt.ylabel('Number of samples', fontsize=20, color='black')
    plt.xlabel('Image Annotations', fontsize=20, color='black')
    plt.title("Label Histogram of {}".format(split))
    plt.xticks(values, labels)
    plt.grid(True, axis='y')
    plt.savefig('data/{}_label_hist.png'.format(split))
    # Close the figure: this function is called in a loop from __main__
    # and the original leaked one open figure per call.
    plt.close(fig)


if __name__ == '__main__':
    # Build the cropped-pose arrays for every split and save a label
    # histogram image for each one.
    for data_split in ('trainval', 'test', 'all-marked'):
        _, split_labels = get_data(data_split)
        show_hist(split_labels, data_split)