# -*- coding:utf-8 -*-
import os
import json
import numpy as np
from PIL import Image
import FileLoad


def load_number_data(data_dir, flatten=False):
    """Load the CQU digit dataset rooted at *data_dir*.

    Expects ``train_img/`` and ``test_img/`` subdirectories, with the
    dataset metadata stored in ``train_img/meta.json``.

    Args:
        data_dir: dataset root directory.
        flatten: if True, each image is returned as a flat 1-D vector
            instead of a 2-D array (forwarded to ``_read_image``).

    Returns:
        (meta, train_dataset, test_dataset) where the datasets are
        ``DataSet`` instances wrapping images and one-hot labels.
    """
    train_dir = os.path.join(data_dir, 'train_img')
    test_dir = os.path.join(data_dir, 'test_img')

    # NOTE(review): meta.json is read from inside train_img here, unlike
    # load_data() which reads it from the dataset root — confirm intended.
    meta_info = os.path.join(train_dir, 'meta.json')
    with open(meta_info, 'r') as f:
        meta = json.load(f)

    train_images, train_labels = _read_images_and_labels_cqu(train_dir, flatten=flatten, **meta)
    test_images, test_labels = _read_images_and_labels_cqu(test_dir, flatten=flatten, **meta)

    # Debug output; single-argument print() is valid on both py2 and py3.
    print(train_images.shape)
    print(train_labels.shape)

    train_dataset = DataSet(train_images, train_labels)
    test_dataset = DataSet(test_images, test_labels)

    return (
        meta,
        train_dataset,
        test_dataset
    )


def _read_images_and_labels_cqu(dir_name, flatten, **meta):
    """Walk the per-class subdirectories of *dir_name* and load every image.

    Each subdirectory name encodes the class label (see _read_lable_cqu);
    the files inside a subdirectory are listed via the project-local
    FileLoad helper.

    Args:
        dir_name: directory containing one subdirectory per class.
        flatten: forwarded to _read_image.
        **meta: dataset metadata (width/height/label_choices, ...).

    Returns:
        (images, labels) as two parallel numpy arrays.
    """
    images = []
    labels = []
    for entry in os.listdir(dir_name):
        # Skip macOS metadata and the dataset descriptor file.
        if entry in ('.DS_Store', 'meta.json'):
            continue
        img_dir = os.path.join(dir_name, entry)
        for img_path in FileLoad.getFileList(img_dir):
            images.append(_read_image(img_path, flatten=flatten, **meta))
            labels.append(_read_lable_cqu(entry, **meta))
    return np.array(images), np.array(labels)


def _read_lable_cqu(dir_name, label_choices, **extra_meta):
    split = dir_name.split(".")
    lable_data = np.zeros(20, int)
    if len(split) == 2:
        index = int(split[0]) + 10
        lable_data[index] = 1
    else:
        lable_data[int(split[0])] = 1
    return lable_data


def load_data(data_dir, flatten=False):
    """Load a captcha dataset laid out as ``train/``, ``test/`` and ``meta.json``.

    Args:
        data_dir: dataset root; must contain 'train', 'test' and 'meta.json'.
        flatten: if True, images are flattened to 1-D vectors.

    Returns:
        (meta, train DataSet, test DataSet).
    """
    meta_path = os.path.join(data_dir, 'meta.json')
    with open(meta_path, 'r') as f:
        meta = json.load(f)

    # Read train first, then test, wrapping each split in a DataSet.
    datasets = []
    for subdir in ('train', 'test'):
        images, labels = _read_images_and_labels(
            os.path.join(data_dir, subdir), flatten=flatten, **meta)
        datasets.append(DataSet(images, labels))

    return (meta, datasets[0], datasets[1])


def _read_images_and_labels(dir_name, flatten, ext='.png', **meta):
    """Read every *ext* image in *dir_name* plus its filename-encoded label.

    Args:
        dir_name: flat directory of labelled image files.
        flatten: forwarded to _read_image.
        ext: file extension filter (default '.png').
        **meta: dataset metadata (width/height/label_choices, ...).

    Returns:
        (images, labels) as two parallel numpy arrays.
    """
    paths = [os.path.join(dir_name, fn)
             for fn in os.listdir(dir_name) if fn.endswith(ext)]
    images = [_read_image(p, flatten=flatten, **meta) for p in paths]
    labels = [_read_label(p, **meta) for p in paths]
    return np.array(images), np.array(labels)


def _read_image(filename, flatten, width, height, **extra_meta):
    """Open an image, convert to 8-bit grayscale and resize to (width, height).

    Args:
        filename: path of the image file.
        flatten: if True, return a 1-D vector of length width*height
            instead of the 2-D array.
        width, height: target size in pixels.

    Returns:
        numpy array of pixel values, 2-D or flattened to 1-D.
    """
    # Pillow >= 10 removed Image.ANTIALIAS; LANCZOS is the same filter
    # under its modern name.  Fall back for very old PIL versions that
    # predate the LANCZOS alias.
    resample = getattr(Image, 'LANCZOS', None) or getattr(Image, 'ANTIALIAS', None)
    im = Image.open(filename).convert('L').resize((width, height), resample)

    data = np.asarray(im)
    if flatten:
        # Total element count is width*height either way.
        return data.reshape(width * height)

    return data


def _read_label(filename, label_choices, **extra_meta):
    basename = os.path.basename(filename)
    labels = basename.split('_')[0]

    data = []

    for c in labels:
        idx = label_choices.index(c)
        tmp = [0] * len(label_choices)
        tmp[idx] = 1
        data.extend(tmp)

    return data


class DataSet(object):
    """In-memory dataset with shuffled mini-batch iteration via next_batch()."""

    def __init__(self, images, labels):
        """Wrap parallel *images* and *labels* arrays (same leading dimension)."""
        assert images.shape[0] == labels.shape[0], (
            'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
        self._images = images
        self._labels = labels
        self._num_examples = images.shape[0]
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        # All images, in the current (possibly shuffled) order.
        return self._images

    @property
    def labels(self):
        # All labels, kept aligned with `images`.
        return self._labels

    @property
    def num_examples(self):
        # Number of examples in the dataset.
        return self._num_examples

    @property
    def epochs_completed(self):
        # How many full passes next_batch() has made over the data.
        return self._epochs_completed

    def next_batch(self, batch_size):
        """Return the next *batch_size* (images, labels) pair of slices.

        The data is reshuffled at the start of every epoch.  If fewer
        than *batch_size* examples remain in the current epoch, the
        remainder is skipped and a fresh epoch begins.
        """
        assert batch_size <= self._num_examples

        # Not enough examples left in this epoch: begin a new one.
        if self._index_in_epoch + batch_size > self._num_examples:
            self._epochs_completed += 1
            self._index_in_epoch = 0

        # Shuffle images and labels together at each epoch boundary.
        if self._index_in_epoch == 0:
            order = np.arange(self._num_examples)
            np.random.shuffle(order)
            self._images = self._images[order]
            self._labels = self._labels[order]

        start = self._index_in_epoch
        end = start + batch_size
        self._index_in_epoch = end
        return self._images[start:end], self._labels[start:end]


def display_debug_info(meta, train_data, test_data):
    """Print dataset statistics and pop up one sample training image.

    Args:
        meta: metadata dict; 'height'/'width' are used for display when
            present.
        train_data, test_data: DataSet instances; train images are
            assumed flattened (load_* called with flatten=True).
    """
    print('%s Meta Info %s' % ('=' * 10, '=' * 10))
    for k, v in meta.items():
        print('%s: %s' % (k, v))
    print('=' * 30)

    count = train_data.images.shape[0]

    # Un-flatten for display.  Fall back to the historical hard-coded
    # 120x90 shape when meta lacks the dimensions.
    # NOTE(review): assumes rows are height and columns are width — confirm.
    height = meta.get('height', 120)
    width = meta.get('width', 90)
    images = train_data.images.reshape(count, height, width)

    print('train images: %s, labels: %s' % (images.shape, train_data.labels.shape))
    print('test images: %s, labels: %s' % (test_data.images.shape, test_data.labels.shape))

    # Show the 10th sample so the label/image pairing can be eyeballed.
    print(train_data.labels[9])
    img = Image.fromarray(images[9])
    img.show()


if __name__ == '__main__':
    # ret1 = load_data("/Users/cheng/Desktop/DeepLearning/Project/captcha-tensorflow/images/char-1-epoch-20", flatten=True)
    # display_debug_info(*ret1)

    # Smoke test: load the CQU digit dataset with flattened images and show
    # its debug summary.  NOTE(review): machine-specific absolute path.
    ret1 = load_number_data("/Users/cheng/Desktop/python/cqu/GUI/", True)
    display_debug_info(*ret1)
