#!/usr/bin/env python
# coding=utf-8

import os
import codecs
import Image
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
import keras.callbacks
from keras import backend as K

# Fix the RNG seed so shuffling and speckle noise are reproducible across runs.
np.random.seed(55)
# Number of image channels fed to the network (3 = RGB, 1 = greyscale).
n_channel = 3
# When True, plot intermediate images with matplotlib for visual debugging.
DEBUG = False
# When True, rotate portrait images 90 degrees to landscape before resizing.
rotate_90 = False


def speckle(img):
    """Overlay smoothed gaussian noise ("speckle" blotches) on *img*.

    Assumes a greyscale image with pixel values in [0, 1]; the result is
    clamped back into that range.  Noise strength is drawn at random per
    call, so output varies between invocations.
    """
    # Random per-call noise strength, then blur the raw noise so it forms
    # larger blotches that look more realistic than per-pixel gaussian noise.
    strength = np.random.uniform(0, 0.6)
    raw_noise = np.random.randn(*img.shape) * strength
    blotches = ndimage.gaussian_filter(raw_noise, 1)
    noisy = img + blotches
    # Clamp into [0, 1]: everything above 1 saturates, everything at or
    # below 0 becomes 0 (matches the original boolean-mask clamping).
    noisy[noisy > 1] = 1
    noisy[noisy <= 0] = 0
    return noisy


class DatasetGenerator(keras.callbacks.Callback):
    """Minibatch generator for an OCR/CTC text-recognition dataset.

    Reads image indexes, text labels, and the character class list from a
    VOC-style directory layout:

        <datapath>/<dataset>_images/           image files
        <datapath>/Annotations/                existence checked only
        <datapath>/ImageSets/<dataset>/        <image_set>.txt,
                                               <image_set>_labels.txt,
                                               classes.txt

    Batches are produced with images in (width, height, channel) order,
    because width is the time dimension when fed into the RNN for CTC.
    NOTE: this module is Python 2 code; the resize arithmetic below relies
    on '/' being integer floor division for ints.
    """
    def __init__(self, datapath, dataset, image_set, minibatch_size,
                 img_w, img_h, downsample_factor,
                 image_ext='jpg', absolute_max_string_len=16):
        """Validate the dataset layout and load the image/label/class indexes.

        :param datapath: root directory of the dataset.
        :param dataset: dataset name, e.g. 'COCO-Text'; images live in
            '<datapath>/<dataset>_images'.
        :param image_set: split name ('train', 'val', 'test', ...); the
            'test' split is allowed to have no labels.
        :param minibatch_size: batch size used by next_batch().
        :param img_w: target image width (the RNN time dimension).
        :param img_h: target image height.
        :param downsample_factor: total downsampling of the conv stack,
            used to compute the CTC input length.
        :param image_ext: image file extension, without the dot.
        :param absolute_max_string_len: maximum allowed label length.
        """
        self.datapath = datapath
        self.images_path = os.path.join(self.datapath, dataset + '_images')
        self.labels_path = os.path.join(self.datapath, 'Annotations')
        assert os.path.exists(datapath), \
            'Dataset path does not exist: {}'.format(self.datapath)
        assert os.path.exists(self.images_path), \
            'Images path does not exist: {}'.format(self.images_path)
        assert os.path.exists(self.labels_path), \
            'Labels path does not exist: {}'.format(self.labels_path)

        self.img_w = img_w
        self.img_h = img_h
        self.minibatch_size = minibatch_size
        self.downsample_factor = downsample_factor
        self.absolute_max_string_len = absolute_max_string_len

        self._dataset = dataset
        self._image_set = image_set
        self._image_ext = image_ext

        self._images_index = self._load_image_set_index()
        self._num_images = len(self._images_index)
        self._labels_index = self._load_label_set_index()
        # The test split carries no labels, so the 1:1 image/label pairing
        # is only enforced for the other splits.
        if not self._image_set == 'test':
            assert len(self._images_index) == len(self._labels_index), \
                'The length of images:{} and labels:{} must equal!'.format(len(self._images_index),
                                                                           len(self._labels_index))

        self._classes = self._load_classes()
        # +2 reserves one extra label for the space character
        # (num_classes - 2) and one for the CTC blank (the last index).
        self._num_classes = len(self._classes) + 2
        self.blank_label = self._num_classes - 1

        # Epoch bookkeeping consumed by _get_next_minibatch_inds().
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def num_images(self):
        """Number of images listed in the image-set file."""
        return self._num_images

    @property
    def num_classes(self):
        """Number of output labels: characters + space + CTC blank."""
        return self._num_classes

    @property
    def image_ext(self):
        """Image file extension (no dot)."""
        return self._image_ext

    @property
    def classes(self):
        """List of character classes, one per line of classes.txt."""
        return self._classes

    @property
    def labels_index(self):
        """List of ground-truth label strings (empty for the test split)."""
        return self._labels_index

    @property
    def images_index(self):
        """List of image identifiers (file stems) for this split."""
        return self._images_index

    def image_path_at(self, i):
        """
        Return the absolute path to image i in the image sequence.
        """
        return self.image_path_from_index(self._images_index[i])

    def image_path_from_index(self, index):
        """
        Construct an image path from the image's "index" identifier.
        """
        image_path = os.path.join(self.images_path, index + '.' + self._image_ext)
        assert os.path.exists(image_path), \
            'Image path does not exist: {}'.format(image_path)
        return image_path

    def _load_image_set_index(self):
        """
        Load the indexes listed in this dataset's image set file.
        """
        image_set_file = os.path.join(self.datapath, 'ImageSets', self._dataset, self._image_set + '.txt')
        assert os.path.exists(image_set_file), \
            'Imageset path does not exist: {}'.format(image_set_file)
        with codecs.open(image_set_file, 'r', 'utf-8') as f:
            images_index = [x.strip() for x in f.readlines()]
        return images_index

    def _load_label_set_index(self):
        """Load label strings from '<image_set>_labels.txt', one per image.

        Returns an empty list for the test split.  Each label must fit in
        absolute_max_string_len characters.
        """
        labels_index = []
        if self._image_set == 'test':
            return labels_index
        label_set_file = os.path.join(self.datapath, 'ImageSets', self._dataset, self._image_set + '_labels.txt')
        if os.path.exists(label_set_file):
            with codecs.open(label_set_file, 'r', 'utf-8') as f:
                for line in f.readlines():
                    line = line.strip()
                    assert len(line) <= self.absolute_max_string_len, \
                        'The length:{} of label:{} in must less than absolute_max_string_len:{}'. \
                            format(len(line), line, self.absolute_max_string_len)
                    labels_index.append(line)
        else:
            # Loading labels from the Annotations directory is unimplemented.
            assert 0, 'The other method of get labels index is still not add!'

        return labels_index

    def _load_classes(self):
        """Read the character class list from classes.txt (one char per line)."""
        classes_file = os.path.join(self.datapath, 'ImageSets', self._dataset, 'classes.txt')
        assert os.path.exists(classes_file), \
            'Classes file does not exist: {}'.format(classes_file)
        with codecs.open(classes_file, 'r', 'utf-8') as f:
            classes_index = [x.strip() for x in f.readlines()]
        print "num_classes:", len(classes_index) + 2
        return classes_index

    def _text_to_labels(self, text):
        """Encode a text string as a list of integer class labels.

        Space maps to num_classes - 2; any character not in the class list
        falls back to the CTC blank label (and is logged).
        """
        ret = []
        for char in text:
            if char in self._classes:
                ret.append(self._classes.index(char))
            elif char == ' ':
                ret.append(self.num_classes - 2)
            else:
                print "char:!", char, "! is not in classes!"
                ret.append(self.blank_label)
        return ret

    # this creates larger "blotches" of noise which look
    # more realistic than just adding gaussian noise
    # assumes greyscale with pixels ranging from 0 to 1

    def get_image(self, db_ind):
        """Load, optionally rotate, resize, normalize, and zero-pad image db_ind.

        Returns a float array of shape (img_w, img_h, C) with values in
        [0, 1], where C is 1 (greyscale) or n_channel (color).  The image is
        resized to fit inside (img_w, img_h) while keeping aspect ratio,
        then zero-padded on the remaining axis to the exact target size.
        NOTE: relies on Python 2 integer division for resize_w/resize_h.
        """
        imagefile = self.image_path_at(db_ind)
        if n_channel == 1:
            # Greyscale path: convert to single-channel ('L') on load.
            img = Image.open(imagefile).convert('L')
            # 1.rotate
            if rotate_90:
                if img.size[0] < img.size[1]:
                    img = img.transpose(Image.ROTATE_90)
            # 2.resize: compute both candidate sizes, keep whichever fits.
            resize_w = self.img_h * img.size[0] / img.size[1]
            resize_h = self.img_w * img.size[1] / img.size[0]
            if resize_w > self.img_w:
                img = img.resize([self.img_w, resize_h], Image.ANTIALIAS)
            else:
                img = img.resize([resize_w, self.img_h], Image.ANTIALIAS)
            if DEBUG:
                print "image:{}, {}".format(self.image_path_at(db_ind), img.size)
                plt.figure(1)
                plt.subplot(141)
                plt.title('resized image')
                plt.imshow(img)

                plt.subplot(142)
                numpy_img = np.array(img)
                print numpy_img.shape
                plt.imshow(np.array(numpy_img))
                plt.title('numpy image0')

            # NOTE: PIL reports size as (w, h) but np.array(img) is (h, w),
            # so add the channel axis and transpose back to (w, h, c).
            img = np.array(img).astype('float32')[:, :, np.newaxis].transpose((1, 0, 2))
            if DEBUG:
                print '_get_minibatch() img:', img.shape
        else:
            # Color path: keep the image's native channels for now.
            img = Image.open(imagefile)
            # 1.rotate
            if rotate_90:
                if img.size[0] < img.size[1]:
                    img = img.transpose(Image.ROTATE_90)
            # 2.resize: compute both candidate sizes, keep whichever fits.
            resize_w = self.img_h * img.size[0] / img.size[1]
            resize_h = self.img_w * img.size[1] / img.size[0]
            if resize_w > self.img_w:
                img = img.resize([self.img_w, resize_h], Image.ANTIALIAS)
            else:
                img = img.resize([resize_w, self.img_h], Image.ANTIALIAS)
            if DEBUG:
                print "image:{}".format(self.image_path_at(db_ind))
                plt.figure(1)
                plt.subplot(141)
                plt.title('resized image')
                plt.imshow(img, cmap='Greys_r')

                plt.subplot(142)
                numpy_img = np.array(img)
                print numpy_img.shape
                plt.imshow(np.array(numpy_img))
                plt.title('numpy image0')

            # NOTE: PIL reports size as (w, h) but np.array(img) is (h, w, c),
            # so transpose back to (w, h, c) and drop any extra channels
            # (e.g. alpha) beyond n_channel.
            img = np.array(img).astype('float32').transpose((1, 0, 2))[:, :, :n_channel]
            if DEBUG:
                print '_get_minibatch() img:', img.shape

        # 3.norm: scale 8-bit pixel values into [0, 1].
        img = img * 1.0 / 255

        if DEBUG:
            plt.subplot(143)
            if n_channel == 1:
                gray_img = img.transpose((1, 0, 2))[:, :, 0]
                print 'gray img:', gray_img.shape
                plt.imshow(gray_img)
            elif n_channel == 3:
                plt.imshow(img.transpose((1, 0, 2)))
            plt.title('numpy image')

        # 4.padding: exactly one of the two resize branches above hit the
        # target, so pad the other axis symmetrically with zeros.
        if self.img_h == img.shape[1]:
            pad_w = self.img_w - resize_w
            left_pad = pad_w // 2
            right_pad = pad_w - left_pad
            # Zero-pad on the left and right to reach img_w.
            img_0 = np.lib.pad(img, ((left_pad, right_pad), (0, 0), (0, 0)), 'constant', constant_values=(0, 0))
            # print '_get_minibatch() img_0(_need_w > resize_w):', img_0.shape

            assert img_0.shape[0] == self.img_w
        else:
            pad_h = self.img_h - resize_h
            up_pad = pad_h // 2
            down_pad = pad_h - up_pad
            # Zero-pad on the top and bottom to reach img_h.
            img_0 = np.lib.pad(img, ((0, 0), (up_pad, down_pad), (0, 0)), 'constant', constant_values=(0, 0))

            # print "up_pad:{},down_pad:{}".format(up_pad, down_pad)
            # print '_get_minibatch() img_0(_need_w > resize_w):', img_0.shape

        if DEBUG:
            print '_get_minibatch() img_0:', img_0.shape
            plt.subplot(144)
            if n_channel == 1:
                gray_img1 = img_0.transpose((1, 0, 2))[:, :, 0]
                print 'gray img:', gray_img1.shape
                plt.imshow(gray_img1)
            elif n_channel == 3:
                plt.imshow(img_0.transpose((1, 0, 2)))
            plt.title('split image')
            plt.show()
        return img_0

    def _get_next_minibatch_inds(self, batch_size, shuffle=True):
        """Return the next batch_size indexes into the (shuffled) image list.

        Maintains a permutation across calls; when an epoch's remainder is
        smaller than batch_size, the batch is completed with the start of a
        freshly shuffled next epoch.
        """
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            self._perm = np.arange(self._num_images)
            np.random.shuffle(self._perm)

        # Go to the next epoch
        if start + batch_size <= self._num_images:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            db_inds = self._perm[start:end]
        else:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_images = self._num_images - start
            db_inds0 = self._perm[start:self._num_images]

            # Shuffle the data
            if shuffle:
                self._perm = np.arange(self._num_images)
                np.random.shuffle(self._perm)

            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_images
            end = self._index_in_epoch
            db_inds = np.append(db_inds0, self._perm[start:end])
            assert len(db_inds) == batch_size, 'The length of db_inds:{} and batch size:{} not match!'. \
                format(len(db_inds), batch_size)

        return db_inds

    def _get_batch(self, minibatch, train=True):
        """Assemble one CTC batch of `minibatch` samples.

        Returns (inputs, outputs) where inputs holds the image tensor,
        padded integer labels, CTC input/label lengths, and the raw label
        strings; outputs holds the dummy 'ctc' target for the CTC loss.
        When train is True the last 3 slots are filled with speckled blank
        images labeled with the blank class (translational invariance).
        NOTE(review): the else branch indexes self._labels_index[db_ind],
        which is empty for the 'test' split — calling this with the test
        split would raise IndexError; confirm intended usage.
        """
        db_inds = self._get_next_minibatch_inds(minibatch, shuffle=True)
        # print "db_inds:{}, {}".format(db_inds, len(db_inds))
        batch_size = len(db_inds)
        # width and height are backwards from typical Keras convention
        # because width is the time dimension when it gets fed into the RNN
        if K.image_data_format() == 'channels_first':
            X_data = np.ones([batch_size, n_channel, self.img_w, self.img_h])
        else:
            X_data = np.ones([batch_size, self.img_w, self.img_h, n_channel])

        labels = np.ones([batch_size, self.absolute_max_string_len])
        input_length = np.zeros([batch_size, 1])
        label_length = np.zeros([batch_size, 1])
        source_str = []
        for index, db_ind in enumerate(db_inds):
            # print "{}_{}".format(index, db_ind)
            # Mix in some blank inputs.  This seems to be important for
            # achieving translational invariance
            if train and index > batch_size - 4:
                if K.image_data_format() == 'channels_first':
                    blank_img = np.zeros((n_channel, self.img_w, self.img_h), dtype=np.float32)
                    X_data[index, :, 0:self.img_w, :] = speckle(blank_img)
                else:
                    blank_img = np.zeros((self.img_w, self.img_h, n_channel), dtype=np.float32)
                    X_data[index, 0:self.img_w, :, :] = speckle(blank_img)

                labels[index, 0] = self.blank_label
                # CTC input length: conv output width minus 2 (RNN warm-up).
                input_length[index] = self.img_w // self.downsample_factor - 2
                label_length[index] = 1
                source_str.append('')
                # print db_ind
                # print source_str[-1]
            else:
                # print db_ind
                # print self._labels_index[db_ind]
                if K.image_data_format() == 'channels_first':
                    # NOTE(review): get_image() returns (w, h, c) but this
                    # slice is (c, w, h); the shapes look mismatched —
                    # presumably the channels_first path is never exercised
                    # here. Confirm before relying on it.
                    X_data[index, :n_channel, 0:self.img_w, :] = self.get_image(db_ind)
                else:
                    X_data[index, 0:self.img_w, :, 0:n_channel] = self.get_image(db_ind)
                # Labels are padded with -1 beyond the word's length.
                Y_data = np.ones([self.absolute_max_string_len]) * -1
                word = self._labels_index[db_ind]
                # print "word:{}".format(word)
                Y_data[0:len(word)] = self._text_to_labels(word)

                labels[index] = Y_data
                input_length[index] = self.img_w // self.downsample_factor - 2
                label_length[index] = len(word)
                source_str.append(word)
                # print 'source_str:{}'.format(source_str[-1].encode('utf-8'))
                # # print 'the_input:{}'.format(X_data.shape)
                # print 'the_labels:{}, {}'.format(len(labels), labels[index])
                # print 'input_length:{}'.format(np.array(input_length).ravel())
                # print 'label_length{}'.format(np.array(label_length).ravel())

        inputs = {'the_input': X_data,  # [minibatch, 0:n_channel, img_w, img_h]
                  'the_labels': labels,  # [minibatch, len(true_text)] each element is true_text
                  'input_length': input_length,  # [minibatch] each element is the input size to ctc
                  'label_length': label_length,  # [minibatch] each element is len(true_text)
                  'source_str': source_str  # used for visualization only
                  }
        outputs = {'ctc': np.zeros([batch_size])}  # dummy data for dummy loss function
        # if DEBUG:
        # print 'the_input:{}'.format(X_data.shape)
        # print 'the_labels:{},{},{},{},{},{},{}'.format(len(labels), labels[0], labels[1], labels[2], labels[3], labels[4], labels[5])
        # # print 'input_length:{}'.format(np.array(input_length).ravel())
        # print 'label_length{}'.format(np.array(label_length).ravel())
        # print 'source_str:{},{},{},{},{},{}'.format(source_str[0].encode('utf-8'), source_str[1].encode('utf-8'),
        #                                             source_str[2].encode('utf-8'), source_str[3].encode('utf-8'),
        #                                             source_str[4].encode('utf-8'), source_str[5].encode('utf-8'))

        return inputs, outputs

    def next_batch(self):
        """Infinite generator of (inputs, outputs) batches for fit_generator.

        Blank-image mixing is only enabled for the 'train' split.
        """
        while 1:
            # print self._image_set
            train = (self._image_set == 'train')
            print "is train?:", train
            # if self._image_set == 'val':
            #     global DEBUG
            #     DEBUG = True
            ret = self._get_batch(self.minibatch_size, train=train)

            yield ret


if __name__ == '__main__':
    # Input Parameters
    img_h = 32
    img_w = 128
    # Network parameters
    conv_filters = 16
    kernel_size = (3, 3)
    pool_size = 2
    time_dense_size = 64
    rnn_size = 512
    minibatch_size = 128

    if K.image_data_format() == 'channels_first':
        input_shape = (n_channel, img_w, img_h)
    else:
        input_shape = (img_w, img_h, n_channel)

    datapath = '/home/hsp/jr/work/ocr/data'
    dataset = 'COCO-Text'
    img_gen = DatasetGenerator(datapath=datapath,
                               dataset=dataset,
                               image_set='test',
                               minibatch_size=minibatch_size,
                               img_w=img_w,
                               img_h=img_h,
                               downsample_factor=(pool_size ** 2),
                               image_ext='jpg',
                               absolute_max_string_len=50)

    for i in xrange(100):
        print "------------------------{}-------------------------".format(i)
        inputs, outputs = next(img_gen.next_batch())

        for ii in inputs:
            print 'source_str:{}'.format(ii['source_str'].encode('utf-8'))

    for key, value in inputs.items():
        print "train:" + key + ':'
        print value, len(value)

    print img_gen.num_classes
    print img_gen.labels_index
    print img_gen.num_images
    print img_gen.classes
