#!/usr/bin/env python
# coding=utf-8

import numpy as np
from keras import backend as K
import keras.callbacks
import os
import json
import codecs
import Image
import matplotlib.pyplot as plt
from scipy import ndimage

DEBUG = False
np.random.seed(55)
n_channel = 3
rotate_90 = False


# this creates larger "blotches" of noise which look
# more realistic than just adding gaussian noise
# assumes greyscale with pixels ranging from 0 to 1

def speckle(img):
    """Add blurred gaussian noise ("blotches") to a [0, 1] greyscale image.

    A random severity drawn from [0, 0.6) scales white noise, which is then
    gaussian-blurred (sigma=1) so it forms realistic-looking blotches rather
    than per-pixel grain.  The noisy image is clamped back to [0, 1].
    """
    severity = np.random.uniform(0, 0.6)
    noise = ndimage.gaussian_filter(np.random.randn(*img.shape) * severity, 1)
    return np.clip(img + noise, 0, 1)


# Uses generator functions to supply train/test with
# data. Image renderings of text are created on the fly
# each time with random perturbations

class DatasetGenerator(keras.callbacks.Callback):
    """Serves train/val minibatches for CTC-based OCR training.

    Loads image indices and text labels from an ICDAR-style dataset layout
    and exposes endless minibatch generators (next_train / next_val).
    """

    def __init__(self, dataset, train_minibatch_size, val_minibatch_size, img_w, img_h,
                 downsample_factor, absolute_max_string_len=16):
        """
        :param dataset: root directory of the dataset
        :param train_minibatch_size: minibatch size for training batches
        :param val_minibatch_size: minibatch size for validation batches
        :param img_w: target image width (the RNN time axis)
        :param img_h: target image height
        :param downsample_factor: width shrink factor of the conv stack,
            used to compute the CTC input length
        :param absolute_max_string_len: exclusive upper bound on label length
        """
        self._dataset = dataset
        self._image_set = 'train'
        self._images_path = os.path.join(self._dataset, 'ICDAR_ch2_Task3')
        self._labels_path = os.path.join(self._dataset, 'Annotations')

        assert os.path.exists(self._dataset), \
            'Dataset path does not exist: {}'.format(self._dataset)
        assert os.path.exists(self._images_path), \
            'Image path does not exist: {}'.format(self._images_path)
        # Bug fix: this assert used to re-check _images_path, so a missing
        # Annotations directory was never detected here.
        assert os.path.exists(self._labels_path), \
            'Annotation path does not exist: {}'.format(self._labels_path)

        self.img_w = img_w
        self.img_h = img_h
        self.train_minibatch_size = train_minibatch_size
        self.val_minibatch_size = val_minibatch_size
        self.absolute_max_string_len = absolute_max_string_len
        self.downsample_factor = downsample_factor

        self._image_ext = '.png'
        self._classes = self._load_classes()
        self._train_image_index = self._load_image_set_index('train')
        self._val_image_index = self._load_image_set_index('val')
        self._train_num_images = len(self._train_image_index)
        self._val_num_images = len(self._val_image_index)
        self._train_label_index = self._load_label_set_index('train')
        self._val_label_index = self._load_label_set_index('val')
        assert self._train_num_images == len(self._train_label_index), \
            'The length of train image and label must equal!'
        assert self._val_num_images == len(self._val_label_index), \
            'The length of validation image and label must equal!'

        # The last one is blank, the second last one is ' '
        self._num_classes = len(self._classes) + 2
        self.blank_label = self._num_classes - 1

        # Per-set epoch bookkeeping consumed by _get_next_minibatch_inds().
        self._train_epochs_completed = 0
        self._val_epochs_completed = 0
        self._train_index_in_epoch = 0
        self._val_index_in_epoch = 0

    @property
    def num_classes(self):
        """Total number of label classes: dataset chars + space + CTC blank."""
        return self._num_classes

    @property
    def classes(self):
        """List of character classes loaded from classes.txt."""
        return self._classes

    @property
    def train_num_images(self):
        """Number of images in the training split."""
        return self._train_num_images

    @property
    def val_num_images(self):
        """Number of images in the validation split."""
        return self._val_num_images

    @property
    def train_label_index(self):
        """Text labels of the training split, aligned with its image index."""
        return self._train_label_index

    @property
    def val_label_index(self):
        """Text labels of the validation split, aligned with its image index."""
        return self._val_label_index

    def image_path_at(self, i, image_set):
        """Return the absolute path to image *i* of the given image set."""
        index_lists = {'train': self._train_image_index,
                       'val': self._val_image_index}
        if image_set not in index_lists:
            raise ValueError("Unknown imageset：{}！".format(image_set))
        return self.image_path_from_index(index_lists[image_set][i])

    def image_path_from_index(self, index):
        """Build the absolute image path for identifier *index* and verify it exists."""
        path = os.path.join(self._images_path, index + self._image_ext)
        assert os.path.exists(path), \
            'Image file does not exist: {}'.format(path)
        return path

    def _load_classes(self):
        """Load the character class list from classes.txt (one char per line, UTF-8)."""
        classes_file = os.path.join(self._dataset, 'ImageSets/ICDAR_ch2_Task3', 'classes.txt')
        with codecs.open(classes_file, 'r', 'utf-8') as f:
            cla = [x.strip() for x in f.readlines()]
        # The printed count adds 2 to mirror _num_classes (space + CTC blank).
        print "cla:", len(cla) + 2
        return cla

    def _load_image_set_index(self, image_set):
        """Read the list of image identifiers for *image_set* ('train' or 'val')."""
        image_set_file = os.path.join(self._dataset, 'ImageSets/ICDAR_ch2_Task3', image_set + '.txt')
        assert os.path.exists(image_set_file), \
            'Path does not exist: {}'.format(image_set_file)
        with open(image_set_file, 'rt') as f:
            return [line.strip() for line in f]

    def _load_label_set_index(self, image_set):
        """Load (or build and cache) the text labels for *image_set*.

        Fast path: if ``<image_set>_labels.txt`` exists, read one label per
        line and validate that each label length is in
        [1, absolute_max_string_len).  Slow path: read each image's JSON
        annotation, collect the 'text' fields, and write the cache file.

        NOTE(review): annotations whose text contains a newline are moved
        aside via `os.system("mv ...")` to a hard-coded, machine-specific
        directory — verify this path before reuse.  Validation failures use
        `assert`, which is stripped under `python -O`.
        """
        labels_file = os.path.join(self._dataset, 'ImageSets/ICDAR_ch2_Task3', image_set + '_labels.txt')
        if os.path.exists(labels_file):
            labels = []
            i = 1  # 1-based line counter, used only in error messages
            with codecs.open(labels_file, 'r', 'utf-8') as f:
                for x in f.readlines():
                    if len(x.strip()) < self.absolute_max_string_len:
                        labels.append(x.strip())
                        if len(x.strip()) < 1:
                            assert 0, 'The label is too short:{} in line {}'.\
                                format(x.strip(), i, self.absolute_max_string_len)
                    else:
                        assert 0, 'The label is too long:{} in line {}, must less than {}'.\
                            format(x.strip(), i, self.absolute_max_string_len)
                    i += 1
        else:
            labels = []
            if image_set == 'train':
                num_im = self._train_num_images
                image_index = self._train_image_index
            elif image_set == 'val':
                num_im = self._val_num_images
                image_index = self._val_image_index
            else:
                raise ValueError("Unknown imageset：{}！".format(image_set))
            for i in xrange(num_im):
                annofile = os.path.join(self._labels_path, image_index[i] + '.json')
                with open(annofile) as json_file:
                    anno = json.load(json_file)
                # The annotation's 'filename' (minus 4-char extension) must
                # match the index entry it was looked up by.
                assert anno['filename'][:-4] == image_index[i], \
                    'The label file {:s} and the image {:s} not match!'.format(annofile, image_index[i])
                if '\n' in anno['text']:
                    print 'The label file {:s} has huanhangfu!'.format(annofile)
                    os.system("mv {} {}".
                              format(annofile, '/home/hsp/jr/work/Font_recognition_280/data/Annotations_wrong'))
                    continue
                labels.append(anno['text'])
            print 'labels length:', len(labels)

            # Cache the labels so subsequent runs take the fast path above.
            buf = '\n'.join(labels)
            with codecs.open(labels_file, 'w', 'utf-8') as f:
                # for label in labels:
                f.write(buf)
        return labels

    # num_words can be independent of the epoch size due to the use of generators
    # as max_string_len grows, num_words can grow
    def get_image(self, index, image_set):
        """Load image *index* from *image_set*; resize, normalize and pad.

        The image is resized to fit (img_w, img_h) preserving aspect ratio,
        scaled to [0, 1], transposed to (w, h, c) — width first because
        width becomes the RNN time axis downstream — and zero-padded to
        exactly (img_w, img_h, n_channel).

        NOTE(review): resize_w/resize_h use `/`, i.e. floor division on ints
        under Python 2 — confirm semantics before porting to Python 3.
        """
        # print "get_image:", index, self._image_index[index]

        imagefile = self.image_path_at(index, image_set)
        if n_channel == 1:
            # Greyscale path: force single-channel ('L') mode.
            img = Image.open(imagefile).convert('L')
            if rotate_90:
                # Rotate portrait images into landscape orientation.
                if img.size[0] < img.size[1]:
                    img = img.transpose(Image.ROTATE_90)
            # Candidate sizes preserving aspect ratio when matching each target edge.
            resize_w = self.img_h * img.size[0] / img.size[1]
            resize_h = self.img_w * img.size[1] / img.size[0]
            if resize_w > self.img_w:
                # Too wide when matching height: fix width and shrink height.
                img = img.resize([self.img_w, resize_h], Image.ANTIALIAS)
            else:
                img = img.resize([resize_w, self.img_h], Image.ANTIALIAS)
            if DEBUG:
                print "image:{}, {}".format(self.image_path_at(index, image_set), img.size)
                plt.figure(1)
                plt.subplot(141)
                plt.title('resized image')
                plt.imshow(img)

                plt.subplot(142)
                numpy_img = np.array(img)
                print numpy_img.shape
                plt.imshow(np.array(numpy_img))
                plt.title('numpy image0')

            # Add a channel axis, then swap (h, w, c) -> (w, h, c).
            img = np.array(img).astype('float32')[:, :, np.newaxis].transpose((1, 0, 2))
            if DEBUG:
                print '_get_minibatch() img:', img.shape
        else:
            img = Image.open(imagefile)
            if rotate_90:
                if img.size[0] < img.size[1]:
                    img = img.transpose(Image.ROTATE_90)
            # 1.resize
            resize_w = self.img_h * img.size[0] / img.size[1]
            resize_h = self.img_w * img.size[1] / img.size[0]
            if resize_w > self.img_w:
                img = img.resize([self.img_w, resize_h], Image.ANTIALIAS)
            else:
                img = img.resize([resize_w, self.img_h], Image.ANTIALIAS)
            if DEBUG:
                print "image:{}".format(self.image_path_at(index, image_set))
                plt.figure(1)
                plt.subplot(141)
                plt.title('resized image')
                plt.imshow(img, cmap='Greys_r')

                plt.subplot(142)
                numpy_img = np.array(img)
                print numpy_img.shape
                plt.imshow(np.array(numpy_img))
                plt.title('numpy image0')
            # Note: PIL reports size as (w, h), but np.array yields (h, w, c),
            # so transpose back to (w, h, c); keep only the first n_channel
            # channels (drops e.g. an alpha channel).
            img = np.array(img).astype('float32').transpose((1, 0, 2))[:, :, :n_channel]
            if DEBUG:
                print '_get_minibatch() img:', img.shape

        # Scale pixel values to [0, 1].
        img = img * 1.0 / 255

        if DEBUG:
            plt.subplot(143)
            if n_channel == 1:
                gray_img = img.transpose((1, 0, 2))[:, :, 0]
                print 'gray img:', gray_img.shape
                plt.imshow(gray_img)
            elif n_channel == 3:
                plt.imshow(img.transpose((1, 0, 2)))
            plt.title('numpy image')

        # padding or center split
        if self.img_h == img.shape[1]:
            # Height already matches: zero-pad the width evenly on both sides.
            # (Original comment said "pad the mean"; constant_values=0 pads zeros.)
            pad_w = self.img_w - resize_w
            left_pad = pad_w // 2
            right_pad = pad_w - left_pad
            img_0 = np.lib.pad(img, ((left_pad, right_pad), (0, 0), (0, 0)), 'constant', constant_values=(0, 0))
            # print '_get_minibatch() img_0(_need_w > resize_w):', img_0.shape

            assert img_0.shape[0] == self.img_w
        else:
            # Width already matches: zero-pad the height evenly on both sides.
            pad_h = self.img_h - resize_h
            up_pad = pad_h // 2
            down_pad = pad_h - up_pad
            img_0 = np.lib.pad(img, ((0, 0), (up_pad, down_pad), (0, 0)), 'constant', constant_values=(0, 0))
            # print '_get_minibatch() img_0(_need_w > resize_w):', img_0.shape

        if DEBUG:
            print '_get_minibatch() img_0:', img_0.shape
            plt.subplot(144)
            if n_channel == 1:
                gray_img1 = img_0.transpose((1, 0, 2))[:, :, 0]
                print 'gray img:', gray_img1.shape
                plt.imshow(gray_img1)
            elif n_channel == 3:
                plt.imshow(img_0.transpose((1, 0, 2)))
            plt.title('split image')
            plt.show()

        return img_0

    def text_to_labels(self, s):
        ret = []
        # print s
        for char in s:
            try:
                if char == ' ':
                    ind = self.num_classes - 2
                    # print "kong:{}!".format(char.encode('utf-8'))
                else:
                    ind = self._classes.index(char)
            except:
                print "Wrong char:{}!".format(char.encode('utf-8'))
            ret.append(ind)
        # print ret
        return ret

    def _get_next_minibatch_inds(self, batch_size, image_set, shuffle=True):
        """Return the next *batch_size* dataset indices for *image_set*.

        Epoch state (cursor, permutation, epochs-completed counter) is kept
        per image set on the instance.  When a batch straddles an epoch
        boundary, the tail of the old permutation is concatenated with the
        head of a freshly shuffled one.
        """
        # Shuffle for the first epoch
        if image_set == 'train':
            start = self._train_index_in_epoch
            n_images = self._train_num_images
            if self._train_epochs_completed == 0 and start == 0 and shuffle:
                self._train_perm = np.arange(n_images)
                np.random.shuffle(self._train_perm)

            index_in_epoch = self._train_index_in_epoch
            perm = self._train_perm
        elif image_set == 'val':
            start = self._val_index_in_epoch
            n_images = self._val_num_images
            if self._val_epochs_completed == 0 and start == 0 and shuffle:
                self._val_perm = np.arange(n_images)
                np.random.shuffle(self._val_perm)

            index_in_epoch = self._val_index_in_epoch
            perm = self._val_perm
        else:
            raise ValueError('Unknown imageset:{}'.format(image_set))

        # print "n_images:{}".format(n_images)
        # print "start:{}".format(start)

        # Go to the next epoch
        if start + batch_size <= n_images:
            # Whole batch fits inside the current epoch.
            index_in_epoch += batch_size
            end = index_in_epoch
            db_inds = perm[start:end]
            # print "end0:{}".format(end)
            # print '1', db_inds, len(db_inds)
        else:
            # Finished epoch
            if image_set == 'train':
                self._train_epochs_completed += 1
            elif image_set == 'val':
                self._val_epochs_completed += 1
            else:
                raise ValueError('Unknown imageset:{}'.format(image_set))

            # Get the rest examples in this epoch
            rest_num_images = n_images - start
            db_inds0 = perm[start:n_images]

            # Shuffle the data
            if shuffle:
                perm = np.arange(n_images)
                np.random.shuffle(perm)

            # Start next epoch: take the remainder of the batch from the
            # head of the new permutation.
            start = 0
            index_in_epoch = batch_size - rest_num_images
            end = index_in_epoch
            db_inds = np.append(db_inds0, perm[start:end])

            # print '2', db_inds, len(db_inds)
            # print "end1:{}".format(end)
            assert len(db_inds) == batch_size

        # Persist the updated cursor and permutation for the next call.
        if image_set == 'train':
            self._train_index_in_epoch = index_in_epoch
            self._train_perm = perm
        elif image_set == 'val':
            self._val_index_in_epoch = index_in_epoch
            self._val_perm = perm
        else:
            raise ValueError('Unknown imageset:{}'.format(image_set))

        # print "max ind:{}".format(max(db_inds))
        assert max(db_inds) < n_images, "db_inds wrong:{} from perm：{}".format(db_inds, perm)

        return db_inds

    # each time an image is requested from train/val/test, a new random
    # painting of the text is performed
    def get_batch(self, db_inds, image_set):
        """Build one CTC minibatch from the given dataset indices.

        Returns ``(inputs, outputs)``: *inputs* holds the image tensor,
        padded label matrix, CTC input/label lengths and source strings;
        *outputs* is the dummy target for the CTC loss layer.
        """
        # width and height are backwards from typical Keras convention
        # because width is the time dimension when it gets fed into the RNN
        size = len(db_inds)
        if image_set == 'train':
            minibatch_size = self.train_minibatch_size
        elif image_set == 'val':
            minibatch_size = self.val_minibatch_size
        else:
            raise ValueError('Unknown imageset:{}'.format(image_set))

        assert size == minibatch_size, "db_inds length must equal with train_minibatch_size"

        if K.image_data_format() == 'channels_first':
            X_data = np.ones([size, n_channel, self.img_w, self.img_h])
        else:
            X_data = np.ones([size, self.img_w, self.img_h, n_channel])

        labels = np.ones([size, self.absolute_max_string_len])
        input_length = np.zeros([size, 1])
        label_length = np.zeros([size, 1])
        source_str = []
        for i, db_ind in enumerate(db_inds):
            # Mix in some blank inputs.  This seems to be important for
            # achieving translational invariance
            if image_set == 'train' and i > size - 4:
                if K.image_data_format() == 'channels_first':
                    blank_img = np.zeros((n_channel, self.img_w, self.img_h), dtype=np.float32)
                    X_data[i, :, 0:self.img_w, :] = speckle(blank_img)
                else:
                    blank_img = np.zeros((self.img_w, self.img_h, n_channel), dtype=np.float32)
                    X_data[i, 0:self.img_w, :, :] = speckle(blank_img)
                labels[i, 0] = self.blank_label
                input_length[i] = self.img_w // self.downsample_factor - 2
                label_length[i] = 1
                source_str.append('')
            else:
                if K.image_data_format() == 'channels_first':
                    X_data[i, :, 0:self.img_w, :] = self.get_image(db_ind, image_set)
                else:
                    X_data[i, 0:self.img_w, :, :] = self.get_image(db_ind, image_set)

                Y_data = np.ones([self.absolute_max_string_len]) * -1

                if image_set == 'train':
                    word = self._train_label_index[db_ind]
                elif image_set == 'val':
                    try:
                        word = self._val_label_index[db_ind]
                    except IndexError:
                        # Bug fix: a bare `except` used to swallow this error
                        # and fall through with `word` unbound (NameError
                        # below) or stale from the previous iteration.
                        # Report the offending index and fail explicitly.
                        print "{}, error:{}".format(image_set, db_ind)
                        raise
                else:
                    raise ValueError('Unknown imageset:{}'.format(image_set))

                Y_data[0:len(word)] = self.text_to_labels(word)

                labels[i, :] = Y_data
                input_length[i] = self.img_w // self.downsample_factor - 2
                label_length[i] = len(word)
                source_str.append(word)

        inputs = {'the_input': X_data,  # [minibatch, 0, img_w, img_h]
                  'the_labels': labels,  # [minibatch, len(true_text)] each element is true_text
                  'input_length': input_length,  # [minibatch] each element is the input size to ctc
                  'label_length': label_length,  # [minibatch] each element is len(true_text)
                  'source_str': source_str  # used for visualization only
                  }
        outputs = {'ctc': np.zeros([size])}  # dummy data for dummy loss function
        if DEBUG:
            print 'the_input:{}'.format(X_data.shape)
            print 'the_labels:{},{}'.format(len(labels), labels[0])
            print 'input_length:{}'.format(np.array(input_length).ravel())
            print 'label_length{}'.format(np.array(label_length).ravel())
            print 'source_str:{}'.format(source_str[0].encode('utf-8'))

        return inputs, outputs

    def next_train(self):
        """Endlessly yield training (inputs, outputs) minibatches."""
        while True:
            batch_inds = self._get_next_minibatch_inds(self.train_minibatch_size, 'train', shuffle=True)
            yield self.get_batch(batch_inds, 'train')

    def next_val(self):
        """Endlessly yield validation (inputs, outputs) minibatches."""
        while True:
            batch_inds = self._get_next_minibatch_inds(self.val_minibatch_size, 'val', shuffle=True)
            yield self.get_batch(batch_inds, 'val')


if __name__ == '__main__':
    # Input Parameters
    img_h = 32
    img_w = 128
    # Network parameters
    conv_filters = 16
    kernel_size = (3, 3)
    pool_size = 2
    time_dense_size = 64
    rnn_size = 512
    train_minibatch_size = 64
    val_minibatch_size = 32

    if K.image_data_format() == 'channels_first':
        input_shape = (n_channel, img_w, img_h)
    else:
        input_shape = (img_w, img_h, n_channel)

    dataset = '/home/hsp/jr/work/ocr/data'
    img_gen = DatasetGenerator(dataset=dataset,
                               train_minibatch_size=train_minibatch_size,
                               val_minibatch_size=val_minibatch_size,
                               img_w=img_w,
                               img_h=img_h,
                               downsample_factor=(pool_size ** 2),
                               absolute_max_string_len=20)

    for i in xrange(100):
        print "------------------------{}-------------------------".format(i)
        inputs, outputs = next(img_gen.next_train())
        inputs_val, outputs_val = next(img_gen.next_val())
    for key, value in inputs.items():
        print "train:" + key + ':'
        print value, len(value)
    for key, value in inputs_val.items():
        print "val:" + key + ':'
        print value

    print img_gen.num_classes
    print img_gen.train_label_index
    print img_gen.val_label_index
    print img_gen.train_num_images
    print img_gen.val_num_images
