#! /usr/bin/env python3
from glob import glob
import os

from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

import numpy as np

np.random.seed(1)

import keras
from keras.layers import Input
from keras.layers import Conv2D, LeakyReLU, MaxPool2D
from keras.layers import Flatten, Dense, Reshape
from keras.layers import Conv2DTranspose
from keras.models import Model

from sklearn.cluster import AgglomerativeClustering

from tqdm import tqdm
import fire


def generatorFunc(images, batch_size=32, shuffle=True):
    """Endlessly yield ``(x, x)`` autoencoder batches scaled to [0, 1].

    Each pass over the data emits ``len(images) // batch_size`` batches;
    a trailing remainder that does not fill a whole batch is skipped.

    Args:
        images: sequence of image-like objects accepted by ``np.array``.
        batch_size: samples per batch; silently capped at ``len(images)``.
        shuffle: when True, visit the images in a fresh random order on
            every pass over the data.

    Yields:
        ``(batch, batch)`` tuples where ``batch`` is a float32 array with
        values in [0, 1] — input and reconstruction target are the same.
    """
    batch_size = min(batch_size, len(images))
    full_batches = len(images) // batch_size
    while True:
        if shuffle:
            order = np.random.permutation(len(images))
        else:
            order = np.arange(len(images))
        for start in range(0, full_batches * batch_size, batch_size):
            chunk = order[start:start + batch_size]
            raw = np.stack([np.array(images[i], dtype='uint8') for i in chunk])
            batch = raw.astype('float32') / 255
            yield batch, batch

def imageAdaptiveClass(image_dir, classes_num, output='output.csv', output_weights='weights.h5', weights_file=None, train=True,
                       normal_size=(256, 256), batch_size=32, epochs=32, vect_width=256):
    """Cluster the images in *image_dir* into *classes_num* groups.

    A convolutional autoencoder is (optionally) trained on the images; the
    activations of its bottleneck layer are then standardized and fed to
    agglomerative clustering.  Results are written as ``path, label`` lines
    to *output*.

    Args:
        image_dir: directory whose files are loaded as images.
        classes_num: number of clusters to produce.
        output: CSV file receiving one ``path, label`` line per image.
        output_weights: path where trained autoencoder weights are saved.
        weights_file: optional pre-trained weights loaded before use.
        train: when False, skip training (typically with *weights_file*).
        normal_size: (width, height) every image is resized to.
        batch_size: training batch size; capped at the image count.
        epochs: number of training epochs.
        vect_width: size of the bottleneck feature vector.

    Raises:
        ValueError: if *image_dir* contains no files.
    """
    print('load images...')
    image_paths = sorted(glob(os.path.join(image_dir, '*')))
    if not image_paths:
        # Fail early with a clear message instead of a ZeroDivisionError
        # deep inside the batch generator.
        raise ValueError('no images found in {!r}'.format(image_dir))
    images = []
    for path in tqdm(image_paths, ncols=80):
        image = Image.open(path)
        image = image.resize(normal_size)
        if image.mode != 'RGB':
            # The model expects 3 channels; normalize palettes/grayscale/RGBA.
            image = image.convert('RGB')
        images.append(image)

    print('build models...')
    input_shape = (normal_size[0], normal_size[1], 3)

    inputs = Input(input_shape)

    # Encoder unit: 3x3 conv + LeakyReLU, then halve spatial size.
    def convlrelu(x, f):
        x = LeakyReLU()(Conv2D(f, (3, 3), padding='same')(x))
        return MaxPool2D((2, 2))(x)

    # Decoder unit: stride-2 transposed conv doubles spatial size.
    def convtrans(x, f):
        x = Conv2DTranspose(f, (3, 3), strides=2, padding='same', activation='relu')(x)
        return x

    x = convlrelu(inputs, 5)
    x = convlrelu(x, 8)
    x = convlrelu(x, 13)
    x = convlrelu(x, 21)
    # Remember the pre-flatten shape so the decoder can reshape back.
    # NOTE: `.value` access assumes TF1-style Dimension objects.
    _, row, col, channels = x.get_shape()

    x = Flatten()(x)
    x = Dense(vect_width, activation='relu')(x)
    # Encoder-only model used later to extract bottleneck feature vectors.
    sample_model = Model(inputs=inputs, outputs=x)

    x = Dense(row.value * col.value * channels.value, activation='relu')(x)
    x = Reshape((row.value, col.value, channels.value))(x)
    x = convtrans(x, 13)
    x = convtrans(x, 8)
    x = convtrans(x, 5)

    outputs = Conv2DTranspose(3, (3, 3), strides=2, padding='same')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer='Nadam', loss='mse', metrics=['mae'])

    if weights_file is not None:
        print('load weights...')
        model.load_weights(weights_file)
    if train:
        print('train model...')
        generator = generatorFunc(images, batch_size=batch_size)
        # Cap the batch size exactly like generatorFunc does, so
        # steps_per_epoch can never be 0 when there are fewer images
        # than batch_size (0 steps would train on nothing or raise,
        # depending on the Keras version).
        effective_batch = min(batch_size, len(images))
        steps = max(1, len(images) // effective_batch)
        model.fit_generator(generator, steps_per_epoch=steps, epochs=epochs)
        model.save_weights(output_weights)

    # Extract one bottleneck vector per image, in file order.
    generator = generatorFunc(images, batch_size=1, shuffle=False)
    sample_gs = sample_model.predict_generator(generator, steps=len(images), verbose=1)
    sample_vects = sample_gs.reshape(sample_gs.shape[0], -1)

    # Standardize globally (scalar mean/std) before clustering.
    sample_vects_mean = sample_vects.mean()
    sample_vects_std = sample_vects.std()
    sample_vects_normal = (sample_vects - sample_vects_mean) / sample_vects_std

    output_classes = AgglomerativeClustering(n_clusters=classes_num).fit_predict(sample_vects_normal)

    with open(output, 'w') as f:
        f.writelines(['{}, {}\n'.format(path, int(label))
                      for path, label in zip(image_paths, output_classes)])
    print('processing finished!')

if __name__ == '__main__':
    # Expose the pipeline as a CLI: python-fire turns every parameter of
    # imageAdaptiveClass into a command-line argument/flag.
    fire.Fire(imageAdaptiveClass)
