from __future__ import print_function

from keras.models import Sequential, Graph
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers.core import Activation, Dense, Flatten, Dropout, Reshape
from keras.layers.recurrent import LSTM
from keras.regularizers import l2
from keras import backend as K

from keras_extras.extra import TimeDistributedConvolution2D, TimeDistributedMaxPooling2D, TimeDistributedFlatten


def scale(x):
    """Standardize a tensor to zero mean and unit variance.

    Used as a custom activation on the input layer so raw pixel batches
    are normalized on the fly inside the graph.

    A small fuzz factor is added to the denominator so a constant input
    (std == 0) does not produce a division by zero / NaN.
    """
    return (x - K.mean(x)) / (K.std(x) + 1e-8)


def get_model():
    """Build the two-branch video + metadata regression model.

    Branches, merged in a ``Graph`` container:

    * ``conv``: a VGG-style stack of time-distributed 2D convolutions over
      30 frames of 1x128x128 input, followed by an LSTM that summarizes
      the frame sequence into a single 512-d vector.
    * ``meta``: a dense ReLU embedding of a 4-d metadata vector into 512-d.

    The two 512-d vectors are concatenated and projected through an
    L2-regularized dense layer down to a single scalar output.

    Returns:
        keras.models.Graph with inputs ``conv_input`` (shape
        (30, 1, 128, 128)) and ``meta_input`` (shape (4,)) and one scalar
        ``output``. The caller is expected to compile it.
    """
    conv = Sequential()
    # Per-batch standardization of the raw pixel input.
    conv.add(Activation(activation=scale, input_shape=(30, 1, 128, 128)))

    # VGG-style conv/relu/pool stages applied identically to each of the
    # 30 time steps; each stage halves the spatial resolution.
    # (The original also had a second relu after some pooling layers;
    # pooling of non-negative relu outputs stays non-negative, so that
    # extra relu was the identity and is dropped here.)
    for n_filters in (64, 128, 256, 512, 512, 512):
        conv.add(TimeDistributedConvolution2D(n_filters, 3, 3, border_mode='same'))
        conv.add(Activation('relu'))
        conv.add(TimeDistributedMaxPooling2D(pool_size=(2, 2)))

    conv.add(TimeDistributedFlatten())
    # Collapse the 30-step sequence into one 512-d summary vector.
    # return_sequences=False is required: the concat merge below combines
    # this branch with the 2-D output of the 'meta' branch, which is
    # incompatible with a 3-D (timesteps, features) sequence output.
    conv.add(LSTM(512, return_sequences=False))
    conv.add(Dropout(0.5))

    meta = Sequential()
    meta.add(Dense(512, input_dim=4))
    meta.add(Activation('relu'))

    model = Graph()
    model.add_input(name='conv_input', input_shape=(30, 1, 128, 128))
    model.add_input(name='meta_input', input_shape=(4,))
    model.add_node(conv, name='conv', input='conv_input')
    model.add_node(meta, name='meta', input='meta_input')
    # Concatenate the 512-d video summary with the 512-d metadata
    # embedding (1024-d total) and project through a weight-decayed layer.
    model.add_node(Dense(1536, W_regularizer=l2(1e-3)), name='merge', inputs=['conv', 'meta'], merge_mode='concat')

    model.add_node(Activation('relu'), name='merge_act', input='merge')
    model.add_node(Dropout(0.6), name='merge_do', input='merge_act')
    model.add_node(Dense(1), name='merge_out', input='merge_do')
    model.add_output(name='output', input='merge_out')
    return model
