# coding: utf-8
'''
31-class classification using a modified fully-connected LSTM.
'''
import datetime
import os

import numpy as np
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.layers import GRU, Input, multiply
from keras.layers import LSTM
from keras.layers.core import *
from keras.models import Sequential, Model
from keras.optimizers import rmsprop
from keras.regularizers import l2

TIME_STEPS = 20
VOCAB_SIZE = 10000


class PureRNN:
    """Plain stacked-RNN classifier (LSTM or GRU) with a dense softmax head."""

    def __init__(self, input_shape, class_num, rnn_layer_num, rnn_cell_num, hidden_num, keep_drop, lr, batch_size,
                 cell_type='lstm'):
        """Build and compile the model.

        :param input_shape: shape of one sample, e.g. (time_steps, feature_dim).
        :param class_num: number of output classes.
        :param rnn_layer_num: number of stacked recurrent layers.
        :param rnn_cell_num: units per recurrent layer.
        :param hidden_num: units of the dense layer before the classifier.
        :param keep_drop: rate passed to Dropout — NOTE(review): despite the
            name, Keras Dropout drops this fraction (it is not a keep prob).
        :param lr: learning rate for RMSprop.
        :param batch_size: unused here; kept for interface compatibility.
        :param cell_type: 'lstm' or 'gru' (case-insensitive).
        """
        self.model = self._build(input_shape, class_num, rnn_layer_num, rnn_cell_num, hidden_num, keep_drop, lr,
                                 cell_type)
        # Timestamp id used as the checkpoint sub-directory name in train().
        self.id = datetime.datetime.now().strftime('%m_%d_%H_%M')

    def _build(self, input_shape, class_num, rnn_layer_num, rnn_cell_num, hidden_num, keep_drop, lr, cell_type):
        """Assemble the Sequential model and compile it with RMSprop.

        :raises AttributeError: if cell_type is neither 'lstm' nor 'gru'.
        """
        cell_kind = cell_type.lower()
        if cell_kind == 'lstm':
            cell = LSTM
        elif cell_kind == 'gru':
            cell = GRU
        else:
            raise AttributeError('Cell type is only for lstm or gru')

        model = Sequential(name='PureRNN')
        if rnn_layer_num < 2:
            # Single recurrent layer: emit only the final hidden state.
            model.add(cell(rnn_cell_num, input_shape=input_shape))
        else:
            # All layers except the last must return full sequences so the
            # next recurrent layer receives 3D (batch, time, features) input.
            model.add(cell(rnn_cell_num, return_sequences=True, input_shape=input_shape))
            for _ in range(rnn_layer_num - 2):
                model.add(cell(rnn_cell_num, return_sequences=True))
            model.add(cell(rnn_cell_num))

        model.add(Dense(hidden_num))
        model.add(Activation('tanh'))
        model.add(Dropout(keep_drop))
        model.add(Dense(class_num))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer=rmsprop(lr=lr),
                      metrics=['accuracy'])
        return model

    def train(self, x_train, y_train, x_val, y_val, batch_size=32, epochs=100, early_stop=True,
              train_dir='saved_model/pure_rnn'):
        """Fit the model, checkpointing the best-val_acc weights under
        ``train_dir/<self.id>`` and optionally early-stopping on val_acc.
        """
        callbacks = []
        os.makedirs(os.path.join(train_dir, self.id), exist_ok=True)
        callbacks.append(ModelCheckpoint(
            os.path.join(train_dir, self.id, 'weights.epoch_{epoch:02d}-val_acc_{val_acc:.4f}.hdf5'),
            monitor='val_acc',
            verbose=1, save_best_only=True, mode='max'))
        if early_stop:
            callbacks.append(EarlyStopping(monitor='val_acc', verbose=1, patience=20, mode='max'))
        # BUG FIX: the callbacks list was built but never handed to fit(), so
        # no checkpoint was ever saved and early stopping never fired.  Also
        # replaced the deprecated `nb_epoch` keyword with `epochs`.
        self.model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, shuffle=True,
                       validation_data=(x_val, y_val), callbacks=callbacks)

    def predict(self, x):
        """Return predicted class indices for a batch of inputs."""
        return self.model.predict_classes(x)


class AttentionRNN:
    """Stacked RNN with a temporal attention block before the classifier head.

    Attention weights are computed per time step (sequence length must equal
    the module-level TIME_STEPS constant, enforced by the Reshape below).
    """

    def __init__(self, input_shape, class_num, rnn_layer_num, rnn_cell_num, hidden_num, keep_drop, lr, batch_size,
                 cell_type='lstm', single_attention_vector=False):
        """Build and compile the model.

        :param input_shape: shape of one sample, e.g. (TIME_STEPS, feature_dim).
        :param class_num: number of output classes.
        :param rnn_layer_num: number of stacked recurrent layers.
        :param rnn_cell_num: units per recurrent layer.
        :param hidden_num: units of the dense layer before the classifier.
        :param keep_drop: rate passed to Dropout — NOTE(review): despite the
            name, Keras Dropout drops this fraction (it is not a keep prob).
        :param lr: learning rate for RMSprop.
        :param batch_size: unused here; kept for interface compatibility.
        :param cell_type: 'lstm' or 'gru' (case-insensitive).
        :param single_attention_vector: if True, share one attention vector
            across all feature dimensions instead of one per dimension.
        """
        self.id = datetime.datetime.now().strftime('%m_%d_%H_%M')
        self.class_num = class_num
        self.single_attention_vector = single_attention_vector
        self.model = self._build(input_shape, class_num, rnn_layer_num, rnn_cell_num, hidden_num, keep_drop, lr,
                                 cell_type)

    def _build(self, input_shape, class_num, rnn_layer_num, rnn_cell_num, hidden_num, keep_drop, lr, cell_type):
        """Assemble the functional-API model and compile it with RMSprop.

        :raises AttributeError: if cell_type is neither 'lstm' nor 'gru'.
        """
        cell_kind = cell_type.lower()
        if cell_kind == 'lstm':
            cell = LSTM
        elif cell_kind == 'gru':
            cell = GRU
        else:
            raise AttributeError('Cell type is only for lstm or gru')

        inputs = Input(shape=input_shape)

        if rnn_layer_num < 2:
            lstm = cell(rnn_cell_num, input_shape=input_shape, return_sequences=True)(inputs)
        else:
            lstm = cell(rnn_cell_num, return_sequences=True, input_shape=input_shape)(inputs)
            for _ in range(rnn_layer_num - 2):
                lstm = cell(rnn_cell_num, return_sequences=True)(lstm)
            # BUG FIX: the last layer previously omitted return_sequences=True,
            # so it emitted a 2D (batch, units) tensor and the attention block
            # crashed at `inputs.shape[2]`.  Attention needs the full
            # (batch, time, features) sequence, as in the single-layer branch.
            lstm = cell(rnn_cell_num, return_sequences=True)(lstm)

        attention_mul = self._attention_3d_block(lstm)
        attention_mul = Flatten()(attention_mul)

        dense = Dense(hidden_num, kernel_regularizer=l2(0.03))(attention_mul)
        act1 = Activation('tanh')(dense)
        dropout = Dropout(keep_drop)(act1)
        output = Dense(class_num)(dropout)
        output = Activation('softmax')(output)
        model = Model(inputs=inputs, outputs=output)

        model.compile(loss='categorical_crossentropy', optimizer=rmsprop(lr=lr),
                      metrics=['accuracy'])
        return model

    def _attention_3d_block(self, inputs):
        """Scale `inputs` by softmax attention weights learned over time steps.

        :param inputs: 3D tensor (batch_size, time_steps, input_dim); the time
            axis must be TIME_STEPS long.
        :return: tensor of the same shape, element-wise weighted by attention.
        """
        input_dim = int(inputs.shape[2])
        # Transpose to (batch, input_dim, time) so Dense(softmax) normalizes
        # across the time axis.
        a = Permute((2, 1))(inputs)
        a = Reshape((input_dim, TIME_STEPS))(a)  # no-op; documents the layout.
        a = Dense(TIME_STEPS, activation='softmax')(a)
        if self.single_attention_vector:
            # Collapse to one attention vector shared by every feature dim.
            a = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction')(a)
            a = RepeatVector(input_dim)(a)
        a_probs = Permute((2, 1), name='attention_vec')(a)
        output_attention_mul = multiply([inputs, a_probs])
        return output_attention_mul

    def train(self, x_train, y_train, x_val, y_val, batch_size=32, epochs=100, early_stop=True,
              train_dir='saved_model/attention_rnn'):
        """Fit the model, checkpointing the best-val_acc weights under
        ``train_dir/<self.id>`` and optionally early-stopping on val_acc.
        """
        callbacks = []
        os.makedirs(os.path.join(train_dir, self.id), exist_ok=True)
        callbacks.append(ModelCheckpoint(
            os.path.join(train_dir, self.id, 'weights.epoch_{epoch:02d}-val_acc_{val_acc:.4f}.hdf5'),
            monitor='val_acc',
            verbose=1, save_best_only=True, mode='max'))
        if early_stop:
            callbacks.append(EarlyStopping(monitor='val_acc', verbose=1, patience=20, mode='max'))
        # BUG FIX: the callbacks list was built but never handed to fit(), so
        # no checkpoint was ever saved and early stopping never fired.  Also
        # replaced the deprecated `nb_epoch` keyword with `epochs`.
        self.model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, shuffle=True,
                       validation_data=(x_val, y_val), callbacks=callbacks)

    def predict(self, x):
        """Return predicted class indices (argmax over the softmax output)."""
        pred = self.model.predict(x)
        pred = np.argmax(pred, axis=1)
        return pred

if __name__ == '__main__':
    # Smoke-build a small attention model: one 50-unit recurrent layer,
    # 20-unit dense head, binary softmax output.
    model = AttentionRNN(
        input_shape=(TIME_STEPS, 40),
        class_num=2,
        rnn_layer_num=1,
        rnn_cell_num=50,
        hidden_num=20,
        keep_drop=0.1,
        lr=0.001,
        batch_size=32,
        single_attention_vector=True,
    )