#encoding=utf8
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.embeddings import Embedding

# define documents
from keras_preprocessing.text import one_hot


def play_embedding():
    """Train a tiny sentiment classifier demonstrating the Embedding layer.

    Ten short documents are hash-encoded into integer sequences, padded to a
    fixed length, passed through a trainable word embedding, and classified
    with a single sigmoid unit. Prints the encodings, the model summary, and
    the final training-set accuracy.
    """
    import numpy as np

    docs = ['Well done!',
            'Good work',
            'Great effort',
            'nice work',
            'Excellent!',
            'Weak',
            'Poor effort!',
            'not good',
            'poor work',
            'Could have done better.']

    # Binary sentiment labels (1 = positive, 0 = negative). Must be a numpy
    # array: modern Keras (TF2) rejects plain Python lists as fit/evaluate
    # targets with a ValueError.
    labels = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])

    # Integer-encode each document by hashing words into a 50-id vocabulary.
    vocab_size = 50
    encoded_docs = [one_hot(d, vocab_size) for d in docs]
    print(encoded_docs)

    # Pad every sequence (at the end) to exactly 4 tokens.
    max_length = 4
    padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
    print(padded_docs)

    # Embedding -> Flatten -> Dense(sigmoid): a minimal binary classifier.
    model = Sequential()
    model.add(Embedding(vocab_size, 8, input_length=max_length))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))

    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
    print(model.summary())

    # Train silently, then report accuracy on the (tiny) training set.
    model.fit(padded_docs, labels, epochs=50, verbose=0)
    loss, accuracy = model.evaluate(padded_docs, labels, verbose=0)
    print('Accuracy: %f' % (accuracy*100))

def play_softmax():
    """Fit a small 10-way softmax classifier on random data.

    Builds a 100-input MLP with one hidden ReLU layer and a softmax output,
    then trains it for 6 epochs on 1000 random samples whose random class
    ids are one-hot encoded.
    """
    import keras
    import numpy as np

    # Two-layer MLP: 100 inputs -> 32 ReLU units -> 10-way softmax.
    net = Sequential()
    net.add(Dense(32, input_dim=100, activation="relu"))
    net.add(Dense(10, activation="softmax"))
    net.compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=['accuracy'])

    # Synthetic training set: 1000 random feature vectors with random labels.
    samples = np.random.random((1000, 100))
    class_ids = np.random.randint(10, size=(1000, 1))
    targets = keras.utils.to_categorical(class_ids, num_classes=10)
    net.fit(samples, targets, epochs=6, batch_size=100)

if __name__ == "__main__":
    # Only the softmax demo runs by default; call play_embedding() manually
    # to try the embedding example.
    play_softmax()