{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(1000, 1)\n",
      "Epoch 1/100\n",
      "1000/1000 [==============================] - 1s 526us/step - loss: 0.7044 - acc: 0.5020\n",
      "Epoch 2/100\n",
      "1000/1000 [==============================] - 0s 99us/step - loss: 0.6932 - acc: 0.5310\n",
      "Epoch 3/100\n",
      "1000/1000 [==============================] - 0s 105us/step - loss: 0.6845 - acc: 0.5470\n",
      "Epoch 4/100\n",
      "1000/1000 [==============================] - 0s 102us/step - loss: 0.6796 - acc: 0.5610\n",
      "Epoch 5/100\n",
      "1000/1000 [==============================] - 0s 87us/step - loss: 0.6759 - acc: 0.5650\n",
      "Epoch 6/100\n",
      "1000/1000 [==============================] - 0s 87us/step - loss: 0.6695 - acc: 0.5950\n",
      "Epoch 7/100\n",
      "1000/1000 [==============================] - 0s 89us/step - loss: 0.6679 - acc: 0.6050\n",
      "Epoch 8/100\n",
      "1000/1000 [==============================] - 0s 88us/step - loss: 0.6624 - acc: 0.6030\n",
      "Epoch 9/100\n",
      "1000/1000 [==============================] - 0s 93us/step - loss: 0.6590 - acc: 0.6100\n",
      "Epoch 10/100\n",
      "1000/1000 [==============================] - 0s 95us/step - loss: 0.6569 - acc: 0.6160\n",
      "Epoch 11/100\n",
      "1000/1000 [==============================] - 0s 85us/step - loss: 0.6540 - acc: 0.6170\n",
      "Epoch 12/100\n",
      "1000/1000 [==============================] - 0s 91us/step - loss: 0.6496 - acc: 0.6150\n",
      "Epoch 13/100\n",
      "1000/1000 [==============================] - 0s 88us/step - loss: 0.6447 - acc: 0.6310\n",
      "Epoch 14/100\n",
      "1000/1000 [==============================] - 0s 105us/step - loss: 0.6451 - acc: 0.6340\n",
      "Epoch 15/100\n",
      "1000/1000 [==============================] - 0s 98us/step - loss: 0.6391 - acc: 0.6320\n",
      "Epoch 16/100\n",
      "1000/1000 [==============================] - 0s 92us/step - loss: 0.6385 - acc: 0.6400\n",
      "Epoch 17/100\n",
      "1000/1000 [==============================] - 0s 83us/step - loss: 0.6373 - acc: 0.6340\n",
      "Epoch 18/100\n",
      "1000/1000 [==============================] - 0s 109us/step - loss: 0.6310 - acc: 0.6490\n",
      "Epoch 19/100\n",
      "1000/1000 [==============================] - 0s 90us/step - loss: 0.6271 - acc: 0.6400\n",
      "Epoch 20/100\n",
      "1000/1000 [==============================] - 0s 98us/step - loss: 0.6268 - acc: 0.6400\n",
      "Epoch 21/100\n",
      "1000/1000 [==============================] - 0s 91us/step - loss: 0.6242 - acc: 0.6580\n",
      "Epoch 22/100\n",
      "1000/1000 [==============================] - 0s 91us/step - loss: 0.6185 - acc: 0.6560\n",
      "Epoch 23/100\n",
      "1000/1000 [==============================] - 0s 81us/step - loss: 0.6149 - acc: 0.6640\n",
      "Epoch 24/100\n",
      "1000/1000 [==============================] - 0s 104us/step - loss: 0.6101 - acc: 0.6800\n",
      "Epoch 25/100\n",
      "1000/1000 [==============================] - 0s 101us/step - loss: 0.6090 - acc: 0.6640\n",
      "Epoch 26/100\n",
      "1000/1000 [==============================] - 0s 97us/step - loss: 0.6040 - acc: 0.6720\n",
      "Epoch 27/100\n",
      "1000/1000 [==============================] - 0s 86us/step - loss: 0.6011 - acc: 0.6860\n",
      "Epoch 28/100\n",
      "1000/1000 [==============================] - 0s 85us/step - loss: 0.5983 - acc: 0.6790\n",
      "Epoch 29/100\n",
      "1000/1000 [==============================] - 0s 84us/step - loss: 0.5944 - acc: 0.6940\n",
      "Epoch 30/100\n",
      "1000/1000 [==============================] - 0s 91us/step - loss: 0.5942 - acc: 0.6940\n",
      "Epoch 31/100\n",
      "1000/1000 [==============================] - 0s 97us/step - loss: 0.5891 - acc: 0.6860\n",
      "Epoch 32/100\n",
      "1000/1000 [==============================] - 0s 80us/step - loss: 0.5835 - acc: 0.7080\n",
      "Epoch 33/100\n",
      "1000/1000 [==============================] - 0s 99us/step - loss: 0.5816 - acc: 0.7060\n",
      "Epoch 34/100\n",
      "1000/1000 [==============================] - 0s 102us/step - loss: 0.5799 - acc: 0.7030\n",
      "Epoch 35/100\n",
      "1000/1000 [==============================] - 0s 96us/step - loss: 0.5725 - acc: 0.7070\n",
      "Epoch 36/100\n",
      "1000/1000 [==============================] - 0s 120us/step - loss: 0.5677 - acc: 0.7200\n",
      "Epoch 37/100\n",
      "1000/1000 [==============================] - 0s 81us/step - loss: 0.5663 - acc: 0.7260\n",
      "Epoch 38/100\n",
      "1000/1000 [==============================] - 0s 95us/step - loss: 0.5575 - acc: 0.7290\n",
      "Epoch 39/100\n",
      "1000/1000 [==============================] - 0s 91us/step - loss: 0.5576 - acc: 0.7370\n",
      "Epoch 40/100\n",
      "1000/1000 [==============================] - 0s 105us/step - loss: 0.5555 - acc: 0.7260\n",
      "Epoch 41/100\n",
      "1000/1000 [==============================] - 0s 92us/step - loss: 0.5495 - acc: 0.7400\n",
      "Epoch 42/100\n",
      "1000/1000 [==============================] - 0s 93us/step - loss: 0.5436 - acc: 0.7580\n",
      "Epoch 43/100\n",
      "1000/1000 [==============================] - 0s 88us/step - loss: 0.5386 - acc: 0.7510\n",
      "Epoch 44/100\n",
      "1000/1000 [==============================] - 0s 107us/step - loss: 0.5376 - acc: 0.7660\n",
      "Epoch 45/100\n",
      "1000/1000 [==============================] - 0s 103us/step - loss: 0.5313 - acc: 0.7650\n",
      "Epoch 46/100\n",
      "1000/1000 [==============================] - 0s 113us/step - loss: 0.5286 - acc: 0.7650\n",
      "Epoch 47/100\n",
      "1000/1000 [==============================] - 0s 103us/step - loss: 0.5241 - acc: 0.7740\n",
      "Epoch 48/100\n",
      "1000/1000 [==============================] - 0s 113us/step - loss: 0.5166 - acc: 0.7710\n",
      "Epoch 49/100\n",
      "1000/1000 [==============================] - 0s 94us/step - loss: 0.5127 - acc: 0.7790\n",
      "Epoch 50/100\n",
      "1000/1000 [==============================] - 0s 100us/step - loss: 0.5132 - acc: 0.7800\n",
      "Epoch 51/100\n",
      "1000/1000 [==============================] - 0s 105us/step - loss: 0.5063 - acc: 0.7710\n",
      "Epoch 52/100\n",
      "1000/1000 [==============================] - 0s 99us/step - loss: 0.5012 - acc: 0.8020\n",
      "Epoch 53/100\n",
      "1000/1000 [==============================] - 0s 85us/step - loss: 0.4983 - acc: 0.8000\n",
      "Epoch 54/100\n",
      "1000/1000 [==============================] - 0s 97us/step - loss: 0.4931 - acc: 0.7960\n",
      "Epoch 55/100\n",
      "1000/1000 [==============================] - 0s 99us/step - loss: 0.4866 - acc: 0.8000\n",
      "Epoch 56/100\n",
      "1000/1000 [==============================] - 0s 106us/step - loss: 0.4833 - acc: 0.8020\n",
      "Epoch 57/100\n",
      "1000/1000 [==============================] - 0s 84us/step - loss: 0.4763 - acc: 0.8120\n",
      "Epoch 58/100\n",
      "1000/1000 [==============================] - 0s 90us/step - loss: 0.4727 - acc: 0.8040\n",
      "Epoch 59/100\n",
      "1000/1000 [==============================] - 0s 83us/step - loss: 0.4717 - acc: 0.8140\n",
      "Epoch 60/100\n",
      "1000/1000 [==============================] - 0s 94us/step - loss: 0.4629 - acc: 0.8220\n",
      "Epoch 61/100\n",
      "1000/1000 [==============================] - 0s 79us/step - loss: 0.4624 - acc: 0.8160\n",
      "Epoch 62/100\n",
      "1000/1000 [==============================] - 0s 81us/step - loss: 0.4536 - acc: 0.8310\n",
      "Epoch 63/100\n",
      "1000/1000 [==============================] - 0s 98us/step - loss: 0.4508 - acc: 0.8300\n",
      "Epoch 64/100\n",
      "1000/1000 [==============================] - 0s 103us/step - loss: 0.4469 - acc: 0.8380\n",
      "Epoch 65/100\n",
      "1000/1000 [==============================] - 0s 118us/step - loss: 0.4411 - acc: 0.8310\n",
      "Epoch 66/100\n",
      "1000/1000 [==============================] - 0s 103us/step - loss: 0.4358 - acc: 0.8490\n",
      "Epoch 67/100\n",
      "1000/1000 [==============================] - 0s 93us/step - loss: 0.4319 - acc: 0.8490\n",
      "Epoch 68/100\n",
      "1000/1000 [==============================] - 0s 93us/step - loss: 0.4263 - acc: 0.8430\n",
      "Epoch 69/100\n",
      "1000/1000 [==============================] - 0s 83us/step - loss: 0.4202 - acc: 0.8580\n",
      "Epoch 70/100\n",
      "1000/1000 [==============================] - 0s 89us/step - loss: 0.4210 - acc: 0.8530\n",
      "Epoch 71/100\n",
      "1000/1000 [==============================] - 0s 97us/step - loss: 0.4131 - acc: 0.8680\n",
      "Epoch 72/100\n",
      "1000/1000 [==============================] - 0s 93us/step - loss: 0.4075 - acc: 0.8620\n",
      "Epoch 73/100\n",
      "1000/1000 [==============================] - 0s 88us/step - loss: 0.4027 - acc: 0.8720\n",
      "Epoch 74/100\n",
      "1000/1000 [==============================] - 0s 107us/step - loss: 0.3990 - acc: 0.8850\n",
      "Epoch 75/100\n",
      "1000/1000 [==============================] - 0s 105us/step - loss: 0.3967 - acc: 0.8810\n",
      "Epoch 76/100\n",
      "1000/1000 [==============================] - 0s 105us/step - loss: 0.3921 - acc: 0.8750\n",
      "Epoch 77/100\n",
      "1000/1000 [==============================] - 0s 119us/step - loss: 0.3878 - acc: 0.8820\n",
      "Epoch 78/100\n",
      "1000/1000 [==============================] - 0s 118us/step - loss: 0.3808 - acc: 0.8910\n",
      "Epoch 79/100\n",
      "1000/1000 [==============================] - 0s 88us/step - loss: 0.3770 - acc: 0.8870\n",
      "Epoch 80/100\n",
      "1000/1000 [==============================] - 0s 88us/step - loss: 0.3710 - acc: 0.8970\n",
      "Epoch 81/100\n",
      "1000/1000 [==============================] - 0s 88us/step - loss: 0.3688 - acc: 0.8920\n",
      "Epoch 82/100\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1000/1000 [==============================] - 0s 83us/step - loss: 0.3647 - acc: 0.8940\n",
      "Epoch 83/100\n",
      "1000/1000 [==============================] - 0s 97us/step - loss: 0.3566 - acc: 0.8890\n",
      "Epoch 84/100\n",
      "1000/1000 [==============================] - 0s 87us/step - loss: 0.3513 - acc: 0.9040\n",
      "Epoch 85/100\n",
      "1000/1000 [==============================] - 0s 77us/step - loss: 0.3502 - acc: 0.9010\n",
      "Epoch 86/100\n",
      "1000/1000 [==============================] - 0s 123us/step - loss: 0.3482 - acc: 0.9020\n",
      "Epoch 87/100\n",
      "1000/1000 [==============================] - 0s 129us/step - loss: 0.3375 - acc: 0.9150\n",
      "Epoch 88/100\n",
      "1000/1000 [==============================] - 0s 89us/step - loss: 0.3368 - acc: 0.9170\n",
      "Epoch 89/100\n",
      "1000/1000 [==============================] - 0s 104us/step - loss: 0.3369 - acc: 0.9070\n",
      "Epoch 90/100\n",
      "1000/1000 [==============================] - 0s 89us/step - loss: 0.3282 - acc: 0.9220\n",
      "Epoch 91/100\n",
      "1000/1000 [==============================] - 0s 93us/step - loss: 0.3274 - acc: 0.9140\n",
      "Epoch 92/100\n",
      "1000/1000 [==============================] - 0s 80us/step - loss: 0.3213 - acc: 0.9200\n",
      "Epoch 93/100\n",
      "1000/1000 [==============================] - 0s 86us/step - loss: 0.3151 - acc: 0.9190\n",
      "Epoch 94/100\n",
      "1000/1000 [==============================] - 0s 94us/step - loss: 0.3102 - acc: 0.9340\n",
      "Epoch 95/100\n",
      "1000/1000 [==============================] - 0s 88us/step - loss: 0.3120 - acc: 0.9160\n",
      "Epoch 96/100\n",
      "1000/1000 [==============================] - 0s 101us/step - loss: 0.2969 - acc: 0.9350\n",
      "Epoch 97/100\n",
      "1000/1000 [==============================] - 0s 100us/step - loss: 0.2993 - acc: 0.9390\n",
      "Epoch 98/100\n",
      "1000/1000 [==============================] - 0s 115us/step - loss: 0.2985 - acc: 0.9410\n",
      "Epoch 99/100\n",
      "1000/1000 [==============================] - 0s 97us/step - loss: 0.2913 - acc: 0.9400\n",
      "Epoch 100/100\n",
      "1000/1000 [==============================] - 0s 89us/step - loss: 0.2882 - acc: 0.9350\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.callbacks.History at 0x7f788837d438>"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Binary classifier demo: 100-feature random inputs -> single sigmoid unit.\n",
    "from keras.models import Sequential\n",
    "from keras.layers import Dense, Activation\n",
    "model = Sequential()\n",
    "# One hidden layer of 32 ReLU units; input vectors have 100 features.\n",
    "model.add(Dense(32, activation='relu', input_dim=100))\n",
    "# Sigmoid output -> probability of the positive class.\n",
    "model.add(Dense(1, activation='sigmoid'))\n",
    "model.compile(optimizer='rmsprop',\n",
    "              loss='binary_crossentropy',\n",
    "              metrics=['accuracy'])\n",
    "\n",
    "# Generate dummy data\n",
    "import numpy as np\n",
    "# NOTE(review): no random seed is set, so results differ between runs.\n",
    "data = np.random.random((1000, 100))\n",
    "# Binary labels in {0, 1}, shape (1000, 1) -- matches the sigmoid output.\n",
    "labels = np.random.randint(2, size=(1000, 1))\n",
    "print(np.shape(labels))\n",
    "# Train the model, iterating on the data in batches of 32 samples\n",
    "model.fit(data, labels, epochs=100, batch_size=32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(1000, 100)\n",
      "<class 'numpy.ndarray'>\n",
      "Epoch 1/10\n",
      "1000/1000 [==============================] - 1s 720us/step - loss: 2.3631 - acc: 0.0960\n",
      "Epoch 2/10\n",
      "1000/1000 [==============================] - 0s 86us/step - loss: 2.3169 - acc: 0.1040\n",
      "Epoch 3/10\n",
      "1000/1000 [==============================] - 0s 96us/step - loss: 2.3021 - acc: 0.1150\n",
      "Epoch 4/10\n",
      "1000/1000 [==============================] - 0s 115us/step - loss: 2.2901 - acc: 0.1280\n",
      "Epoch 5/10\n",
      "1000/1000 [==============================] - 0s 94us/step - loss: 2.2818 - acc: 0.1350\n",
      "Epoch 6/10\n",
      "1000/1000 [==============================] - 0s 117us/step - loss: 2.2723 - acc: 0.1460\n",
      "Epoch 7/10\n",
      "1000/1000 [==============================] - 0s 131us/step - loss: 2.2648 - acc: 0.1520\n",
      "Epoch 8/10\n",
      "1000/1000 [==============================] - 0s 131us/step - loss: 2.2572 - acc: 0.1590\n",
      "Epoch 9/10\n",
      "1000/1000 [==============================] - 0s 120us/step - loss: 2.2489 - acc: 0.1690\n",
      "Epoch 10/10\n",
      "1000/1000 [==============================] - 0s 100us/step - loss: 2.2397 - acc: 0.1620\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.callbacks.History at 0x7f766894c898>"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Multiclass (10-way) classifier demo: softmax output + one-hot labels.\n",
    "# Relies on Sequential/Dense imported in the previous cell (runs top-to-bottom).\n",
    "import keras\n",
    "model = Sequential()\n",
    "model.add(Dense(32, activation='relu', input_dim=100))\n",
    "# 10 softmax units -> one probability per class.\n",
    "model.add(Dense(10, activation='softmax'))\n",
    "model.compile(optimizer='rmsprop',\n",
    "              loss='categorical_crossentropy',\n",
    "              metrics=['accuracy'])\n",
    "\n",
    "# Generate dummy data\n",
    "import numpy as np\n",
    "data = np.random.random((1000, 100))\n",
    "# Integer class ids in [0, 10); converted to one-hot below.\n",
    "labels = np.random.randint(10, size=(1000, 1))\n",
    "print(np.shape(data))\n",
    "# Convert labels to categorical one-hot encoding\n",
    "one_hot_labels = keras.utils.to_categorical(labels, num_classes=10)\n",
    "print((type(one_hot_labels)))\n",
    "# Train the model, iterating on the data in batches of 32 samples\n",
    "model.fit(data, one_hot_labels, epochs=10, batch_size=32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[[0.35509893 0.51233105 0.20144143]\n",
      "  [0.99962804 0.0066869  0.90981903]\n",
      "  [0.14554155 0.22336358 0.24035474]\n",
      "  ...\n",
      "  [0.07594434 0.21283451 0.50692172]\n",
      "  [0.40381429 0.07239479 0.29684215]\n",
      "  [0.32194109 0.05292316 0.99415227]]\n",
      "\n",
      " [[0.76032418 0.99709771 0.04443819]\n",
      "  [0.4283055  0.94451469 0.49182717]\n",
      "  [0.51312902 0.24392922 0.54612605]\n",
      "  ...\n",
      "  [0.88472249 0.90006223 0.49265764]\n",
      "  [0.35348726 0.15200632 0.23856056]\n",
      "  [0.41242372 0.34151141 0.47218939]]\n",
      "\n",
      " [[0.39458278 0.07517516 0.79896676]\n",
      "  [0.2579382  0.60257243 0.13405307]\n",
      "  [0.30052569 0.14470888 0.82045887]\n",
      "  ...\n",
      "  [0.43038759 0.22140297 0.45005627]\n",
      "  [0.93166166 0.2279163  0.20655027]\n",
      "  [0.58758007 0.91301512 0.22507337]]\n",
      "\n",
      " ...\n",
      "\n",
      " [[0.37785374 0.78220051 0.02948516]\n",
      "  [0.18935976 0.14758568 0.64837992]\n",
      "  [0.72935765 0.90367468 0.41167399]\n",
      "  ...\n",
      "  [0.0451845  0.92544133 0.07173167]\n",
      "  [0.84385089 0.96028814 0.77052424]\n",
      "  [0.63530287 0.64615603 0.15229267]]\n",
      "\n",
      " [[0.14641363 0.36404463 0.50057129]\n",
      "  [0.51378986 0.27870303 0.01793441]\n",
      "  [0.41595899 0.17571813 0.89771051]\n",
      "  ...\n",
      "  [0.13488767 0.0862887  0.49504059]\n",
      "  [0.05816022 0.93809381 0.37760614]\n",
      "  [0.14237741 0.33951164 0.32886678]]\n",
      "\n",
      " [[0.14728134 0.75411469 0.99479653]\n",
      "  [0.04462523 0.68775662 0.59604997]\n",
      "  [0.78839153 0.22896796 0.86310221]\n",
      "  ...\n",
      "  [0.3144031  0.25221206 0.57587926]\n",
      "  [0.96554602 0.86815912 0.72551776]\n",
      "  [0.0548676  0.92982376 0.66553105]]]\n",
      "Epoch 1/10\n",
      "100/100 [==============================] - 2s 17ms/step - loss: 2.3609\n",
      "Epoch 2/10\n",
      "100/100 [==============================] - 1s 6ms/step - loss: 2.3030\n",
      "Epoch 3/10\n",
      "100/100 [==============================] - 1s 6ms/step - loss: 2.3153\n",
      "Epoch 4/10\n",
      "100/100 [==============================] - 1s 6ms/step - loss: 2.3160\n",
      "Epoch 5/10\n",
      "100/100 [==============================] - 1s 5ms/step - loss: 2.2848\n",
      "Epoch 6/10\n",
      "100/100 [==============================] - 1s 7ms/step - loss: 2.2800\n",
      "Epoch 7/10\n",
      "100/100 [==============================] - 1s 6ms/step - loss: 2.2966\n",
      "Epoch 8/10\n",
      "100/100 [==============================] - 1s 6ms/step - loss: 2.3086\n",
      "Epoch 9/10\n",
      "100/100 [==============================] - 1s 6ms/step - loss: 2.2981\n",
      "Epoch 10/10\n",
      "100/100 [==============================] - 1s 5ms/step - loss: 2.2871\n",
      "20/20 [==============================] - 0s 13ms/step\n"
     ]
    }
   ],
   "source": [
    "# Small VGG-like CNN demo on random 100x100 RGB images, 10 classes.\n",
    "import numpy as np\n",
    "import keras\n",
    "from keras.models import Sequential\n",
    "from keras.layers import Dense, Dropout, Flatten\n",
    "from keras.layers import Conv2D, MaxPooling2D\n",
    "from keras.optimizers import SGD\n",
    "\n",
    "# Generate dummy data\n",
    "# 100 random images of shape (100, 100, 3) -- channels-last layout.\n",
    "x_train = np.random.random((100, 100, 100, 3))\n",
    "\n",
    "y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)\n",
    "x_test = np.random.random((20, 100, 100, 3))\n",
    "y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)\n",
    "\n",
    "model = Sequential()\n",
    "# input: 100x100 images with 3 channels -> (100, 100, 3) tensors.\n",
    "# this applies 32 convolution filters of size 3x3 each.\n",
    "model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))\n",
    "model.add(Conv2D(32, (3, 3), activation='relu'))\n",
    "model.add(MaxPooling2D(pool_size=(2, 2)))\n",
    "model.add(Dropout(0.25))\n",
    "\n",
    "model.add(Conv2D(64, (3, 3), activation='relu'))\n",
    "model.add(Conv2D(64, (3, 3), activation='relu'))\n",
    "model.add(MaxPooling2D(pool_size=(2, 2)))\n",
    "model.add(Dropout(0.25))\n",
    "\n",
    "# Flatten conv feature maps before the dense classifier head.\n",
    "model.add(Flatten())\n",
    "model.add(Dense(256, activation='relu'))\n",
    "model.add(Dropout(0.5))\n",
    "model.add(Dense(10, activation='softmax'))\n",
    "\n",
    "# SGD with Nesterov momentum; lr decays slightly each update (old Keras API).\n",
    "sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\n",
    "model.compile(loss='categorical_crossentropy', optimizer=sgd)\n",
    "\n",
    "model.fit(x_train, y_train, batch_size=32, epochs=10)\n",
    "score = model.evaluate(x_test, y_test, batch_size=32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[0.83058547 0.21022561 0.75756909 ... 0.81888062 0.55739231 0.50196089]\n",
      " [0.59255148 0.25485721 0.79170688 ... 0.00637492 0.46680298 0.2677139 ]\n",
      " [0.49294116 0.23428845 0.3651189  ... 0.96108648 0.35980837 0.24739918]\n",
      " ...\n",
      " [0.26173503 0.39793571 0.29905779 ... 0.89036362 0.69655774 0.20347083]\n",
      " [0.92373776 0.94230026 0.00175105 ... 0.62647792 0.92114621 0.23187652]\n",
      " [0.90749982 0.28865667 0.33188777 ... 0.22875544 0.78686939 0.07082126]]\n",
      "Epoch 1/10\n",
      "1000/1000 [==============================] - 23s 23ms/step - loss: -48.2343 - acc: 0.0940\n",
      "Epoch 2/10\n",
      "1000/1000 [==============================] - 21s 21ms/step - loss: -54.5548 - acc: 0.0940\n",
      "Epoch 3/10\n",
      "1000/1000 [==============================] - 22s 22ms/step - loss: -54.6145 - acc: 0.0940\n",
      "Epoch 4/10\n",
      "1000/1000 [==============================] - 21s 21ms/step - loss: -54.6186 - acc: 0.0940\n",
      "Epoch 5/10\n",
      "1000/1000 [==============================] - 21s 21ms/step - loss: -54.6117 - acc: 0.0940\n",
      "Epoch 6/10\n",
      "1000/1000 [==============================] - 21s 21ms/step - loss: -54.6131 - acc: 0.0940\n",
      "Epoch 7/10\n",
      "1000/1000 [==============================] - 21s 21ms/step - loss: -54.6186 - acc: 0.0940\n",
      "Epoch 8/10\n",
      "1000/1000 [==============================] - 21s 21ms/step - loss: -54.6158 - acc: 0.0940\n",
      "Epoch 9/10\n",
      "1000/1000 [==============================] - 21s 21ms/step - loss: -54.6186 - acc: 0.0940\n",
      "Epoch 10/10\n",
      "1000/1000 [==============================] - 22s 22ms/step - loss: -54.6186 - acc: 0.0940\n"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "Error when checking input: expected embedding_5_input to have 2 dimensions, but got array with shape (20, 100, 100, 3)",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-28-2eda0a713303>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     18\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     19\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx_train\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_train\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m16\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mepochs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m10\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 20\u001b[0;31m \u001b[0mscore\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevaluate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx_test\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_test\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m16\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/keras/engine/training.py\u001b[0m in \u001b[0;36mevaluate\u001b[0;34m(self, x, y, batch_size, verbose, sample_weight, steps)\u001b[0m\n\u001b[1;32m   1098\u001b[0m             \u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1099\u001b[0m             \u001b[0msample_weight\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msample_weight\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1100\u001b[0;31m             batch_size=batch_size)\n\u001b[0m\u001b[1;32m   1101\u001b[0m         \u001b[0;31m# Prepare inputs, delegate logic to `test_loop`.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1102\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_uses_dynamic_learning_phase\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/keras/engine/training.py\u001b[0m in \u001b[0;36m_standardize_user_data\u001b[0;34m(self, x, y, sample_weight, class_weight, check_array_lengths, batch_size)\u001b[0m\n\u001b[1;32m    747\u001b[0m             \u001b[0mfeed_input_shapes\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    748\u001b[0m             \u001b[0mcheck_batch_axis\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m  \u001b[0;31m# Don't enforce the batch size.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 749\u001b[0;31m             exception_prefix='input')\n\u001b[0m\u001b[1;32m    750\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    751\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0my\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/keras/engine/training_utils.py\u001b[0m in \u001b[0;36mstandardize_input_data\u001b[0;34m(data, names, shapes, check_batch_axis, exception_prefix)\u001b[0m\n\u001b[1;32m    125\u001b[0m                         \u001b[0;34m': expected '\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mnames\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m' to have '\u001b[0m \u001b[0;34m+\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    126\u001b[0m                         \u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m' dimensions, but got array '\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 127\u001b[0;31m                         'with shape ' + str(data_shape))\n\u001b[0m\u001b[1;32m    128\u001b[0m                 \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mcheck_batch_axis\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    129\u001b[0m                     \u001b[0mdata_shape\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdata_shape\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mValueError\u001b[0m: Error when checking input: expected embedding_5_input to have 2 dimensions, but got array with shape (20, 100, 100, 3)"
     ]
    }
   ],
   "source": [
    "# LSTM-over-embeddings binary classifier demo on random token sequences.\n",
    "from keras.models import Sequential\n",
    "from keras.layers import Dense, Dropout\n",
    "from keras.layers import Embedding\n",
    "from keras.layers import LSTM\n",
    "\n",
    "# Vocabulary size: Embedding inputs must be integer ids in [0, max_features).\n",
    "max_features = 100\n",
    "# BUG FIX: the Embedding layer expects integer token indices, not random\n",
    "# floats, and binary_crossentropy with a sigmoid output expects labels in\n",
    "# {0, 1}. Labels drawn from [0, 10) produced the impossible negative losses\n",
    "# recorded in this cell's output (loss ~ -54).\n",
    "x_train = np.random.randint(max_features, size=(1000, 100))\n",
    "print(np.shape(x_train))\n",
    "y_train = np.random.randint(2, size=(1000, 1))\n",
    "\n",
    "model = Sequential()\n",
    "model.add(Embedding(max_features, output_dim=256))\n",
    "model.add(LSTM(128))\n",
    "model.add(Dropout(0.5))\n",
    "model.add(Dense(1, activation='sigmoid'))\n",
    "\n",
    "model.compile(loss='binary_crossentropy',\n",
    "              optimizer='rmsprop',\n",
    "              metrics=['accuracy'])\n",
    "\n",
    "model.fit(x_train, y_train, batch_size=16, epochs=10)\n",
    "#score = model.evaluate(x_test, y_test, batch_size=16)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Dummy batch: 32 sequences of 10 integer token ids, each in [0, 1000).\n",
    "input_array = np.random.randint(1000, size=(32, 10))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(32, 10)"
      ]
     },
     "execution_count": 31,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Confirm the dummy batch shape: (batch size, sequence length).\n",
    "input_array.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Standalone Embedding-layer demo: map integer ids to 64-dim vectors.\n",
    "model = Sequential()\n",
    "# Vocabulary of 1000 tokens, 64-dim embeddings, fixed input length 10.\n",
    "model.add(Embedding(1000, 64, input_length=10))\n",
    "# the model will take as input an integer matrix of size (batch, input_length).\n",
    "# the largest integer (i.e. word index) in the input should be no larger than 999 (vocabulary size).\n",
    "# now model.output_shape == (None, 10, 64), where None is the batch dimension.\n",
    "\n",
    "input_array = np.random.randint(1000, size=(32, 10))\n",
    "\n",
    "model.compile('rmsprop', 'mse')\n",
    "output_array = model.predict(input_array)\n",
    "# Each of the 32x10 token ids maps to one 64-dim embedding vector.\n",
    "assert output_array.shape == (32, 10, 64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[[-0.04500184 -0.01970725 -0.00953742 ... -0.02298778 -0.02077987\n",
      "   -0.01409354]\n",
      "  [-0.04436594 -0.02491027  0.00550988 ...  0.02500151  0.02646646\n",
      "   -0.0233621 ]\n",
      "  [-0.04746424 -0.01483731  0.02343686 ... -0.00502846 -0.00478787\n",
      "   -0.02038908]\n",
      "  ...\n",
      "  [-0.00446964  0.02333269  0.04115755 ...  0.00967966 -0.00925871\n",
      "    0.02962151]\n",
      "  [ 0.00578    -0.01918393 -0.02849885 ... -0.01344421  0.01880512\n",
      "    0.04046163]\n",
      "  [-0.03221711  0.02678839  0.03209445 ...  0.04094783  0.04918135\n",
      "    0.04926416]]\n",
      "\n",
      " [[-0.02956951  0.00153489 -0.04250584 ...  0.03026488 -0.03427526\n",
      "   -0.04028121]\n",
      "  [-0.02249992  0.00022874 -0.02191315 ...  0.02925037 -0.04634261\n",
      "    0.0281576 ]\n",
      "  [-0.015022    0.04217513 -0.00298848 ...  0.03961286  0.04850599\n",
      "   -0.01174487]\n",
      "  ...\n",
      "  [ 0.0391278  -0.02705768 -0.02704747 ...  0.00821186 -0.02644001\n",
      "   -0.01688623]\n",
      "  [ 0.03272783 -0.01134402  0.01990436 ...  0.04052139 -0.03238566\n",
      "   -0.02276134]\n",
      "  [-0.01256595 -0.01742699  0.02462817 ... -0.01976378 -0.02871933\n",
      "    0.03335825]]\n",
      "\n",
      " [[-0.01085303 -0.00859562  0.03061476 ... -0.03201013  0.02229278\n",
      "   -0.00040947]\n",
      "  [ 0.04568578 -0.02384825 -0.04715446 ... -0.03446347  0.00654227\n",
      "    0.01870871]\n",
      "  [-0.03678559  0.03444474  0.02749196 ... -0.00242338  0.03753159\n",
      "    0.04823175]\n",
      "  ...\n",
      "  [-0.02426746  0.00355179  0.01254309 ... -0.02674251  0.00706197\n",
      "   -0.01952015]\n",
      "  [ 0.01433656 -0.00691726 -0.02721478 ... -0.01923593 -0.01423086\n",
      "    0.00991955]\n",
      "  [ 0.03101671  0.02456944  0.03957106 ...  0.00579667  0.03534576\n",
      "   -0.03579266]]\n",
      "\n",
      " ...\n",
      "\n",
      " [[ 0.03848014  0.0378818   0.00772355 ...  0.04518135 -0.01373457\n",
      "    0.00426877]\n",
      "  [ 0.01007161  0.03225194 -0.02024194 ... -0.03480291 -0.03396866\n",
      "    0.03067756]\n",
      "  [-0.01131791 -0.00802735 -0.0306883  ...  0.02547428 -0.0326732\n",
      "    0.01776754]\n",
      "  ...\n",
      "  [-0.04218191 -0.03632576 -0.04928254 ... -0.04550457 -0.02556334\n",
      "   -0.04413868]\n",
      "  [-0.01653942 -0.02103608 -0.01419456 ... -0.00317425  0.01329372\n",
      "   -0.01271594]\n",
      "  [ 0.03969257 -0.02542006  0.04537349 ... -0.00620464 -0.01357381\n",
      "   -0.04938897]]\n",
      "\n",
      " [[ 0.02029473 -0.02890464  0.01644443 ...  0.03684414  0.0347071\n",
      "   -0.038642  ]\n",
      "  [ 0.03592901 -0.03812989 -0.02452654 ... -0.04800412 -0.04124557\n",
      "   -0.02073599]\n",
      "  [ 0.01533718  0.032183    0.02141172 ...  0.0145708  -0.00887119\n",
      "   -0.01884322]\n",
      "  ...\n",
      "  [-0.02949072 -0.03451384  0.04584143 ...  0.00382544  0.01146869\n",
      "    0.0356322 ]\n",
      "  [-0.04992468  0.01429767 -0.01997867 ...  0.02668966  0.00120602\n",
      "    0.02391184]\n",
      "  [ 0.04785017 -0.00071587  0.03890416 ...  0.00459441  0.02603314\n",
      "    0.02612717]]\n",
      "\n",
      " [[ 0.03650761 -0.03104782  0.03178755 ... -0.00205777 -0.00278015\n",
      "   -0.00125622]\n",
      "  [ 0.01864984 -0.03564513 -0.025721   ...  0.01406432  0.00706921\n",
      "    0.04348883]\n",
      "  [-0.03806282  0.00678202 -0.04980623 ...  0.03905075 -0.04716371\n",
      "   -0.01047158]\n",
      "  ...\n",
      "  [ 0.01143039 -0.02788148 -0.00960811 ...  0.02541864 -0.02995452\n",
      "   -0.01288588]\n",
      "  [-0.00906657  0.00282149  0.02446486 ... -0.01377632  0.02323413\n",
      "    0.00186298]\n",
      "  [-0.04143037  0.00035465  0.0076728  ... -0.00438095 -0.00010268\n",
      "    0.01887132]]]\n"
     ]
    }
   ],
   "source": [
    "# NOTE(review): this dumps the full (32, 10, 64) embedding array;\n",
    "# output_array.shape or a small slice would be far easier to read.\n",
    "print(output_array)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
