nouamanetazi HF staff committed on
Commit
b800b58
1 Parent(s): e06c361

initial commit

Files changed (2)
  1. .gitignore +161 -0
  2. conv_lstm.ipynb +430 -0
.gitignore ADDED
@@ -0,0 +1,161 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # tests and logs
+ tests/fixtures/cached_*_text.txt
+ logs/
+ lightning_logs/
+ lang_code_data/
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ .hypothesis/
+ .pytest_cache/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ .python-version
+
+ # celery beat schedule file
+ celerybeat-schedule
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # vscode
+ .vs
+ .vscode
+
+ # Pycharm
+ .idea
+
+ # TF code
+ tensorflow_code
+
+ # Models
+ proc_data
+
+ # examples
+ runs
+ /runs_old
+ /wandb
+ /examples/runs
+ /examples/**/*.args
+ /examples/rag/sweep
+
+ # data
+ /data
+ serialization_dir
+
+ # emacs
+ *.*~
+ debug.env
+
+ # vim
+ .*.swp
+
+ # ctags
+ tags
+
+ # pre-commit
+ .pre-commit*
+
+ # .lock
+ *.lock
conv_lstm.ipynb ADDED
@@ -0,0 +1,430 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "5iJqHKEQx66F"
+ },
+ "source": [
+ "# Next-Frame Video Prediction with Convolutional LSTMs\n",
+ "\n",
+ "**Author:** [Amogh Joshi](https://github.com/amogh7joshi)<br>\n",
+ "**Date created:** 2021/06/02<br>\n",
+ "**Last modified:** 2021/06/05<br>\n",
+ "**Description:** How to build and train a convolutional LSTM model for next-frame video prediction."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "9vv8zp4vx66K"
+ },
+ "source": [
+ "## Introduction\n",
+ "\n",
+ "The\n",
+ "[Convolutional LSTM](https://papers.nips.cc/paper/2015/file/07563a3fe3bbe7e3ba84431ad9d055af-Paper.pdf)\n",
+ "architecture brings together time series processing and computer vision by\n",
+ "introducing a convolutional recurrent cell in an LSTM layer. In this example, we will apply the\n",
+ "Convolutional LSTM model to next-frame prediction, the process\n",
+ "of predicting what video frames come next given a series of past frames."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "daG-n305x66K"
+ },
+ "source": [
+ "## Setup"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "4Xx9qttUx66L"
+ },
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "import tensorflow as tf\n",
+ "from tensorflow import keras\n",
+ "from tensorflow.keras import layers\n",
+ "\n",
+ "import io\n",
+ "import imageio\n",
+ "from IPython.display import Image, display\n",
+ "from ipywidgets import widgets, Layout, HBox"
+ ]
+ },
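+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before working with the data, here is a minimal sketch of what a `ConvLSTM2D`\n",
+ "layer consumes and produces (the sizes below are arbitrary illustrative values):\n",
+ "a 5D tensor of shape `(batch, time, height, width, channels)` goes in and, with\n",
+ "`return_sequences=True`, one feature map per filter comes out at each time step."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Minimal shape check: run a single ConvLSTM2D layer on dummy data.\n",
+ "dummy = tf.zeros((1, 5, 64, 64, 1))  # (batch, time, height, width, channels)\n",
+ "out = layers.ConvLSTM2D(\n",
+ "    filters=8, kernel_size=(3, 3), padding=\"same\", return_sequences=True\n",
+ ")(dummy)\n",
+ "print(out.shape)  # (1, 5, 64, 64, 8)"
+ ]
+ },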
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "w-uOOdg1x66M"
+ },
+ "source": [
+ "## Dataset Construction\n",
+ "\n",
+ "For this example, we will be using the\n",
+ "[Moving MNIST](http://www.cs.toronto.edu/~nitish/unsupervised_video/)\n",
+ "dataset.\n",
+ "\n",
+ "We will download the dataset and then construct and\n",
+ "preprocess training and validation sets.\n",
+ "\n",
+ "For next-frame prediction, our model will use a previous frame,\n",
+ "which we'll call `f_n`, to predict a new frame, called `f_(n + 1)`.\n",
+ "To allow the model to create these predictions, we'll need to process\n",
+ "the data so that we have \"shifted\" inputs and outputs: the input `x` is\n",
+ "frames `0` through `n - 1`, and the target `y` is frames `1` through `n`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "H6_vt6q4x66N"
+ },
+ "outputs": [],
+ "source": [
+ "# Download and load the dataset.\n",
+ "fpath = keras.utils.get_file(\n",
+ "    \"moving_mnist.npy\",\n",
+ "    \"http://www.cs.toronto.edu/~nitish/unsupervised_video/mnist_test_seq.npy\",\n",
+ ")\n",
+ "dataset = np.load(fpath)\n",
+ "\n",
+ "# Swap the axes representing the number of frames and number of data samples.\n",
+ "dataset = np.swapaxes(dataset, 0, 1)\n",
+ "# We'll pick out 1000 of the 10000 total examples and use those.\n",
+ "dataset = dataset[:1000, ...]\n",
+ "# Add a channel dimension since the images are grayscale.\n",
+ "dataset = np.expand_dims(dataset, axis=-1)\n",
+ "\n",
+ "# Split into train and validation sets using indexing to optimize memory.\n",
+ "indexes = np.arange(dataset.shape[0])\n",
+ "np.random.shuffle(indexes)\n",
+ "train_index = indexes[: int(0.9 * dataset.shape[0])]\n",
+ "val_index = indexes[int(0.9 * dataset.shape[0]) :]\n",
+ "train_dataset = dataset[train_index]\n",
+ "val_dataset = dataset[val_index]\n",
+ "\n",
+ "# Normalize the data to the 0-1 range.\n",
+ "train_dataset = train_dataset / 255\n",
+ "val_dataset = val_dataset / 255\n",
+ "\n",
+ "# We'll define a helper function to shift the frames, where\n",
+ "# `x` is frames 0 to n - 1, and `y` is frames 1 to n.\n",
+ "def create_shifted_frames(data):\n",
+ "    x = data[:, 0 : data.shape[1] - 1, :, :]\n",
+ "    y = data[:, 1 : data.shape[1], :, :]\n",
+ "    return x, y\n",
+ "\n",
+ "\n",
+ "# Apply the processing function to the datasets.\n",
+ "x_train, y_train = create_shifted_frames(train_dataset)\n",
+ "x_val, y_val = create_shifted_frames(val_dataset)\n",
+ "\n",
+ "# Inspect the dataset.\n",
+ "print(\"Training Dataset Shapes: \" + str(x_train.shape) + \", \" + str(y_train.shape))\n",
+ "print(\"Validation Dataset Shapes: \" + str(x_val.shape) + \", \" + str(y_val.shape))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "wJhm7oM7x66O"
+ },
+ "source": [
+ "## Data Visualization\n",
+ "\n",
+ "Our data consists of sequences of frames, each of which\n",
+ "is used to predict the upcoming frame. Let's take a look\n",
+ "at some of these sequential frames."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "jFE2fY1xx66O"
+ },
+ "outputs": [],
+ "source": [
+ "# Construct a figure on which we will visualize the images.\n",
+ "fig, axes = plt.subplots(4, 5, figsize=(10, 8))\n",
+ "\n",
+ "# Plot each of the sequential images for one random data example.\n",
+ "data_choice = np.random.choice(range(len(train_dataset)), size=1)[0]\n",
+ "for idx, ax in enumerate(axes.flat):\n",
+ "    ax.imshow(np.squeeze(train_dataset[data_choice][idx]), cmap=\"gray\")\n",
+ "    ax.set_title(f\"Frame {idx + 1}\")\n",
+ "    ax.axis(\"off\")\n",
+ "\n",
+ "# Print information and display the figure.\n",
+ "print(f\"Displaying frames for example {data_choice}.\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "jPQQIUm6x66P"
+ },
+ "source": [
+ "## Model Construction\n",
+ "\n",
+ "To build a Convolutional LSTM model, we will use the\n",
+ "`ConvLSTM2D` layer, which accepts inputs of shape\n",
+ "`(batch_size, num_frames, width, height, channels)`, followed by a\n",
+ "`Conv3D` layer that maps the features back to a prediction movie of the same shape."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "D3OvRaVpx66P"
+ },
+ "outputs": [],
+ "source": [
+ "# Construct the input layer with no definite frame size.\n",
+ "inp = layers.Input(shape=(None, *x_train.shape[2:]))\n",
+ "\n",
+ "# We will construct 3 `ConvLSTM2D` layers with batch normalization,\n",
+ "# followed by a `Conv3D` layer for the spatiotemporal outputs.\n",
+ "x = layers.ConvLSTM2D(\n",
+ "    filters=64,\n",
+ "    kernel_size=(5, 5),\n",
+ "    padding=\"same\",\n",
+ "    return_sequences=True,\n",
+ "    activation=\"relu\",\n",
+ ")(inp)\n",
+ "x = layers.BatchNormalization()(x)\n",
+ "x = layers.ConvLSTM2D(\n",
+ "    filters=64,\n",
+ "    kernel_size=(3, 3),\n",
+ "    padding=\"same\",\n",
+ "    return_sequences=True,\n",
+ "    activation=\"relu\",\n",
+ ")(x)\n",
+ "x = layers.BatchNormalization()(x)\n",
+ "x = layers.ConvLSTM2D(\n",
+ "    filters=64,\n",
+ "    kernel_size=(1, 1),\n",
+ "    padding=\"same\",\n",
+ "    return_sequences=True,\n",
+ "    activation=\"relu\",\n",
+ ")(x)\n",
+ "x = layers.Conv3D(\n",
+ "    filters=1, kernel_size=(3, 3, 3), activation=\"sigmoid\", padding=\"same\"\n",
+ ")(x)\n",
+ "\n",
+ "# Next, we will build the complete model and compile it.\n",
+ "model = keras.models.Model(inp, x)\n",
+ "model.compile(\n",
+ "    loss=keras.losses.binary_crossentropy, optimizer=keras.optimizers.Adam(),\n",
+ ")"
+ ]
+ },
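+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a quick optional sanity check on the architecture above, `model.summary()`\n",
+ "lists each layer together with its output shape and parameter count."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Print a layer-by-layer overview of the compiled model.\n",
+ "model.summary()"
+ ]
+ },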
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "Nd0VLhrvx66Q"
+ },
+ "source": [
+ "## Model Training\n",
+ "\n",
+ "With our model and data constructed, we can now train the model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "v9U57leux66Q"
+ },
+ "outputs": [],
+ "source": [
+ "# Define some callbacks to improve training.\n",
+ "early_stopping = keras.callbacks.EarlyStopping(monitor=\"val_loss\", patience=10)\n",
+ "reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor=\"val_loss\", patience=5)\n",
+ "\n",
+ "# Define modifiable training hyperparameters.\n",
+ "epochs = 20\n",
+ "batch_size = 5\n",
+ "\n",
+ "# Fit the model to the training data.\n",
+ "model.fit(\n",
+ "    x_train,\n",
+ "    y_train,\n",
+ "    batch_size=batch_size,\n",
+ "    epochs=epochs,\n",
+ "    validation_data=(x_val, y_val),\n",
+ "    callbacks=[early_stopping, reduce_lr],\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "RxB7zZIxx66R"
+ },
+ "source": [
+ "## Frame Prediction Visualizations\n",
+ "\n",
+ "With our model now constructed and trained, we can generate\n",
+ "some example frame predictions based on a new video.\n",
+ "\n",
+ "We'll pick a random example from the validation set and\n",
+ "then choose the first ten frames from it. From there, we can\n",
+ "allow the model to predict 10 new frames, which we can compare\n",
+ "to the ground-truth frames."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "qsujRd4Ex66R"
+ },
+ "outputs": [],
+ "source": [
+ "# Select a random example from the validation dataset.\n",
+ "example = val_dataset[np.random.choice(range(len(val_dataset)), size=1)[0]]\n",
+ "\n",
+ "# Pick the first/last ten frames from the example.\n",
+ "frames = example[:10, ...]\n",
+ "original_frames = example[10:, ...]\n",
+ "\n",
+ "# Predict a new set of 10 frames.\n",
+ "for _ in range(10):\n",
+ "    # Extract the model's prediction and post-process it.\n",
+ "    new_prediction = model.predict(np.expand_dims(frames, axis=0))\n",
+ "    new_prediction = np.squeeze(new_prediction, axis=0)\n",
+ "    predicted_frame = np.expand_dims(new_prediction[-1, ...], axis=0)\n",
+ "\n",
+ "    # Extend the set of prediction frames.\n",
+ "    frames = np.concatenate((frames, predicted_frame), axis=0)\n",
+ "\n",
+ "# Construct a figure for the original and new frames.\n",
+ "fig, axes = plt.subplots(2, 10, figsize=(20, 4))\n",
+ "\n",
+ "# Plot the original frames.\n",
+ "for idx, ax in enumerate(axes[0]):\n",
+ "    ax.imshow(np.squeeze(original_frames[idx]), cmap=\"gray\")\n",
+ "    ax.set_title(f\"Frame {idx + 11}\")\n",
+ "    ax.axis(\"off\")\n",
+ "\n",
+ "# Plot the new frames.\n",
+ "new_frames = frames[10:, ...]\n",
+ "for idx, ax in enumerate(axes[1]):\n",
+ "    ax.imshow(np.squeeze(new_frames[idx]), cmap=\"gray\")\n",
+ "    ax.set_title(f\"Frame {idx + 11}\")\n",
+ "    ax.axis(\"off\")\n",
+ "\n",
+ "# Display the figure.\n",
+ "plt.show()"
+ ]
+ },
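+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Beyond visual inspection, a simple per-frame mean squared error gives a rough\n",
+ "numeric comparison between the predicted and ground-truth frames (a minimal\n",
+ "sketch, reusing `new_frames` and `original_frames` from the cell above; MSE is\n",
+ "just one possible choice of metric)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Per-frame MSE between the 10 predicted frames and the ground truth.\n",
+ "mse_per_frame = np.mean((new_frames - original_frames) ** 2, axis=(1, 2, 3))\n",
+ "for idx, mse in enumerate(mse_per_frame):\n",
+ "    print(f\"Frame {idx + 11}: MSE = {mse:.4f}\")"
+ ]
+ },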
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "78OrJXZfx66R"
+ },
+ "source": [
+ "## Predicted Videos\n",
+ "\n",
+ "Finally, we'll pick a few examples from the validation set\n",
+ "and construct some GIFs with them to see the model's\n",
+ "predicted videos."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "ncMx34rLx66R"
+ },
+ "outputs": [],
+ "source": [
+ "# Select a few random examples from the dataset.\n",
+ "examples = val_dataset[np.random.choice(range(len(val_dataset)), size=5)]\n",
+ "\n",
+ "# Iterate over the examples and predict the frames.\n",
+ "predicted_videos = []\n",
+ "for example in examples:\n",
+ "    # Pick the first/last ten frames from the example.\n",
+ "    frames = example[:10, ...]\n",
+ "    original_frames = example[10:, ...]\n",
+ "    new_predictions = np.zeros(shape=(10, *frames[0].shape))\n",
+ "\n",
+ "    # Predict a new set of 10 frames.\n",
+ "    for i in range(10):\n",
+ "        # Extract the model's prediction and post-process it.\n",
+ "        frames = example[: 10 + i + 1, ...]\n",
+ "        new_prediction = model.predict(np.expand_dims(frames, axis=0))\n",
+ "        new_prediction = np.squeeze(new_prediction, axis=0)\n",
+ "        predicted_frame = np.expand_dims(new_prediction[-1, ...], axis=0)\n",
+ "\n",
+ "        # Extend the set of prediction frames.\n",
+ "        new_predictions[i] = predicted_frame\n",
+ "\n",
+ "    # Create and save GIFs for each of the ground truth/prediction images.\n",
+ "    for frame_set in [original_frames, new_predictions]:\n",
+ "        # Construct a GIF from the selected video frames.\n",
+ "        current_frames = np.squeeze(frame_set)\n",
+ "        current_frames = current_frames[..., np.newaxis] * np.ones(3)\n",
+ "        current_frames = (current_frames * 255).astype(np.uint8)\n",
+ "        current_frames = list(current_frames)\n",
+ "\n",
+ "        # Construct a GIF from the frames.\n",
+ "        with io.BytesIO() as gif:\n",
+ "            imageio.mimsave(gif, current_frames, \"GIF\", fps=5)\n",
+ "            predicted_videos.append(gif.getvalue())\n",
+ "\n",
+ "# Display the videos.\n",
+ "print(\" Truth\\tPrediction\")\n",
+ "for i in range(0, len(predicted_videos), 2):\n",
+ "    # Construct and display an `HBox` with the ground truth and prediction.\n",
+ "    box = HBox(\n",
+ "        [\n",
+ "            widgets.Image(value=predicted_videos[i]),\n",
+ "            widgets.Image(value=predicted_videos[i + 1]),\n",
+ "        ]\n",
+ "    )\n",
+ "    display(box)"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "collapsed_sections": [],
+ "name": "conv_lstm",
+ "provenance": [],
+ "toc_visible": true
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.0"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+ }