diff --git "a/CodeTrans Datasets/DLTrans/dl_train.json" "b/CodeTrans Datasets/DLTrans/dl_train.json" new file mode 100644--- /dev/null +++ "b/CodeTrans Datasets/DLTrans/dl_train.json" @@ -0,0 +1,564 @@ +{"id": 1, "tensorflow": "x = tf.range(12)\ntf.size(x)\nX = tf.reshape(x, (3, 4))\ntf.zeros((2, 3, 4))\ntf.ones((2, 3, 4))\ntf.random.normal(shape=[3, 4])\ntf.constant([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = tf.constant([1.0, 2, 4, 8])\ny = tf.constant([2.0, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\ntf.exp(x)\nX = tf.reshape(tf.range(12, dtype=tf.float32), (3, 4))\nY = tf.constant([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\ntf.concat([X, Y], axis=0), tf.concat([X, Y], axis=1)\ntf.reduce_sum(X)\na = tf.reshape(tf.range(3), (3, 1))\nb = tf.reshape(tf.range(2), (1, 2))\nX_var = tf.Variable(X)\nX_var[1, 2].assign(9)\nX_var = tf.Variable(X)\nX_var[0:2, :].assign(tf.ones(X_var[0:2,:].shape, dtype = tf.float32) * 12)\nZ = tf.Variable(tf.zeros_like(Y))\nZ.assign(X + Y)\n@tf.function\ndef computation(X, Y):\n Z = tf.zeros_like(Y)\n A = X + Y\n B = A + Y\n C = B + Y\n return C + Y\ncomputation(X, Y)\nA = X.numpy()\nB = tf.constant(A)\na = tf.constant([3.5]).numpy()\nprint(a, a.item(), float(a), int(a))", "paddle": "x = paddle.arange(12)\nx.numel()\nX = paddle.reshape(x, (3, 4))\npaddle.zeros((2, 3, 4))\npaddle.ones((2, 3, 4))\npaddle.randn((3, 4),'float32')\npaddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = paddle.to_tensor([1.0, 2, 4, 8])\ny = paddle.to_tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x**y\npaddle.exp(x)\nX = paddle.arange(12, dtype='float32').reshape((3, 4))\nY = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\npaddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1)\nX.sum()\na = paddle.reshape(paddle.arange(3), (3, 1))\nb = paddle.reshape(paddle.arange(2), (1, 2))\nX[1, 2] = 9\nX[0:2, :] = 12\nZ = paddle.zeros_like(Y)\nZ = X + Y\nbefore = id(X)\nX += Y\nid(X) == before\nA = X.numpy()\nB = paddle.to_tensor(A)\ntype(A), type(B)\na = paddle.to_tensor([3.5])\na, a.item(), float(a), int(a)"} +{"id": 2, "tensorflow": "import tensorflow as tf\nX, y = tf.constant(inputs.values), tf.constant(outputs.values)", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nX, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values)"} +{"id": 3, "tensorflow": "import tensorflow as tf\nx = tf.constant(3.0)\ny = tf.constant(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = tf.range(4)\nA = tf.reshape(tf.range(20), (5, 4))\ntf.transpose(A)\nB = tf.constant([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == tf.transpose(B)\nX = tf.reshape(tf.range(24), (2, 3, 4))\nA = tf.reshape(tf.range(20, dtype=tf.float32), (5, 4))\nB = A\nprint(A, A + B)\na = 2\nX = tf.reshape(tf.range(24), (2, 3, 4))\nprint(a + X, (a * X).shape)\nx = tf.range(4, dtype=tf.float32)\nprint(x, tf.reduce_sum(x))\na = tf.reduce_sum(A)\nA_sum_axis0 = tf.reduce_sum(A, axis=0)\nA_sum_axis1 = tf.reduce_sum(A, axis=1\ntf.reduce_sum(A, axis=[0, 1])\ntf.reduce_mean(A)\ntf.reduce_sum(A) / tf.size(A).numpy()\ntf.reduce_mean(A, axis=0)\ntf.reduce_sum(A, axis=0) / A.shape[0]\nsum_A = tf.reduce_sum(A, axis=1, keepdims=True)\ntf.cumsum(A, axis=0)\ny = tf.ones(4, dtype=tf.float32)\nprint(tf.tensordot(x, y, axes=1))\ntf.reduce_sum(x * y)\nA.shape, x.shape, tf.linalg.matvec(A, x)\nB = tf.ones((4, 3), tf.float32)\ntf.matmul(A, B)\nu = tf.constant([3.0, -4.0])\ntf.norm(u)\ntf.reduce_sum(tf.abs(u))\ntf.norm(tf.ones((4, 9)))", "paddle": "import 
warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nx = paddle.to_tensor([3.0])\ny = paddle.to_tensor([2.0])\nx + y, x * y, x / y, x**y\nx = paddle.arange(4)\nA = paddle.reshape(paddle.arange(20), (5, 4))\npaddle.transpose(A, perm=[1, 0])\nB = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == paddle.transpose(B, perm=[1, 0])\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\nA = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4))\nB = A.clone()\nA, A + B\na = 2\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\na + X, (a * X).shape\nx = paddle.arange(4, dtype=paddle.float32)\nprint(x, x.sum())\nA.shape, A.sum()\nA_sum_axis0 = A.sum(axis=0)\nA_sum_axis1 = A.sum(axis=1)\nA.sum(axis=[0, 1])\nA.mean(), A.sum() / A.numel()\nA.mean(axis=0), A.sum(axis=0) / A.shape[0]\nsum_A = paddle.sum(A, axis=1, keepdim=True)\nA.cumsum(axis=0)\ny = paddle.ones(shape=[4], dtype='float32')\nx, y, paddle.dot(x, y)\npaddle.sum(x * y)\nA.shape, x.shape, paddle.mv(A, x)\nB = paddle.ones(shape=[4, 3], dtype='float32')\npaddle.mm(A, B)\nu = paddle.to_tensor([3.0, -4.0])\npaddle.norm(u)\npaddle.abs(u).sum()\npaddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))"} +{"id": 4, "tensorflow": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import tensorflow as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1", "paddle": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import paddle as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1"} +{"id": 5, "tensorflow": "import tensorflow as tf\nx = tf.range(4, dtype=tf.float32)\nx = tf.Variable(x)\nwith tf.GradientTape() as t:\n y = 2 * tf.tensordot(x, x, axes=1)\nx_grad = t.gradient(y, x)\nx_grad\nx_grad == 4 * x\nwith tf.GradientTape() as t:\n y = tf.reduce_sum(x)\nt.gradient(y, x)\nwith tf.GradientTape() as t:\n y = x * x\nt.gradient(y, x)\nwith tf.GradientTape(persistent=True) as t:\n y = x * x\n u = tf.stop_gradient(y)\n z = u * x\nx_grad = t.gradient(z, x)\nx_grad == u\nt.gradient(y, x) == 2 * x\ndef f(a):\n b = a * 2\n while tf.norm(b) < 1000:\n b = b * 2\n if tf.reduce_sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = tf.Variable(tf.random.normal(shape=()))\nwith tf.GradientTape() as t:\n d = f(a)\nd_grad = t.gradient(d, a)\nd_grad\nd_grad == d / a", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nx = paddle.arange(4, dtype='float32')\nx = paddle.to_tensor(x, stop_gradient=False)\ny = 2 * paddle.dot(x, x)\ny.backward()\nx.grad\nx.grad == 4 * x\nx.clear_gradient()\ny = paddle.sum(x)\ny.backward()\nx.grad\nx.clear_gradient()\ny = x * x\npaddle.sum(y).backward()\nx.grad\nx.clear_gradient()\ny = x * x\nu = y.detach()\nz = u * x\npaddle.sum(z).backward()\nx.grad == u\nx.clear_gradient()\npaddle.sum(y).backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while paddle.norm(b) < 1000:\n b = b * 2\n if paddle.sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False)\nd = f(a)\nd.backward()\na.grad == d / a"} +{"id": 6, "tensorflow": "%matplotlib inline\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as 
tfp\nfrom d2l import tensorflow as d2l\nfair_probs = tf.ones(6) / 6\ntfp.distributions.Multinomial(1, fair_probs).sample()\ntfp.distributions.Multinomial(10, fair_probs).sample()\ncounts = tfp.distributions.Multinomial(1000, fair_probs).sample()", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport numpy as np\nimport paddle\nfair_probs = [1.0 / 6] * 6\npaddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample()\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000"} +{"id": 7, "tensorflow": "counts = tfp.distributions.Multinomial(10, fair_probs).sample(500)\ncum_counts = tf.cumsum(counts, axis=0)\nestimates = cum_counts / tf.reduce_sum(cum_counts, axis=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].numpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nimport tensorflow as tf\na = dir(tf.random)\nhelp(tf.ones)\ntf.ones(4)", "paddle": "counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1))\ncum_counts = counts.cumsum(axis=0)\ncum_counts = cum_counts.squeeze(axis=1)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdim=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i],\n label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend()\nimport warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nhelp(paddle.ones)\npaddle.ones([4], dtype='float32')"} +{"id": 8, "tensorflow": "%matplotlib inline\nimport math\nimport time\nimport numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn = 10000\na = tf.ones(n)\nb = tf.ones(n)\nc = tf.Variable(tf.zeros(n))\ntimer = Timer()\nfor i in range(n):\n c[i].assign(a[i] + b[i])\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport time\nimport numpy as np\nimport paddle\nn = 10000\na = paddle.ones([n])\nb = paddle.ones([n])\nc = paddle.zeros([n])\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x',\n ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"} +{"id": 9, "tensorflow": "%matplotlib inline\nimport random\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef synthetic_data(w, b, num_examples):\n X = tf.zeros((num_examples, w.shape[0]))\n X += tf.random.normal(shape=X.shape)\n y = tf.matmul(X, tf.reshape(w, (-1, 1))) + b\n y += tf.random.normal(shape=y.shape, stddev=0.01)\n y = tf.reshape(y, (-1, 1))\n return X, y\ntrue_w = tf.constant([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 
1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].numpy(), labels.numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n j = tf.constant(indices[i: min(i + batch_size, num_examples)])\n yield tf.gather(features, j), tf.gather(labels, j)\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n print(X, '\n', y)\n break\nw = tf.Variable(tf.random.normal(shape=(2, 1), mean=0, stddev=0.01), trainable=True)\nb = tf.Variable(tf.zeros(1), trainable=True)\ndef linreg(X, w, b):\n return tf.matmul(X, w) + b\ndef squared_loss(y_hat, y):\n return (y_hat - tf.reshape(y, y_hat.shape)) ** 2 / 2\ndef sgd(params, grads, lr, batch_size):\n for param, grad in zip(params, grads):\n param.assign_sub(lr*grad/batch_size)\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n with tf.GradientTape() as g:\n l = loss(net(X, w, b), y)\n dw, db = g.gradient(l, [w, b])\n sgd([w, b], [dw, db], lr, batch_size)\n train_l = loss(net(features, w, b), labels)", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ndef synthetic_data(w, b, num_examples):\n X = paddle.normal(0, 1, (num_examples, len(w)))\n y = paddle.matmul(X, w) + b\n y += paddle.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n break\nw = paddle.normal(0, 0.01, shape=(2,1))\nb = paddle.zeros(shape=[1])\nw.stop_gradient = False\nb.stop_gradient = False\ndef linreg(X, w, b):\n return paddle.matmul(X, w) + b\ndef squared_loss(y_hat, y):\n return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2\n with paddle.no_grad():\n for i, param in enumerate(params):\n param -= lr * params[i].grad / batch_size\n params[i].set_value(param)\n params[i].clear_gradient()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with paddle.no_grad():\n train_l = loss(net(features, w, b), labels)"} +{"id": 10, "tensorflow": "import numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntrue_w = tf.constant([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = tf.data.Dataset.from_tensor_slices(data_arrays)\n if is_train:\n dataset = dataset.shuffle(buffer_size=1000)\n dataset = dataset.batch(batch_size)\n return dataset\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nnet = tf.keras.Sequential()\nnet.add(tf.keras.layers.Dense(1))\ninitializer = tf.initializers.RandomNormal(stddev=0.01)\nnet = 
tf.keras.Sequential()\nnet.add(tf.keras.layers.Dense(1, kernel_initializer=initializer))\nloss = tf.keras.losses.MeanSquaredError()\ntrainer = tf.keras.optimizers.SGD(learning_rate=0.03)\nw = net.get_weights()[0]\nb = net.get_weights()[1]", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport paddle\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = paddle.io.TensorDataset(data_arrays)\n return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01))\nbias_attr = paddle.ParamAttr(initializer=None)\nnet = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr))\nloss = nn.MSELoss()\ntrainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters())\nw = net[0].weight\nb = net[0].bias"} +{"id": 11, "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nd2l.use_svg_display()\nmnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()\nlen(mnist_train[0]), len(mnist_test[0])\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(img.numpy())\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX = tf.constant(mnist_train[0][:18])\ny = tf.constant(mnist_train[1][:18])\nshow_images(X, 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ntrain_iter = tf.data.Dataset.from_tensor_slices(mnist_train).batch(batch_size).shuffle(len(mnist_train[0]))\ndef load_data_fashion_mnist(batch_size, resize=None):\n mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()\n process = lambda X, y: (tf.expand_dims(X, axis=3) / 255, tf.cast(y, dtype='int32'))\n resize_fn = lambda X, y: (tf.image.resize_with_pad(X, resize, resize) if resize else X, y)\n return (tf.data.Dataset.from_tensor_slices(process(*mnist_train)).batch(batch_size).shuffle(len(mnist_train[0])).map(resize_fn),\n tf.data.Dataset.from_tensor_slices(process(*mnist_test)).batch(batch_size).map(resize_fn))", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport sys\nimport paddle\nfrom paddle.vision import transforms\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\nmnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\nlen(mnist_train), len(mnist_test)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if paddle.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = 
next(iter(paddle.io.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\n return 4\ntrain_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers())\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\n mnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\n return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()),\n paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, num_workers=get_dataloader_workers()))"} +{"id": 12, "tensorflow": "import tensorflow as tf\nfrom IPython import display\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = tf.Variable(tf.random.normal(shape=(num_inputs, num_outputs), mean=0, stddev=0.01))\nb = tf.Variable(tf.zeros(num_outputs))\nX = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\ntf.reduce_sum(X, 0, keepdims=True), tf.reduce_sum(X, 1, keepdims=True)\ndef softmax(X):\n X_exp = tf.exp(X)\n partition = tf.reduce_sum(X_exp, 1, keepdims=True)\n return X_exp / partition\nX = tf.random.normal((2, 5), 0, 1)\nX_prob = softmax(X)\nX_prob, tf.reduce_sum(X_prob, 1)\ndef net(X):\n return softmax(tf.matmul(tf.reshape(X, (-1, W.shape[0])), W) + b)\ny_hat = tf.constant([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny = tf.constant([0, 2])\ntf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1]))\ndef cross_entropy(y_hat, y):\n return -tf.math.log(tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1])))\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = tf.argmax(y_hat, axis=1)\n cmp = tf.cast(y_hat, y.dtype) == y\n return float(tf.reduce_sum(tf.cast(cmp, y.dtype)))\ndef evaluate_accuracy(net, data_iter):\n metric = Accumulator(2)\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), d2l.size(y))\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n metric = Accumulator(3)\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n y_hat = net(X)\n if isinstance(loss, tf.keras.losses.Loss):\n l = loss(y, y_hat)\n else:\n l = loss(y_hat, y)\n if isinstance(updater, tf.keras.optimizers.Optimizer):\n params = net.trainable_variables\n grads = tape.gradient(l, params)\n updater.apply_gradients(zip(grads, params))\n else:\n updater(X.shape[0], tape.gradient(l, updater.params))\n l_sum = l * float(tf.size(y)) if isinstance(loss, tf.keras.losses.Loss) else tf.reduce_sum(l)\n metric.add(l_sum, accuracy(y_hat, y), tf.size(y))\n return metric[0] / metric[2], metric[1] / metric[2]\nclass Updater():\n def __init__(self, params, lr):\n self.params = params\n self.lr = lr\n def __call__(self, batch_size, grads):\n d2l.sgd(self.params, grads, self.lr, batch_size)\nupdater = Updater([W, b], lr=0.1)\ndef predict_ch3(net, test_iter, n=6):\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(tf.argmax(net(X), axis=1))\n titles = [true +'\\n' + pred for true, pred in zip(trues, preds)]\n d2l.show_images(tf.reshape(X[0:n], (n, 28, 28)), 1, n, 
titles=titles[0:n])\npredict_ch3(net, test_iter)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom IPython import display\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs))\nb = paddle.zeros(shape=(num_outputs,))\nW.stop_gradient=False\nb.stop_gradient=False\nX = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = paddle.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = paddle.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = paddle.to_tensor([0, 2])\ny_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n if len(y_hat.shape) < len(y.shape):\n cmp = y_hat.astype(y.dtype) == y.squeeze()\n else:\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, paddle.nn.Layer):\n net.eval()\n metric = Accumulator(2)\n with paddle.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, paddle.nn.Layer):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]\nlr = 0.1\ndef updater(batch_size):\n return d2l.sgd([W, b], lr, batch_size)\ndef predict_ch3(net, test_iter, n=6):\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))\n titles = [true +'\\n' + pred for true, pred in zip(trues, preds)]\n d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])\npredict_ch3(net, test_iter)"} +{"id": 13, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = tf.keras.models.Sequential()\nnet.add(tf.keras.layers.Flatten(input_shape=(28, 28)))\nweight_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.01)\nnet.add(tf.keras.layers.Dense(10, kernel_initializer=weight_initializer))\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrainer = tf.keras.optimizers.SGD(learning_rate=.1)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.Normal(m.weight, std=0.01)\nnet.apply(init_weights);\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = 
paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())"} +{"id": 14, "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nx = tf.Variable(tf.range(-8.0, 8.0, 0.1), dtype=tf.float32)\ny = tf.nn.relu(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.relu(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of sigmoid',\n figsize=(5, 2.5))\ny = tf.nn.tanh(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.tanh(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(-8.0, 8.0, 0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.relu(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = paddle.nn.functional.sigmoid(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = paddle.tanh(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))"} +{"id": 15, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = tf.Variable(tf.random.normal(shape=(num_inputs, num_hiddens), mean=0, stddev=0.01))\nb1 = tf.Variable(tf.zeros(num_hiddens))\nW2 = tf.Variable(tf.random.normal(shape=(num_hiddens, num_outputs), mean=0, stddev=0.01))\nb2 = tf.Variable(tf.zeros(num_outputs))\nparams = [W1, b1, W2, b2]\ndef relu(X):\n return tf.math.maximum(X, 0)\ndef net(X):\n X = tf.reshape(X, (-1, num_inputs))\n H = relu(tf.matmul(X, W1) + b1)\n return tf.matmul(H, W2) + b2\ndef loss(y_hat, y):\n return tf.losses.sparse_categorical_crossentropy(y, y_hat, from_logits=True)\nnum_epochs, lr = 10, 0.1\nupdater = d2l.Updater([W1, W2, b1, b2], lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = paddle.randn([num_inputs, num_hiddens]) * 0.01\nW1.stop_gradient = False\nb1 = paddle.zeros([num_hiddens])\nb1.stop_gradient = False\nW2 = paddle.randn([num_hiddens, num_outputs]) * 0.01\nW2.stop_gradient = False\nb2 = paddle.zeros([num_outputs])\nb2.stop_gradient = False\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = paddle.zeros_like(X)\n return paddle.maximum(X, a)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(X@W1 + 
b1)\n return (H@W2 + b2)\nloss = nn.CrossEntropyLoss(reduction='none')\nnum_epochs, lr = 10, 0.1\nupdater = paddle.optimizer.SGD(learning_rate=lr, parameters=params)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)"} +{"id": 16, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nnet = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(10)])\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Linear(256, 10))\nfor layer in net:\n if type(layer) == nn.Linear:\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=0.01))\n layer.weight_attr = weight_attr\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 17, "tensorflow": "import math\nimport numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntrue_w, features, poly_features, labels = [tf.constant(x, dtype=tf.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(tf.reduce_sum(l), d2l.size(l))\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = tf.losses.MeanSquaredError()\n input_shape = train_features.shape[-1]\n net = tf.keras.Sequential()\n net.add(tf.keras.layers.Dense(1, use_bias=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False)\n trainer = tf.keras.optimizers.SGD(learning_rate=.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:], num_epochs=1500)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport numpy as np\nimport paddle\nfrom paddle import nn\ntrue_w, features, poly_features, labels = [paddle.to_tensor(x, dtype=\n paddle.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n out = net(X)\n y = 
y.reshape(out.shape)\n l = loss(out, y)\n metric.add(l.sum(), l.numel())\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels,\n num_epochs=400):\n loss = nn.MSELoss()\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array(((train_features, train_labels.reshape([-1,1]))), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, is_train=False)\n trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2],\n labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :],\n labels[:n_train], labels[n_train:], num_epochs=1500)"} +{"id": 18, "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = tf.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = tf.Variable(tf.random.normal(mean=1, shape=(num_inputs, 1)))\n b = tf.Variable(tf.zeros(shape=(1, )))\n return [w, b]\ndef l2_penalty(w):\n return tf.reduce_sum(tf.pow(w, 2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n l = loss(net(X), y) + lambd * l2_penalty(w)\n grads = tape.gradient(l, [w, b])\n d2l.sgd([w, b], grads, lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = tf.keras.models.Sequential()\n net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(wd)))\n net.build(input_shape=(1, num_inputs))\n w, b = net.trainable_variables\n loss = tf.keras.losses.MeanSquaredError()\n num_epochs, lr = 100, 0.003\n trainer = tf.keras.optimizers.SGD(learning_rate=lr)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n l = loss(net(X), y) + net.losses\n grads = tape.gradient(l, net.trainable_variables)\n trainer.apply_gradients(zip(grads, net.trainable_variables))\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nn_train, n_test, num_inputs, batch_size = 
20, 100, 200, 5\ntrue_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = paddle.normal(0, 1, shape=(num_inputs, 1))\n w.stop_gradient = False\n b = paddle.zeros(shape=[1])\n b.stop_gradient = False\n return [w, b]\ndef l2_penalty(w):\n return paddle.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter():\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n bias_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n net = nn.Sequential(nn.Linear(num_inputs, 1, weight_attr=weight_attr, bias_attr=bias_attr))\n loss = nn.MSELoss()\n num_epochs, lr = 100, 0.003\n trainer = paddle.optimizer.SGD(parameters=net[0].parameters(), learning_rate=lr, weight_decay=wd*1.0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n trainer.step()\n trainer.clear_grad()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"} +{"id": 19, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return tf.zeros_like(X)\n if dropout == 0:\n return X\n mask = tf.random.uniform(shape=tf.shape(X), minval=0, maxval=1) < 1 - dropout\n return tf.cast(mask, dtype=tf.float32) * X / (1.0 - dropout)\nX = tf.reshape(tf.range(16, dtype=tf.float32), (2, 8))\nnum_outputs, num_hiddens1, num_hiddens2 = 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(tf.keras.Model):\n def __init__(self, num_outputs, num_hiddens1, num_hiddens2):\n super().__init__()\n self.input_layer = tf.keras.layers.Flatten()\n self.hidden1 = tf.keras.layers.Dense(num_hiddens1, activation='relu')\n self.hidden2 = tf.keras.layers.Dense(num_hiddens2, activation='relu')\n self.output_layer = tf.keras.layers.Dense(num_outputs)\n def call(self, inputs, training=None):\n x = self.input_layer(inputs)\n x = self.hidden1(x)\n if training:\n x = dropout_layer(x, dropout1)\n x = self.hidden2(x)\n if training:\n x = dropout_layer(x, dropout2)\n x = self.output_layer(x)\n return x\nnet = Net(num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n 
tf.keras.layers.Dropout(dropout1),\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dropout(dropout2),\n tf.keras.layers.Dense(10),\n])\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport random\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return paddle.zeros_like(X)\n if dropout == 0:\n return X\n mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32')\n return mask * X / (1.0 - dropout)\nX= paddle.arange(16, dtype = paddle.float32).reshape((2, 8))\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Layer):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,\n is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01))\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10, weight_attr=weight_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 20, "tensorflow": "trainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nx = tf.Variable(tf.range(-8.0, 8.0, 0.1))\nwith tf.GradientTape() as t:\n y = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), [y.numpy(), t.gradient(y, x).numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = tf.random.normal((4, 4))\nfor i in range(100):\n M = tf.matmul(M, tf.random.normal((4, 4)))", "paddle": "trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.sigmoid(x)\ny.backward(paddle.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()],\n legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = 
paddle.normal(0, 1, shape=(4,4))\nfor i in range(100):\n M = paddle.mm(M, paddle.normal(0, 1, shape=(4, 4)))"} +{"id": 21, "tensorflow": "%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn_train = train_data.shape[0]\ntrain_features = tf.constant(all_features[:n_train].values, dtype=tf.float32)\ntest_features = tf.constant(all_features[n_train:].values, dtype=tf.float32)\ntrain_labels = tf.constant(train_data.SalePrice.values.reshape(-1, 1), dtype=tf.float32)\nloss = tf.keras.losses.MeanSquaredError()\ndef get_net():\n net = tf.keras.models.Sequential()\n net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(weight_decay)))\n return net\ndef log_rmse(y_true, y_pred):\n clipped_preds = tf.clip_by_value(y_pred, 1, float('inf'))\n return tf.sqrt(tf.reduce_mean(loss(tf.math.log(y_true), tf.math.log(clipped_preds))))\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n net.compile(loss=loss, optimizer=optimizer)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n y_hat = net(X)\n l = loss(y, y_hat)\n params = net.trainable_variables\n grads = tape.gradient(l, params)\n optimizer.apply_gradients(zip(grads, params))\n train_ls.append(log_rmse(train_labels, net(train_features)))\n if test_labels is not None:\n test_ls.append(log_rmse(test_labels, net(test_features)))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = tf.concat([X_train, X_part], 0)\n y_train = tf.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)", "paddle": "%matplotlib inline\nimport warnings\nimport numpy as np\nimport pandas as pd\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\nn_train = train_data.shape[0]\ntrain_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32)\ntest_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32)\ntrain_labels = paddle.to_tensor(\n train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32)\nloss = nn.MSELoss()\nin_features = train_features.shape[1]\ndef get_net():\n net = nn.Sequential(nn.Linear(in_features,1))\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = paddle.clip(net(features), 1, 
float('inf'))\n rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, parameters=net.parameters(), weight_decay=weight_decay*1.0)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n optimizer.clear_grad()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = paddle.concat([X_train, X_part], 0)\n y_train = paddle.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).detach().numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)"} +{"id": 22, "tensorflow": "import tensorflow as tf\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dense(10))\nX = tf.random.uniform((2, 20))\nnet(X)\nclass MLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)\n self.out = tf.keras.layers.Dense(units=10)\n def call(self, X):\n return self.out(self.hidden((X)))\nclass MySequential(tf.keras.Model):\n def __init__(self, *args):\n super().__init__()\n self.modules = []\n for block in args:\n self.modules.append(block)\n def call(self, X):\n for module in self.modules:\n X = module(X)\n return X\nnet = MySequential(\n tf.keras.layers.Dense(units=256, activation=tf.nn.relu),\n tf.keras.layers.Dense(10))\nnet(X)\nclass FixedHiddenMLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.flatten = tf.keras.layers.Flatten()\n self.rand_weight = tf.constant(tf.random.uniform((20, 20)))\n self.dense = tf.keras.layers.Dense(20, activation=tf.nn.relu)\n def call(self, inputs):\n X = self.flatten(inputs)\n X = tf.nn.relu(tf.matmul(X, self.rand_weight) + 1)\n X = self.dense(X)\n while tf.reduce_sum(tf.math.abs(X)) > 1:\n X /= 2\n return tf.reduce_sum(X)\nclass NestMLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.net = tf.keras.Sequential()\n self.net.add(tf.keras.layers.Dense(64, activation=tf.nn.relu))\n self.net.add(tf.keras.layers.Dense(32, activation=tf.nn.relu))\n self.dense = tf.keras.layers.Dense(16, activation=tf.nn.relu)\n def call(self, inputs):\n return self.dense(self.net(inputs))\nchimera = 
tf.keras.Sequential()\nchimera.add(NestMLP())\nchimera.add(tf.keras.layers.Dense(20))\nchimera.add(FixedHiddenMLP())\nchimera(X)", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = paddle.rand([2, 20])\nnet(X)\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nclass MySequential(nn.Layer):\n def __init__(self, *layers):\n super(MySequential, self).__init__()\n if len(layers) > 0 and isinstance(layers[0], tuple):\n for name, layer in layers:\n self.add_sublayer(name, layer)\n else:\n for idx, layer in enumerate(layers):\n self.add_sublayer(str(idx), layer)\n def forward(self, X):\n for layer in self._sub_layers.values():\n X = layer(X)\n return X\nnet = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nnet(X)\nclass FixedHiddenMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.rand_weight = paddle.rand([20, 20])\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nclass NestMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),\n nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)"} +{"id": 23, "tensorflow": "import tensorflow as tf\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu),\n tf.keras.layers.Dense(1),\n])\nX = tf.random.uniform((2, 4))\nnet(X)\nnet.get_weights()[1]\ndef block1(name):\n return tf.keras.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu)], name=name)\ndef block2():\n net = tf.keras.Sequential()\n for i in range(4):\n net.add(block1(name=f'block-{i}'))\n return net\nrgnet = tf.keras.Sequential()\nrgnet.add(block2())\nrgnet.add(tf.keras.layers.Dense(1))\nrgnet(X)\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu,\n kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.01), bias_initializer=tf.zeros_initializer()),\n tf.keras.layers.Dense(1)])\nnet(X)\nnet.weights[0], net.weights[1]\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.Constant(1), bias_initializer=tf.zeros_initializer()),\n tf.keras.layers.Dense(1),\n])\nnet(X)\nnet.weights[0], net.weights[1]\n net = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.GlorotUniform()),\n tf.keras.layers.Dense(1, kernel_initializer=tf.keras.initializers.Constant(1)),\n])\nnet(X)\nclass MyInit(tf.keras.initializers.Initializer):\n def __call__(self, shape, dtype=None):\n data=tf.random.uniform(shape, -10, 10, dtype=dtype)\n factor=(tf.abs(data) >= 5)\n factor=tf.cast(factor, tf.float32)\n return data * factor\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, 
kernel_initializer=MyInit()),\n tf.keras.layers.Dense(1))\nnet(X)\nnet.layers[1].weights[0][:].assign(net.layers[1].weights[0] + 1)\nnet.layers[1].weights[0][0, 0].assign(42)\nnet.layers[1].weights[0]\nlayer = CenteredLayer()\nlayer(tf.constant([1, 2, 3, 4, 5]))\nnet = tf.keras.Sequential([tf.keras.layers.Dense(128), CenteredLayer()])", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = paddle.rand([2, 4])\nnet(X)\nnet.state_dict()['2.bias']\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_sublayer(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Normal(mean=0.0, std=0.01)\n paddle.zeros(m.bias)\nnet.apply(init_normal)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef init_constant(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(value = 1)\n paddle.zeros(m.bias)\nnet.apply(init_constant)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef xavier(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.XavierUniform(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(42)\nnet[0].apply(xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n for name, param in m.named_parameters()][0])\n paddle.nn.initializer.XavierUniform(m.weight, -10, 10)\n h = paddle.abs(m.weight) >= 5\n h = paddle.to_tensor(h)\n m = paddle.to_tensor(m.weight)\n m *= h\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.set_value(net[0].weight.numpy() + 1)\nval = net[0].weight.numpy()\nval[0, 0] = 42\nnet[0].weight.set_value(val)\nnet[0].weight[0]\nlayer = CenteredLayer()\nlayer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32'))\nnet = nn.Sequential(nn.Linear(8, 128), CenteredLayer())"} +{"id": 24, "tensorflow": "import tensorflow as tf\nclass CenteredLayer(tf.keras.Model):\n def __init__(self):\n super().__init__()\n def call(self, inputs):\n return inputs - tf.reduce_mean(inputs)\nY = net(tf.random.uniform((4, 8)))\ntf.reduce_mean(Y)\nclass MyDense(tf.keras.Model):\n def __init__(self, units):\n super().__init__()\n self.units = units\n def build(self, X_shape):\n self.weight = self.add_weight(name='weight',\n shape=[X_shape[-1], self.units],\n initializer=tf.random_normal_initializer())\n self.bias = self.add_weight(\n name='bias', shape=[self.units],\n initializer=tf.zeros_initializer())\n def call(self, X):\n linear = tf.matmul(X, self.weight) + self.bias\n return tf.nn.relu(linear)\ndense = MyDense(3)\ndense(tf.random.uniform((2, 5)))\ndense.get_weights()\ndense(tf.random.uniform((2, 5)))\nnet = tf.keras.models.Sequential([MyDense(8), MyDense(1)])\nnet(tf.random.uniform((2, 64)))", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nclass CenteredLayer(nn.Layer):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(paddle.rand([4, 8]))\nY.mean()\nclass MyLinear(nn.Layer):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = paddle.create_parameter(shape=(in_units, units), dtype='float32')\n self.bias = paddle.create_parameter(shape=(units,), dtype='float32')\n def forward(self, X):\n linear = paddle.matmul(X, self.weight) + 
self.bias\n return F.relu(linear)\nlinear = MyLinear(5, 3)\nlinear.weight\nlinear(paddle.randn([2, 5]))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(paddle.rand([2, 64]))"} +{"id": 25, "tensorflow": "import numpy as np\nimport tensorflow as tf\nx = tf.range(4)\nnp.save('x-file.npy', x)\nx2 = np.load('x-file.npy', allow_pickle=True)\ny = tf.zeros(4)\nnp.save('xy-files.npy', [x, y])\nx2, y2 = np.load('xy-files.npy', allow_pickle=True)\nmydict = {'x': x, 'y': y}\nnp.save('mydict.npy', mydict)\nmydict2 = np.load('mydict.npy', allow_pickle=True)\nclass MLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.flatten = tf.keras.layers.Flatten()\n self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)\n self.out = tf.keras.layers.Dense(units=10)\n def call(self, inputs):\n x = self.flatten(inputs)\n x = self.hidden(x)\n return self.out(x)\nnet = MLP()\nX = tf.random.uniform((2, 20))\nY = net(X)\nnet.save_weights('mlp.params')\nclone = MLP()\nclone.load_weights('mlp.params')", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nx = paddle.arange(4)\npaddle.save(x, 'x-file')\nx2 = paddle.load('x-file')\ny = paddle.zeros([4])\npaddle.save([x,y], 'x-file')\nx2, y2 = paddle.load('x-file')\nmydict = {'x': x, 'y': y}\npaddle.save(mydict, 'mydict')\nmydict2 = paddle.load('mydict')\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = paddle.randn(shape=[2, 20])\nY = net(X)\npaddle.save(net.state_dict(), 'mlp.pdparams')\nclone = MLP()\nclone.set_state_dict(paddle.load('mlp.pdparams'))\nclone.eval()"} +{"id": 26, "tensorflow": "import tensorflow as tf\ntf.device('/CPU:0'), tf.device('/GPU:0'), tf.device('/GPU:1')\nlen(tf.config.experimental.list_physical_devices('GPU'))\ndef try_gpu(i=0):\n if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1:\n return tf.device(f'/GPU:{i}')\n return tf.device('/CPU:0')\ndef try_all_gpus():\n num_gpus = len(tf.config.experimental.list_physical_devices('GPU'))\n devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)]\n return devices if devices else [tf.device('/CPU:0')]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = tf.constant([1, 2, 3])\nx.device\nwith try_gpu():\n X = tf.ones((2, 3))\nwith try_gpu(1):\n Y = tf.random.uniform((2, 3))\nwith try_gpu(1):\n Z = X\nwith try_gpu(1):\n Z2 = Z\nZ2 is Z\nstrategy = tf.distribute.MirroredStrategy()\nwith strategy.scope():\n net = tf.keras.models.Sequential([\n tf.keras.layers.Dense(1)])\nnet.layers[0].weights[0].device, net.layers[0].weights[1].device", "paddle": "import paddle\nfrom paddle import nn\npaddle.device.set_device(\"cpu\"), paddle.CUDAPlace(0), paddle.CUDAPlace(1)\npaddle.device.cuda.device_count()\ndef try_gpu(i=0):\n if paddle.device.cuda.device_count() >= i + 1:\n return paddle.CUDAPlace(i)\n return paddle.CPUPlace()\ndef try_all_gpus():\n devices = [paddle.CUDAPlace(i) for i in range(paddle.device.cuda.device_count())]\n return devices if devices else paddle.CPUPlace()\ntry_gpu(),try_gpu(10),try_all_gpus()\nx = paddle.to_tensor([1, 2, 3])\nx.place\nX = paddle.to_tensor(paddle.ones(shape=[2, 3]), place=try_gpu())\nY = paddle.to_tensor(paddle.rand([2, 3]), place=try_gpu(1))\nZ = X.cuda(1)\nZ.cuda(1) is Z\nnet = nn.Sequential(nn.Linear(3, 1))\nnet=net.to(try_gpu())\nnet[0].weight.place"} +{"id": 27, "tensorflow": 
"import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef corr2d(X, K):\n h, w = K.shape\n Y = tf.Variable(tf.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j].assign(tf.reduce_sum(\n X[i: i + h, j: j + w] * K))\n return Y\nX = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = tf.constant([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(tf.keras.layers.Layer):\n def __init__(self):\n super().__init__()\n def build(self, kernel_size):\n initializer = tf.random_normal_initializer()\n self.weight = self.add_weight(name='w', shape=kernel_size, initializer=initializer)\n self.bias = self.add_weight(name='b', shape=(1, ), initializer=initializer)\n def call(self, inputs):\n return corr2d(inputs, self.weight) + self.bias\nX = tf.Variable(tf.ones((6, 8)))\nX[:, 2:6].assign(tf.zeros(X[:, 2:6].shape))\nK = tf.constant([[1.0, -1.0]])\ncorr2d(tf.transpose(X), K)\nconv2d = tf.keras.layers.Conv2D(1, (1, 2), use_bias=False)\nX = tf.reshape(X, (1, 6, 8, 1))\nY = tf.reshape(Y, (1, 6, 7, 1))\nlr = 3e-2\nY_hat = conv2d(X)\nfor i in range(10):\n with tf.GradientTape(watch_accessed_variables=False) as g:\n g.watch(conv2d.weights[0])\n Y_hat = conv2d(X)\n l = (abs(Y_hat - Y)) ** 2\n update = tf.multiply(lr, g.gradient(l, conv2d.weights[0]))\n weights = conv2d.get_weights()\n weights[0] = conv2d.weights[0] - update\n conv2d.set_weights(weights)\ntf.reshape(conv2d.get_weights()[0], (1, 2))", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef corr2d(X, K):\n h, w = K.shape\n Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Layer):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = paddle.ParamAttr(paddle.rand(kernel_size))\n self.bias = paddle.ParamAttr(paddle.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = paddle.ones((6, 8))\nX[:, 2:6] = 0\nK = paddle.to_tensor([[1.0, -1.0]])\ncorr2d(X.t(), K)\nconv2d = nn.Conv2D(1, 1, kernel_size=(1, 2))\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.clear_gradients()\n l.sum().backward()\n with paddle.no_grad():\n conv2d.weight[:] -= lr * conv2d.weight.grad\nconv2d.weight.reshape((1, 2))"} +{"id": 28, "tensorflow": "import tensorflow as tf\n\ndef comp_conv2d(conv2d, X):\n X = tf.reshape(X, (1, ) + X.shape + (1, ))\n Y = conv2d(X)\n return tf.reshape(Y, Y.shape[1:3])\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same')\nX = tf.random.uniform(shape=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=(5, 3), padding='same')\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same', strides=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=(3,5), padding='valid', strides=(3, 4))\ncomp_conv2d(conv2d, X).shape", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\n\ndef comp_conv2d(conv2d, X):\n X = paddle.reshape(X, [1, 1] + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = 
nn.Conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1)\nX = paddle.rand((8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape"} +{"id": 29, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef corr2d_multi_in(X, K):\n return tf.reduce_sum([d2l.corr2d(x, k) for x, k in zip(X, K)], axis=0)\nX = tf.constant([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = tf.constant([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return tf.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = tf.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = tf.reshape(X, (c_i, h * w))\n K = tf.reshape(K, (c_o, c_i))\n Y = tf.matmul(K, X)\n return tf.reshape(Y, (c_o, h, w))\nX = tf.random.normal((3, 3, 3), 0, 1)\nK = tf.random.normal((2, 3, 1, 1), 0, 1)\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(tf.reduce_sum(tf.abs(Y1 - Y2))) < 1e-6", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = paddle.to_tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = paddle.to_tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return paddle.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = paddle.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = paddle.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = paddle.normal(0, 1, (3, 3, 3))\nK = paddle.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(paddle.abs(Y1 - Y2).sum()) < 1e-6"} +{"id": 30, "tensorflow": "import tensorflow as tf\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = tf.Variable(tf.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w +1)))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j].assign(tf.reduce_max(X[i: i + p_h, j: j + p_w]))\n elif mode =='avg':\n Y[i, j].assign(tf.reduce_mean(X[i: i + p_h, j: j + p_w]))\n return Y\nX = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = tf.reshape(tf.range(16, dtype=tf.float32), (1, 4, 4, 1))\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3])\npool2d(X)\npaddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid',\n strides=2)\npool2d(X_padded)\npaddings = tf.constant([[0, 0], [0, 0], [1, 1], [0, 0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[2, 3], padding='valid',\n strides=(2, 3))\npool2d(X_padded)\nX = tf.concat([X, X + 1], 3)\npaddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = 
tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid',\n strides=2)\npool2d(X_padded)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = paddle.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = paddle.arange(16, dtype=\"float32\").reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3, stride=3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), stride=(2, 3))\npool2d(X)\nX = paddle.concat((X, X + 1), 1)\npool2d = paddle.nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)"} +{"id": 31, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, activation='sigmoid', padding='same'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5, activation='sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120, activation='sigmoid'),\n tf.keras.layers.Dense(84, activation='sigmoid'),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((1, 28, 28, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\nclass TrainCallback(tf.keras.callbacks.Callback):\n def __init__(self, net, train_iter, test_iter, num_epochs, device_name):\n self.timer = d2l.Timer()\n self.animator = d2l.Animator( xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n self.net = net\n self.train_iter = train_iter\n self.test_iter = test_iter\n self.num_epochs = num_epochs\n self.device_name = device_name\n def on_epoch_begin(self, epoch, logs=None):\n self.timer.start()\n def on_epoch_end(self, epoch, logs):\n self.timer.stop()\n test_acc = self.net.evaluate(self.test_iter, verbose=0, return_dict=True)['accuracy']\n metrics = (logs['loss'], logs['accuracy'], test_acc)\n self.animator.add(epoch + 1, metrics)\n if epoch == self.num_epochs - 1:\n batch_size = next(iter(self.train_iter))[0].shape[0]\n num_examples = batch_size * tf.data.experimental.cardinality(self.train_iter).numpy()\ndef train_ch6(net_fn, train_iter, test_iter, num_epochs, lr, device):\n device_name = device._device_name\n strategy = tf.distribute.OneDeviceStrategy(device_name)\n with strategy.scope():\n optimizer = tf.keras.optimizers.SGD(learning_rate=lr)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n net = net_fn()\n net.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])\n callback = TrainCallback(net, train_iter, test_iter, num_epochs, device_name)\n net.fit(train_iter, epochs=num_epochs, verbose=0, callbacks=[callback])\n return net", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn, optimizer\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n 
nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = paddle.rand((1, 1, 28, 28), 'float32')\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2D:\n nn.initializer.XavierUniform(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.clear_grad()\n X, y = paddle.to_tensor(X, place=device), paddle.to_tensor(y, place=device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with paddle.no_grad():\n metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))"} +{"id": 32, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=96, kernel_size=11, strides=4, activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Conv2D(filters=256, kernel_size=5, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape:\t', X.shape)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nnet = nn.Sequential(\n nn.Conv2D(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2), nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)"} +{"id": 33, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef 
vgg_block(num_convs, num_channels):\n blk = tf.keras.models.Sequential()\n for _ in range(num_convs):\n blk.add(tf.keras.layers.Conv2D(num_channels,kernel_size=3, padding='same',activation='relu'))\n blk.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))\n return blk\ndef vgg(conv_arch):\n net = tf.keras.models.Sequential()\n for (num_convs, num_channels) in conv_arch:\n net.add(vgg_block(num_convs, num_channels))\n net.add(tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10)]))\n return net\nnet = vgg(conv_arch)\nX = tf.random.uniform((1, 224, 224, 1))\nfor blk in net.layers:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t', X.shape)\nratio = 4\nsmall_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]\nnet = lambda: vgg(small_conv_arch)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2D(kernel_size=2, stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(*conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)\nratio = 4\nsmall_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]\nnet = vgg(small_conv_arch)"} +{"id": 34, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef nin_block(num_channels, kernel_size, strides, padding):\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(num_channels, kernel_size, strides=strides, padding=padding, activation='relu'),\n tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu'),\n tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu')])\ndef net():\n return tf.keras.models.Sequential([\n nin_block(96, kernel_size=11, strides=4, padding='valid'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n nin_block(256, kernel_size=5, strides=1, padding='same'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n nin_block(384, kernel_size=3, strides=1, padding='same'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Dropout(0.5),\n nin_block(10, kernel_size=3, strides=1, padding='same'),\n tf.keras.layers.GlobalAveragePooling2D(),\n tf.keras.layers.Reshape((1, 1, 10)),\n tf.keras.layers.Flatten(),\n ])\nX = tf.random.uniform((1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2D(in_channels, out_channels, 
kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(3, stride=2), nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 35, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass Inception(tf.keras.Model):\n def __init__(self, c1, c2, c3, c4):\n super().__init__()\n self.p1_1 = tf.keras.layers.Conv2D(c1, 1, activation='relu')\n self.p2_1 = tf.keras.layers.Conv2D(c2[0], 1, activation='relu')\n self.p2_2 = tf.keras.layers.Conv2D(c2[1], 3, padding='same', activation='relu')\n self.p3_1 = tf.keras.layers.Conv2D(c3[0], 1, activation='relu')\n self.p3_2 = tf.keras.layers.Conv2D(c3[1], 5, padding='same', activation='relu')\n self.p4_1 = tf.keras.layers.MaxPool2D(3, 1, padding='same')\n self.p4_2 = tf.keras.layers.Conv2D(c4, 1, activation='relu')\n def call(self, x):\n p1 = self.p1_1(x)\n p2 = self.p2_2(self.p2_1(x))\n p3 = self.p3_2(self.p3_1(x))\n p4 = self.p4_2(self.p4_1(x))\n return tf.keras.layers.Concatenate()([p1, p2, p3, p4])\ndef b1():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, 7, strides=2, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b2():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, 1, activation='relu'),\n tf.keras.layers.Conv2D(192, 3, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b3():\n return tf.keras.models.Sequential([\n Inception(64, (96, 128), (16, 32), 32),\n Inception(128, (128, 192), (32, 96), 64),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b4():\n return tf.keras.Sequential([\n Inception(192, (96, 208), (16, 48), 64),\n Inception(160, (112, 224), (24, 64), 64),\n Inception(128, (128, 256), (24, 64), 64),\n Inception(112, (144, 288), (32, 64), 64),\n Inception(256, (160, 320), (32, 128), 128),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b5():\n return tf.keras.Sequential([\n Inception(256, (160, 320), (32, 128), 128),\n Inception(384, (192, 384), (48, 128), 128),\n tf.keras.layers.GlobalAvgPool2D(),\n tf.keras.layers.Flatten()\n ])\ndef net():\n return tf.keras.Sequential([b1(), b2(), b3(), b4(), b5(),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform(shape=(1, 96, 96, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nclass Inception(nn.Layer):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(in_channels, c1, kernel_size=1)\n self.p2_1 = nn.Conv2D(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2D(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2D(in_channels, c3[0], kernel_size=1)\n 
self.p3_2 = nn.Conv2D(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2D(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2D(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return paddle.concat(x=[p1, p2, p3, p4], axis=1)\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2,padding=1))\nb2 = nn.Sequential(nn.Conv2D(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = paddle.rand(shape=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 36, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps):\n inv = tf.cast(tf.math.rsqrt(moving_var + eps), X.dtype)\n inv *= gamma\n Y = X * inv + (beta - moving_mean * inv)\n return Y\nclass BatchNorm(tf.keras.layers.Layer):\n def __init__(self, **kwargs):\n super(BatchNorm, self).__init__(**kwargs)\n def build(self, input_shape):\n weight_shape = [input_shape[-1], ]\n self.gamma = self.add_weight(name='gamma', shape=weight_shape, initializer=tf.initializers.ones, trainable=True)\n self.beta = self.add_weight(name='beta', shape=weight_shape, initializer=tf.initializers.zeros, trainable=True)\n self.moving_mean = self.add_weight(name='moving_mean', shape=weight_shape, initializer=tf.initializers.zeros, trainable=False)\n self.moving_variance = self.add_weight(name='moving_variance', shape=weight_shape, initializer=tf.initializers.ones, trainable=False)\n super(BatchNorm, self).build(input_shape)\n def assign_moving_average(self, variable, value):\n momentum = 0.9\n delta = variable * momentum + value * (1 - momentum)\n return variable.assign(delta)\n @tf.function\n def call(self, inputs, training):\n if training:\n axes = list(range(len(inputs.shape) - 1))\n batch_mean = tf.reduce_mean(inputs, axes, keepdims=True)\n batch_variance = tf.reduce_mean(tf.math.squared_difference(inputs, tf.stop_gradient(batch_mean)), axes, keepdims=True)\n batch_mean = tf.squeeze(batch_mean, axes)\n batch_variance = tf.squeeze(batch_variance, axes)\n mean_update = self.assign_moving_average(self.moving_mean, batch_mean)\n variance_update = self.assign_moving_average(self.moving_variance, batch_variance)\n self.add_update(mean_update)\n self.add_update(variance_update)\n mean, variance = batch_mean, batch_variance\n else:\n mean, variance = self.moving_mean, self.moving_variance\n output = batch_norm(inputs, moving_mean=mean, moving_var=variance, beta=self.beta, 
gamma=self.gamma, eps=1e-5)\n return output\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(84),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(10)]\n )\nlr, num_epochs, batch_size = 1.0, 10, 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())\ntf.reshape(net.layers[1].gamma, (-1,)), tf.reshape(net.layers[1].beta, (-1,))\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(84),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(10),\n ])", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True):\n if not is_training:\n X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = paddle.mean(X)\n var = paddle.mean(((X - mean) ** 2))\n else:\n mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True)\n var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / (var + eps) ** 0.5\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Layer):\n def __init__(self, num_features, num_dims=4):\n super(BatchNorm, self).__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32')))\n self.beta = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32')))\n self.moving_mean = paddle.zeros(shape=shape, dtype='float32')\n self.moving_var = paddle.zeros(shape=shape, dtype='float32')\n def forward(self, X):\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9, is_training=self.training)\n return Y\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), 
BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(), nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nlr, num_epochs, batch_size = 1.0, 10, 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())\nparam = net.parameters()\nprint('gamma:', param[2].numpy().reshape(-1))\nprint('beta:', param[3].numpy().reshape(-1))\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), nn.BatchNorm2D(6, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(),\n nn.Linear(84, 10))"} +{"id": 37, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass Residual(tf.keras.Model):\n def __init__(self, num_channels, use_1x1conv=False, strides=1):\n super().__init__()\n self.conv1 = tf.keras.layers.Conv2D(\n num_channels, padding='same', kernel_size=3, strides=strides)\n self.conv2 = tf.keras.layers.Conv2D(\n num_channels, kernel_size=3, padding='same')\n self.conv3 = None\n if use_1x1conv:\n self.conv3 = tf.keras.layers.Conv2D(\n num_channels, kernel_size=1, strides=strides)\n self.bn1 = tf.keras.layers.BatchNormalization()\n self.bn2 = tf.keras.layers.BatchNormalization()\n def call(self, X):\n Y = tf.keras.activations.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3 is not None:\n X = self.conv3(X)\n Y += X\n return tf.keras.activations.relu(Y)\nblk = Residual(3)\nX = tf.random.uniform((4, 6, 6, 3))\nY = blk(X)\nY.shape\nblk = Residual(6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\nclass ResnetBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels, num_residuals, first_block=False, **kwargs):\n super(ResnetBlock, self).__init__(**kwargs)\n self.residual_layers = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n self.residual_layers.append(Residual(num_channels, use_1x1conv=True, strides=2))\n else:\n self.residual_layers.append(Residual(num_channels))\n def call(self, X):\n for layer in self.residual_layers.layers:\n X = layer(X)\n return X\nb2 = ResnetBlock(64, 2, first_block=True)\nb3 = ResnetBlock(128, 2)\nb4 = ResnetBlock(256, 2)\nb5 = ResnetBlock(512, 2)\ndef net():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same'),\n ResnetBlock(64, 2, first_block=True),\n ResnetBlock(128, 2),\n ResnetBlock(256, 2),\n ResnetBlock(512, 2),\n tf.keras.layers.GlobalAvgPool2D(),\n tf.keras.layers.Dense(units=10)])\nX = tf.random.uniform(shape=(1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport 
paddle.nn as nn\nfrom paddle.nn import functional as F\nclass Residual(nn.Layer):\n def __init__(self, input_channels, num_channels, use_1x1conv=False,\n strides=1):\n super(Residual, self).__init__()\n self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2D(num_channels)\n self.bn2 = nn.BatchNorm2D(num_channels)\n self.relu = nn.ReLU()\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3, 3)\nX = paddle.rand([4, 3, 6, 6])\nY = blk(X)\nY.shape\nblk = Residual(3, 6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\ndef resnet_block(input_channels, num_channels, num_residuals, first_block=False):\n blk = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))\n else:\n blk.append(Residual(num_channels, num_channels))\n return blk\nb2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))\nb3 = nn.Sequential(*resnet_block(64, 128, 2))\nb4 = nn.Sequential(*resnet_block(128, 256, 2))\nb5 = nn.Sequential(*resnet_block(256, 512, 2))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 38, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass ConvBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels):\n super(ConvBlock, self).__init__()\n self.bn = tf.keras.layers.BatchNormalization()\n self.relu = tf.keras.layers.ReLU()\n self.conv = tf.keras.layers.Conv2D(filters=num_channels, kernel_size=(3, 3), padding='same')\n self.listLayers = [self.bn, self.relu, self.conv]\n def call(self, x):\n y = x\n for layer in self.listLayers.layers:\n y = layer(y)\n y = tf.keras.layers.concatenate([x,y], axis=-1)\n return y\nclass DenseBlock(tf.keras.layers.Layer):\n def __init__(self, num_convs, num_channels):\n super(DenseBlock, self).__init__()\n self.listLayers = []\n for _ in range(num_convs):\n self.listLayers.append(ConvBlock(num_channels))\n def call(self, x):\n for layer in self.listLayers.layers:\n x = layer(x)\n return x\nblk = DenseBlock(2, 10)\nX = tf.random.uniform((4, 8, 8, 3))\nY = blk(X)\nY.shape\nclass TransitionBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels, **kwargs):\n super(TransitionBlock, self).__init__(**kwargs)\n self.batch_norm = tf.keras.layers.BatchNormalization()\n self.relu = tf.keras.layers.ReLU()\n self.conv = tf.keras.layers.Conv2D(num_channels, kernel_size=1)\n self.avg_pool = tf.keras.layers.AvgPool2D(pool_size=2, strides=2)\n def call(self, x):\n x = self.batch_norm(x)\n x = self.relu(x)\n x = self.conv(x)\n return self.avg_pool(x)\nblk = TransitionBlock(10)\nblk(Y).shape\ndef block_1():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.ReLU(),\n 
tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef block_2():\n net = block_1()\n num_channels, growth_rate = 64, 32\n num_convs_in_dense_blocks = [4, 4, 4, 4]\n for i, num_convs in enumerate(num_convs_in_dense_blocks):\n net.add(DenseBlock(num_convs, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n num_channels //= 2\n net.add(TransitionBlock(num_channels))\n return net\ndef net():\n net = block_2()\n net.add(tf.keras.layers.BatchNormalization())\n net.add(tf.keras.layers.ReLU())\n net.add(tf.keras.layers.GlobalAvgPool2D())\n net.add(tf.keras.layers.Flatten())\n net.add(tf.keras.layers.Dense(10))\n return net", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Layer):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = paddle.concat(x=[X, Y], axis=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = paddle.randn([4, 3, 8, 8])\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2D(kernel_size=2, stride=2))\nblk = transition_block(23, 10)\nblk(Y).shape\nb1 = nn.Sequential(\n nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nblks = []\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n blks.append(DenseBlock(num_convs, num_channels, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n blks.append(transition_block(num_channels, num_channels // 2))\n num_channels = num_channels // 2\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2D(num_channels), nn.ReLU(),\n nn.AdaptiveMaxPool2D((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))"} +{"id": 39, "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nT = 1000\ntime = tf.range(1, T + 1, dtype=tf.float32)\nx = tf.sin(0.01 * time) + tf.random.normal([T], 0, 0.2)\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = tf.Variable(tf.zeros((T - tau, tau)))\nfor i in range(tau):\n features[:, i].assign(x[i: T - tau + i])\nlabels = tf.reshape(x[tau:], (-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef get_net():\n net = tf.keras.Sequential([tf.keras.layers.Dense(10, activation='relu'),\n tf.keras.layers.Dense(1)])\n return net\nloss = tf.keras.losses.MeanSquaredError()\ndef train(net, train_iter, loss, epochs, lr):\n trainer = tf.keras.optimizers.Adam()\n for epoch in range(epochs):\n for X, y in train_iter:\n with tf.GradientTape() as g:\n out = net(X)\n l = loss(y, out)\n params = net.trainable_variables\n grads = g.gradient(l, params)\n trainer.apply_gradients(zip(grads, params))\nnet = 
get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.numpy(), onestep_preds.numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = tf.Variable(tf.zeros(T))\nmultistep_preds[:n_train + tau].assign(x[:n_train + tau])\nfor i in range(n_train + tau, T):\n multistep_preds[i].assign(tf.reshape(net(tf.reshape(multistep_preds[i - tau: i], (1, -1))), ()))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.numpy(), onestep_preds.numpy(),\n multistep_preds[n_train + tau:].numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = tf.Variable(tf.zeros((T - tau - max_steps + 1, tau + max_steps)))\nfor i in range(tau):\n features[:, i].assign(x[i: i + T - tau - max_steps + 1].numpy())\nfor i in range(tau, tau + max_steps):\n features[:, i].assign(tf.reshape(net((features[:, i - tau: i])), -1))\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nT = 1000\ntime = paddle.arange(1, T + 1, dtype=paddle.float32)\nx = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = paddle.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.XavierUniform(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters())\n for epoch in range(epochs):\n for i,(X, y) in enumerate (train_iter()):\n trainer.clear_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = paddle.zeros([T])\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = paddle.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape([-1])\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for 
i in steps], xlim=[5, 1000],\n figsize=(6, 3))"} +{"id": 40, "tensorflow": "import collections\nimport re\nfrom d2l import tensorflow as d2l\ndef tokenize(lines, token='word'):\n if token == 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('Error: Unknown word element type:' + token)\ntokens = tokenize(lines)\nfor i in range(11):\n print(tokens[i])\ndef load_corpus_time_machine(max_tokens=-1):\n lines = read_time_machine()\n tokens = tokenize(lines, 'char')\n vocab = Vocab(tokens)\n corpus = [vocab[token] for line in tokens for token in line]\n if max_tokens > 0:\n corpus = corpus[:max_tokens]\n return corpus, vocab\ncorpus, vocab = load_corpus_time_machine()\nlen(corpus), len(vocab)", "paddle": "import collections\nimport re\nfrom d2l import paddle as d2l\ndef tokenize(lines, token='word'):\n if token == 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('Error: Unknown word element type:' + token)\ntokens = tokenize(lines)\nfor i in range(11):\n print(tokens[i])\ndef load_corpus_time_machine(max_tokens=-1):\n lines = read_time_machine()\n tokens = tokenize(lines, 'char')\n vocab = Vocab(tokens)\n corpus = [vocab[token] for line in tokens for token in line]\n if max_tokens > 0:\n corpus = corpus[:max_tokens]\n return corpus, vocab\ncorpus, vocab = load_corpus_time_machine()\nlen(corpus), len(vocab)"} +{"id": 41, "tensorflow": "import random\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield tf.constant(X), tf.constant(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = tf.constant(corpus[offset: offset + num_tokens])\n Ys = tf.constant(corpus[offset + 1: offset + 1 + num_tokens])\n Xs = tf.reshape(Xs, (batch_size, -1))\n Ys = tf.reshape(Ys, (batch_size, -1))\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_batches * num_steps, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n 
for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield paddle.to_tensor(X), paddle.to_tensor(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = paddle.to_tensor(corpus[offset: offset + num_tokens])\n Ys = paddle.to_tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1))\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"} +{"id": 42, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nX, W_xh = tf.random.normal((3, 1), 0, 1), tf.random.normal((1, 4), 0, 1)\nH, W_hh = tf.random.normal((3, 4), 0, 1), tf.random.normal((4, 4), 0, 1)\ntf.matmul(X, W_xh) + tf.matmul(H, W_hh)\ntf.matmul(tf.concat((X, H), 1), tf.concat((W_xh, W_hh), 0))", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nX, W_xh = paddle.normal(0, 1, (3, 1)), paddle.normal(0, 1, (1, 4))\nH, W_hh = paddle.normal(0, 1, (3, 4)), paddle.normal(0, 1, (4, 4))\npaddle.matmul(X, W_xh) + paddle.matmul(H, W_hh)\npaddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0))"} +{"id": 43, "tensorflow": "%matplotlib inline\nimport math\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ntrain_random_iter, vocab_random_iter = d2l.load_data_time_machine(batch_size, num_steps, use_random_iter=True)\ntf.one_hot(tf.constant([0, 2]), len(vocab))\nX = tf.reshape(tf.range(10), (2, 5))\ntf.one_hot(tf.transpose(X), 28).shape\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32)\n W_xh = tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32)\n W_hh = tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32)\n b_h = tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32)\n W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32)\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n return params\ndef init_rnn_state(batch_size, num_hiddens):\n return (tf.zeros((batch_size, num_hiddens)), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n X = tf.reshape(X,[-1,W_xh.shape[0]])\n H = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(H, W_hh) + b_h)\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, init_state, forward_fn, get_params):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.init_state, self.forward_fn = init_state, forward_fn\n self.trainable_variables = get_params(vocab_size, num_hiddens)\n def __call__(self, X, state):\n X = tf.one_hot(tf.transpose(X), self.vocab_size)\n X = tf.cast(X, tf.float32)\n return self.forward_fn(X, state, self.trainable_variables)\n def begin_state(self, batch_size, *args, **kwargs):\n return self.init_state(batch_size, 
self.num_hiddens)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nnum_hiddens = 512\nwith strategy.scope():\n net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params)\nstate = net.begin_state(X.shape[0])\nY, new_state = net(X, state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab):\n state = net.begin_state(batch_size=1, dtype=tf.float32)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: tf.reshape(tf.constant([outputs[-1]]),\n (1, 1)).numpy()\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.numpy().argmax(axis=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\npredict_ch8('time traveller ', 10, net, vocab)\ndef grad_clipping(grads, theta):\n theta = tf.constant(theta, dtype=tf.float32)\n new_grad = []\n for grad in grads:\n if isinstance(grad, tf.IndexedSlices):\n new_grad.append(tf.convert_to_tensor(grad))\n else:\n new_grad.append(grad)\n norm = tf.math.sqrt(sum((tf.reduce_sum(grad ** 2)).numpy()\n for grad in new_grad))\n norm = tf.cast(norm, tf.float32)\n if tf.greater(norm, theta):\n for i, grad in enumerate(new_grad):\n new_grad[i] = grad * theta / norm\n else:\n new_grad = new_grad\n return new_grad\ndef train_epoch_ch8(net, train_iter, loss, updater, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], dtype=tf.float32)\n with tf.GradientTape(persistent=True) as g:\n y_hat, state = net(X, state)\n y = tf.reshape(tf.transpose(Y), (-1))\n l = loss(y, y_hat)\n params = net.trainable_variables\n grads = g.gradient(l, params)\n grads = grad_clipping(grads, 1)\n updater.apply_gradients(zip(grads, params))\n metric.add(l * d2l.size(y), d2l.size(y))\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, strategy, use_random_iter=False):\n with strategy.scope():\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n updater = tf.keras.optimizers.SGD(lr)\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\n device = d2l.try_gpu()._device_name\nnum_epochs, lr = 500, 1\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, strategy)\nwith strategy.scope():\n net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params)\ntrain_ch8(net, train_iter, vocab_random_iter, lr, num_epochs, strategy, use_random_iter=True)", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nF.one_hot(paddle.to_tensor([0, 2]), len(vocab))\nX = paddle.arange(10).reshape((2, 5))\nF.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)* 0.01\n W_xh = normal([num_inputs, 
num_hiddens])\n W_hh = normal([num_hiddens, num_hiddens])\n b_h = paddle.zeros(shape=[num_hiddens])\n W_hq = normal([num_hiddens, num_outputs])\n b_q = paddle.zeros(shape=[num_outputs])\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient=False\n return params\ndef init_rnn_state(batch_size, num_hiddens):\n return (paddle.zeros(shape=[batch_size, num_hiddens]), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = paddle.tanh(paddle.mm(X, W_xh) + paddle.mm(H, W_hh) + b_h)\n Y = paddle.mm(H, W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(x=outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = F.one_hot(X.T, self.vocab_size)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size):\n return self.init_state(batch_size, self.num_hiddens)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0])\nY, new_state = net(X, state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: paddle.to_tensor(outputs[-1], place=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(paddle.reshape(paddle.argmax(y,axis=1),shape=[1])))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\npredict_ch8('time traveller ', 10, net, vocab, d2l.try_gpu())\ndef grad_clipping(net, theta):\n if isinstance(net, nn.Layer):\n params = [p for p in net.parameters() if not p.stop_gradient]\n else:\n params = net.params\n norm = paddle.sqrt(sum(paddle.sum((p.grad ** 2)) for p in params))\n if norm > theta:\n with paddle.no_grad():\n for param in params:\n param.grad.set_value(param.grad * theta / norm)\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0])\n else:\n if isinstance(net, nn.Layer) and not isinstance(state, tuple):\n state.stop_gradient=True\n else:\n for s in state:\n s.stop_gradient=True\n y = paddle.reshape(Y.T,shape=[-1])\n X = paddle.to_tensor(X, place=device)\n y = paddle.to_tensor(y, place=device)\n y_hat, state = net(X, state)\n l = loss(y_hat, y).mean()\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.backward()\n grad_clipping(net, 1)\n updater.step()\n else:\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * y.numel(), y.numel())\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, nn.Layer):\n updater = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n else:\n updater = lambda batch_size: 
d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\nnum_epochs, lr = 500, 1\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu())\nnet = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)"} +{"id": 44, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_cell = tf.keras.layers.SimpleRNNCell(num_hiddens, kernel_initializer='glorot_uniform')\nrnn_layer = tf.keras.layers.RNN(rnn_cell, time_major=True, return_sequences=True, return_state=True)\nstate = rnn_cell.get_initial_state(batch_size=batch_size, dtype=tf.float32)\nstate.shape\nX = tf.random.uniform((num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, len(state_new), state_new[0].shape\nclass RNNModel(tf.keras.layers.Layer):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.dense = tf.keras.layers.Dense(vocab_size)\n def call(self, inputs, state):\n X = tf.one_hot(tf.transpose(inputs), self.vocab_size)\n Y, *state = self.rnn(X, state)\n output = self.dense(tf.reshape(Y, (-1, Y.shape[-1])))\n return output, state\n def begin_state(self, *args, **kwargs):\n return self.rnn.cell.get_initial_state(*args, **kwargs)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n net = RNNModel(rnn_layer, vocab_size=len(vocab))\nd2l.predict_ch8('time traveller', 10, net, vocab)\nnum_epochs, lr = 500, 1\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, strategy)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = nn.SimpleRNN(len(vocab), num_hiddens, time_major=True)\nstate = paddle.zeros(shape=[1, batch_size, num_hiddens])\nstate.shape\nX = paddle.rand(shape=[num_steps, batch_size, len(vocab)])\nY, state_new = rnn_layer(X, state)\nY.shape, state_new.shape\nclass RNNModel(nn.Layer):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.num_hiddens = self.rnn.hidden_size\n if self.rnn.num_directions==1:\n self.num_directions = 1\n self.linear = nn.Linear(self.num_hiddens, self.vocab_size)\n else:\n self.num_directions = 2\n self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)\n def forward(self, inputs, state):\n X = F.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.linear(Y.reshape((-1, Y.shape[-1])))\n return output, state\n def begin_state(self, batch_size=1):\n if not isinstance(self.rnn, nn.LSTM):\n return paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens])\n else:\n return (paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]),\n paddle.zeros(shape=[self.num_directions * 
self.rnn.num_layers, batch_size, self.num_hiddens]))\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, vocab_size=len(vocab))\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1.0\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)"} +{"id": 45, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32)\n def three():\n return (tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32), tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32)\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n return params\ndef init_gru_state(batch_size, num_hiddens):\n return (tf.zeros((batch_size, num_hiddens)), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n X = tf.reshape(X,[-1,W_xh.shape[0]])\n Z = tf.sigmoid(tf.matmul(X, W_xz) + tf.matmul(H, W_hz) + b_z)\n R = tf.sigmoid(tf.matmul(X, W_xr) + tf.matmul(H, W_hr) + b_r)\n H_tilda = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(R * H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,)\nvocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nnum_epochs, lr = 500, 1\nwith strategy.scope():\n model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_gru_state, gru, get_params)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)\ngru_cell = tf.keras.layers.GRUCell(num_hiddens, kernel_initializer='glorot_uniform')\ngru_layer = tf.keras.layers.RNN(gru_cell, time_major=True, return_sequences=True, return_state=True)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModel(gru_layer, vocab_size=len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_gru_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, 
W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H,*_ = state\n outputs = []\n for X in inputs:\n Z = F.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)\n R = F.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)\n H_tilda = paddle.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = H @ W_hq + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H,*_)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\ngru_layer = nn.GRU(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"} +{"id": 46, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.Variable(tf.random.normal(shape=shape, stddev=0.01, mean=0, dtype=tf.float32))\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n return params\ndef init_lstm_state(batch_size, num_hiddens):\n return (tf.zeros(shape=(batch_size, num_hiddens)), tf.zeros(shape=(batch_size, num_hiddens)))\ndef lstm(inputs, state, params):\n W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q = params\n (H, C) = state\n outputs = []\n for X in inputs:\n X=tf.reshape(X,[-1,W_xi.shape[0]])\n I = tf.sigmoid(tf.matmul(X, W_xi) + tf.matmul(H, W_hi) + b_i)\n F = tf.sigmoid(tf.matmul(X, W_xf) + tf.matmul(H, W_hf) + b_f)\n O = tf.sigmoid(tf.matmul(X, W_xo) + tf.matmul(H, W_ho) + b_o)\n C_tilda = tf.tanh(tf.matmul(X, W_xc) + tf.matmul(H, W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * tf.tanh(C)\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,C)\nvocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name\nnum_epochs, lr = 500, 1\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_lstm_state, lstm, get_lstm_params)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)\nlstm_cell = tf.keras.layers.LSTMCell(num_hiddens, kernel_initializer='glorot_uniform')\nlstm_layer = tf.keras.layers.RNN(lstm_cell, time_major=True, return_sequences=True, return_state=True)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModel(lstm_layer, vocab_size=len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as Function\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, 
num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_lstm_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), paddle.zeros([batch_size, num_hiddens]))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = Function.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)\n F = Function.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)\n O = Function.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)\n C_tilda = paddle.tanh((X @ W_xc) + (H @ W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * paddle.tanh(C)\n Y = (H @ W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\nlstm_layer = nn.LSTM(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"} +{"id": 47, "tensorflow": "import os\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = tf.constant([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = tf.reduce_sum(\n tf.cast(array != vocab['<pad>'], tf.int32), 1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', tf.cast(X, tf.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', tf.cast(Y, tf.int32))\n print('Valid length of Y:', Y_valid_len)\n break", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport os\nimport paddle\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = paddle.to_tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = (array != vocab['<pad>']).astype(paddle.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(paddle.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.astype(paddle.int32))\n print('Valid length of Y:', Y_valid_len)\n break"} +{"id": 48, "mxnet": "x = np.arange(12)\nx.size\nX = x.reshape(3, 4)\nnp.zeros((2, 3, 4))\nnp.ones((2, 3, 4))\nnp.random.normal(0, 1, size=(3, 4))\nnp.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = np.array([1, 2, 4, 8])\ny = np.array([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\nnp.exp(x)\nX = np.arange(12).reshape(3, 4)\nY = np.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nnp.concatenate([X, 
Y], axis=0), np.concatenate([X, Y], axis=1)\na = np.arange(3).reshape(3, 1)\nb = np.arange(2).reshape(1, 2)\nZ = np.zeros_like(Y)\nZ[:] = X + Y\nA = X.asnumpy()\nB = np.array(A)\na = np.array([3.5])\nprint(a, a.item(), float(a), int(a))", "paddle": "x = paddle.arange(12)\nx.numel()\nX = paddle.reshape(x, (3, 4))\npaddle.zeros((2, 3, 4))\npaddle.ones((2, 3, 4))\npaddle.randn((3, 4),'float32')\npaddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = paddle.to_tensor([1.0, 2, 4, 8])\ny = paddle.to_tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x**y\npaddle.exp(x)\nX = paddle.arange(12, dtype='float32').reshape((3, 4))\nY = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\npaddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1)\na = paddle.reshape(paddle.arange(3), (3, 1))\nb = paddle.reshape(paddle.arange(2), (1, 2))\nZ = paddle.zeros_like(Y)\nZ = X + Y\nA = X.numpy()\nB = paddle.to_tensor(A)\ntype(A), type(B)\na = paddle.to_tensor([3.5])\na, a.item(), float(a), int(a)"} +{"id": 49, "mxnet": "from mxnet import np\nX, y = np.array(inputs.values), np.array(outputs.values)", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nX, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values)"} +{"id": 50, "mxnet": "from mxnet import np, npx\nnpx.set_np()\nx = np.array(3.0)\ny = np.array(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = np.arange(4)\nA = np.arange(20).reshape(5, 4)\nA.T\nB = np.array([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == B.T\nX = np.arange(24).reshape(2, 3, 4)\nA = np.arange(20).reshape(5, 4)\nB = A.copy()\nprint(A, A + B)\na = 2\nX = np.arange(24).reshape(2, 3, 4)\nprint(a + X, (a * X).shape)\nx = np.arange(4)\nprint(x, x.sum())\na = A.sum()\nA.mean()\nA.sum() / A.size\nA.mean(axis=0)\nA.sum(axis=0) / A.shape[0]\nsum_A = A.sum(axis=1, keepdims=True)\ny = np.ones(4)\nprint(np.dot(x, y))\nnp.sum(x * y)\nA.shape, x.shape, np.dot(A, x)\nB = np.ones(shape=(4, 3))\nnp.dot(A, B)\nu = np.array([3, -4])\nnp.linalg.norm(u)\nnp.abs(u).sum()\nnp.linalg.norm(np.ones((4, 9)))", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nx = paddle.to_tensor([3.0])\ny = paddle.to_tensor([2.0])\nx + y, x * y, x / y, x**y\nx = paddle.arange(4)\nA = paddle.reshape(paddle.arange(20), (5, 4))\npaddle.transpose(A, perm=[1, 0])\nB = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == paddle.transpose(B, perm=[1, 0])\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\nA = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4))\nB = A.clone()\nA, A + B\na = 2\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\na + X, (a * X).shape\nx = paddle.arange(4, dtype=paddle.float32)\nprint(x, x.sum())\nA.shape, A.sum()\nA.mean(), A.sum() / A.numel()\nA.mean(axis=0), A.sum(axis=0) / A.shape[0]\nsum_A = paddle.sum(A, axis=1, keepdim=True)\ny = paddle.ones(shape=[4], dtype='float32')\nx, y, paddle.dot(x, y)\npaddle.sum(x * y)\nA.shape, x.shape, paddle.mv(A, x)\nB = paddle.ones(shape=[4, 3], dtype='float32')\npaddle.mm(A, B)\nu = paddle.to_tensor([3.0, -4.0])\npaddle.norm(u)\npaddle.abs(u).sum()\npaddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))"} +{"id": 51, "mxnet": "%matplotlib inline\nfrom matplotlib_inline import backend_inline\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 
0.1", "paddle": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import paddle as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1"} +{"id": 52, "mxnet": "from mxnet import autograd, np, npx\nnpx.set_np()\nx = np.arange(4.0)\nx.attach_grad()\nx.grad\nwith autograd.record():\n y = 2 * np.dot(x, x)\nwith autograd.record():\n y = x.sum()\ny.backward()\nx.grad\nwith autograd.record():\n y = x * x\ny.backward()\nx.grad\nwith autograd.record():\n y = x * x\n u = y.detach()\n z = u * x\nz.backward()\nx.grad == u\ny.backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while np.linalg.norm(b) < 1000:\n b = b * 2\n if b.sum() > 0:\n c = b\n else:\n c = 100 * b\n return c\na = np.random.normal()\na.attach_grad()\nwith autograd.record():\n d = f(a)\nd.backward()", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nx = paddle.arange(4, dtype='float32')\nx = paddle.to_tensor(x, stop_gradient=False)\ny = 2 * paddle.dot(x, x)\nx.clear_gradient()\ny = paddle.sum(x)\ny.backward()\nx.grad\nx.clear_gradient()\ny = x * x\npaddle.sum(y).backward()\nx.grad\nx.clear_gradient()\ny = x * x\nu = y.detach()\nz = u * x\npaddle.sum(z).backward()\nx.grad == u\nx.clear_gradient()\npaddle.sum(y).backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while paddle.norm(b) < 1000:\n b = b * 2\n if paddle.sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False)\nd = f(a)\nd.backward()"} +{"id": 53, "mxnet": "%matplotlib inline\nimport random\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nfair_probs = [1.0 / 6] * 6\nnp.random.multinomial(1, fair_probs)\nnp.random.multinomial(10, fair_probs)\ncounts = np.random.multinomial(1000, fair_probs).astype(np.float32)", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport numpy as np\nimport paddle\nfair_probs = [1.0 / 6] * 6\npaddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample()\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000"} +{"id": 54, "mxnet": "counts = np.random.multinomial(10, fair_probs, size=500)\ncum_counts = counts.astype(np.float32).cumsum(axis=0)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].asnumpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nfrom mxnet import np\na = dir(np.random)\nhelp(np.ones)\nnp.ones(4)", "paddle": "counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1))\ncum_counts = counts.cumsum(axis=0)\ncum_counts = cum_counts.squeeze(axis=1)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdim=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i],\n label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated 
probability')\nd2l.plt.legend()\nimport warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nhelp(paddle.ones)\npaddle.ones([4], dtype='float32')"} +{"id": 55, "mxnet": "%matplotlib inline\nimport math\nimport time\nfrom mxnet import np\nfrom d2l import mxnet as d2l\nn = 10000\na = np.ones(n)\nb = np.ones(n)\nc = np.zeros(n)\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x.asnumpy(), [normal(x, mu, sigma).asnumpy() for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport time\nimport numpy as np\nimport paddle\nn = 10000\na = paddle.ones([n])\nb = paddle.ones([n])\nc = paddle.zeros([n])\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x',\n ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"} +{"id": 56, "mxnet": "%matplotlib inline\nimport random\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef synthetic_data(w, b, num_examples):\n X = np.random.normal(0, 1, (num_examples, len(w)))\n y = np.dot(X, w) + b\n y += np.random.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = np.array([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].asnumpy(), labels.asnumpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = np.array(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n print(X, '\n', y)\n break\nw = np.random.normal(0, 0.01, (2, 1))\nb = np.zeros(1)\nw.attach_grad()\nb.attach_grad()\ndef linreg(X, w, b):\n return np.dot(X, w) + b\ndef sgd(params, lr, batch_size):\n for param in params:\n param[:] = param - lr * param.grad / batch_size\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n with autograd.record():\n l = loss(net(X, w, b), y)\n l.backward()\n sgd([w, b], lr, batch_size)\n train_l = loss(net(features, w, b), labels)", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ndef synthetic_data(w, b, num_examples):\n X = paddle.normal(0, 1, (num_examples, len(w)))\n y = paddle.matmul(X, w) + b\n y += paddle.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 
10\nfor X, y in data_iter(batch_size, features, labels):\n break\nw = paddle.normal(0, 0.01, shape=(2,1))\nb = paddle.zeros(shape=[1])\nw.stop_gradient = False\nb.stop_gradient = False\ndef linreg(X, w, b):\n return paddle.matmul(X, w) + b\ndef sgd(params, lr, batch_size):\n with paddle.no_grad():\n for i, param in enumerate(params):\n param -= lr * params[i].grad / batch_size\n params[i].set_value(param)\n params[i].clear_gradient()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with paddle.no_grad():\n train_l = loss(net(features, w, b), labels)"} +{"id": 57, "mxnet": "from mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ntrue_w = np.array([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = gluon.data.ArrayDataset(*data_arrays)\n return gluon.data.DataLoader(dataset, batch_size, shuffle=is_train)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom mxnet.gluon import nn\nnet = nn.Sequential()\nnet.add(nn.Dense(1))\nfrom mxnet import init\nnet.initialize(init.Normal(sigma=0.01))\nloss = gluon.loss.L2Loss()\nfrom mxnet import gluon\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03})\nw = net[0].weight.data()\nb = net[0].bias.data()", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport paddle\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = paddle.io.TensorDataset(data_arrays)\n return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01))\nbias_attr = paddle.ParamAttr(initializer=None)\nnet = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr))\nloss = nn.MSELoss()\ntrainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters())\nw = net[0].weight\nb = net[0].bias"} +{"id": 58, "mxnet": "%matplotlib inline\nimport sys\nfrom mxnet import gluon\nfrom d2l import mxnet as d2l\nd2l.use_svg_display()\nmnist_train = gluon.data.vision.FashionMNIST(train=True)\nmnist_test = gluon.data.vision.FashionMNIST(train=False)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(img.asnumpy())\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = mnist_train[:18]\nshow_images(X.squeeze(axis=-1), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\n return 0 if sys.platform.startswith('win') else 4\ntransformer = gluon.data.vision.transforms.ToTensor()\ntrain_iter = gluon.data.DataLoader(mnist_train.transform_first(transformer), batch_size, shuffle=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n dataset = gluon.data.vision\n trans 
= [dataset.transforms.ToTensor()]\n if resize:\n trans.insert(0, dataset.transforms.Resize(resize))\n trans = dataset.transforms.Compose(trans)\n mnist_train = dataset.FashionMNIST(train=True).transform_first(trans)\n mnist_test = dataset.FashionMNIST(train=False).transform_first(trans)\n return (gluon.data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),\n gluon.data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport sys\nimport paddle\nfrom paddle.vision import transforms\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\nmnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if paddle.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\n return 4\ntrain_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers())\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\n mnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\n return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()),\n paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, num_workers=get_dataloader_workers()))"} +{"id": 59, "mxnet": "from IPython import display\nfrom mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = np.random.normal(0, 0.01, (num_inputs, num_outputs))\nb = np.zeros(num_outputs)\nW.attach_grad()\nb.attach_grad()\nX = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdims=True), X.sum(1, keepdims=True)\ndef softmax(X):\n X_exp = np.exp(X)\n partition = X_exp.sum(1, keepdims=True)\n return X_exp / partition\nX = np.random.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(np.dot(X.reshape((-1, W.shape[0])), W) + b)\ny = np.array([0, 2])\ny_hat = np.array([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - np.log(y_hat[range(len(y_hat)), y])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n metric = Accumulator(2)\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), d2l.size(y))\n return metric[0] / 
metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n metric = Accumulator(3)\n if isinstance(updater, gluon.Trainer):\n updater = updater.step\n for X, y in train_iter:\n with autograd.record():\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.size)\n return metric[0] / metric[2], metric[1] / metric[2]", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom IPython import display\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs))\nb = paddle.zeros(shape=(num_outputs,))\nW.stop_gradient=False\nb.stop_gradient=False\nX = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = paddle.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = paddle.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = paddle.to_tensor([0, 2])\ny_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n if len(y_hat.shape) < len(y.shape):\n cmp = y_hat.astype(y.dtype) == y.squeeze()\n else:\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, paddle.nn.Layer):\n net.eval()\n metric = Accumulator(2)\n with paddle.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, paddle.nn.Layer):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]"} +{"id": 60, "mxnet": "from mxnet import gluon, init, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential()\nnet.add(nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.Normal(m.weight, std=0.01)\nnet.apply(init_weights);\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())"} +{"id": 61, "mxnet": "%matplotlib inline\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nx = np.arange(-8.0, 8.0, 
0.1)\nx.attach_grad()\nwith autograd.record():\n y = npx.relu(x)\nd2l.plot(x, y, 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of relu', figsize=(5, 2.5))\nwith autograd.record():\n y = npx.sigmoid(x)\nd2l.plot(x, y, 'x', 'sigmoid(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))\nwith autograd.record():\n y = np.tanh(x)\nd2l.plot(x, y, 'x', 'tanh(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(-8.0, 8.0, 0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.relu(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = paddle.nn.functional.sigmoid(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = paddle.tanh(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))"} +{"id": 62, "mxnet": "from mxnet import gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens))\nb1 = np.zeros(num_hiddens)\nW2 = np.random.normal(scale=0.01, size=(num_hiddens, num_outputs))\nb2 = np.zeros(num_outputs)\nparams = [W1, b1, W2, b2]\nfor param in params:\n param.attach_grad()\ndef relu(X):\n return np.maximum(X, 0)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(np.dot(X, W1) + b1)\n return np.dot(H, W2) + b2\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\nnum_epochs, lr = 10, 0.1\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = paddle.randn([num_inputs, num_hiddens]) * 0.01\nW1.stop_gradient = False\nb1 = paddle.zeros([num_hiddens])\nb1.stop_gradient = False\nW2 = paddle.randn([num_hiddens, num_outputs]) * 0.01\nW2.stop_gradient = False\nb2 = paddle.zeros([num_outputs])\nb2.stop_gradient = False\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = paddle.zeros_like(X)\n return paddle.maximum(X, a)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(X@W1 + b1)\n return (H@W2 + b2)\nloss = nn.CrossEntropyLoss(reduction='none')\nnum_epochs, lr = 10, 0.1\nupdater = paddle.optimizer.SGD(learning_rate=lr, parameters=params)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)"} +{"id": 63, "mxnet": "from mxnet import gluon, init, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation='relu'), 
nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Linear(256, 10))\nfor layer in net:\n if type(layer) == nn.Linear:\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=0.01))\n layer.weight_attr = weight_attr\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 64, "mxnet": "import math\nfrom mxnet import gluon, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(l.sum(), d2l.size(l))\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = gluon.loss.L2Loss()\n net = nn.Sequential()\n net.add(nn.Dense(1, use_bias=False))\n net.initialize()\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False)\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01})\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:], num_epochs=1500)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport numpy as np\nimport paddle\nfrom paddle import nn\ntrue_w, features, poly_features, labels = [paddle.to_tensor(x, dtype=\n paddle.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n out = net(X)\n y = y.reshape(out.shape)\n l = loss(out, y)\n metric.add(l.sum(), l.numel())\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels,\n num_epochs=400):\n loss = nn.MSELoss()\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array(((train_features, train_labels.reshape([-1,1]))), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, 
is_train=False)\n trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2],\n labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :],\n labels[:n_train], labels[n_train:], num_epochs=1500)"} +{"id": 65, "mxnet": "%matplotlib inline\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = np.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = np.random.normal(scale=1, size=(num_inputs, 1))\n b = np.zeros(1)\n w.attach_grad()\n b.attach_grad()\n return [w, b]\ndef l2_penalty(w):\n return (w**2).sum() / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = nn.Sequential()\n net.add(nn.Dense(1))\n net.initialize(init.Normal(sigma=1))\n loss = gluon.loss.L2Loss()\n num_epochs, lr = 100, 0.003\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr, 'wd': wd})\n net.collect_params('.*bias').setattr('wd_mult', 0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = paddle.normal(0, 1, shape=(num_inputs, 1))\n w.stop_gradient = False\n b = paddle.zeros(shape=[1])\n b.stop_gradient = False\n return [w, b]\ndef l2_penalty(w):\n return paddle.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n 
animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter():\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n bias_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n net = nn.Sequential(nn.Linear(num_inputs, 1, weight_attr=weight_attr, bias_attr=bias_attr))\n loss = nn.MSELoss()\n num_epochs, lr = 100, 0.003\n trainer = paddle.optimizer.SGD(parameters=net[0].parameters(), learning_rate=lr, weight_decay=wd*1.0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n trainer.step()\n trainer.clear_grad()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"} +{"id": 66, "mxnet": "from mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return np.zeros_like(X)\n if dropout == 0:\n return X\n mask = np.random.uniform(0, 1, X.shape) > dropout\n return mask.astype(np.float32) * X / (1.0 - dropout)\nX = np.arange(16).reshape(2, 8)\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\nW1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens1))\nb1 = np.zeros(num_hiddens1)\nW2 = np.random.normal(scale=0.01, size=(num_hiddens1, num_hiddens2))\nb2 = np.zeros(num_hiddens2)\nW3 = np.random.normal(scale=0.01, size=(num_hiddens2, num_outputs))\nb3 = np.zeros(num_outputs)\nparams = [W1, b1, W2, b2, W3, b3]\nfor param in params:\n param.attach_grad()\ndropout1, dropout2 = 0.2, 0.5\ndef net(X):\n X = X.reshape(-1, num_inputs)\n H1 = npx.relu(np.dot(X, W1) + b1)\n if autograd.is_training():\n H1 = dropout_layer(H1, dropout1)\n H2 = npx.relu(np.dot(H1, W2) + b2)\n if autograd.is_training():\n H2 = dropout_layer(H2, dropout2)\n return np.dot(H2, W3) + b3\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation=\"relu\"),\n nn.Dropout(dropout1),\n nn.Dense(256, activation=\"relu\"),\n nn.Dropout(dropout2),\n nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport random\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return paddle.zeros_like(X)\n if dropout == 0:\n return X\n mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32')\n return mask * X / (1.0 - 
dropout)\nX= paddle.arange(16, dtype = paddle.float32).reshape((2, 8))\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Layer):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,\n is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01))\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10, weight_attr=weight_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 67, "mxnet": "trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nx = np.arange(-8.0, 8.0, 0.1)\nx.attach_grad()\nwith autograd.record():\n y = npx.sigmoid(x)\ny.backward()\nd2l.plot(x, [y, x.grad], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = np.random.normal(size=(4, 4))\nfor i in range(100):\n M = np.dot(M, np.random.normal(size=(4, 4)))", "paddle": "trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.sigmoid(x)\ny.backward(paddle.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()],\n legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = paddle.normal(0, 1, shape=(4,4))\nfor i in range(100):\n M = paddle.mm(M, paddle.normal(0, 1, shape=(4, 4)))"} +{"id": 68, "mxnet": "%matplotlib inline\nimport pandas as pd\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nn_train = train_data.shape[0]\ntrain_features = np.array(all_features[:n_train].values, dtype=np.float32)\ntest_features = np.array(all_features[n_train:].values, dtype=np.float32)\ntrain_labels = np.array(train_data.SalePrice.values.reshape(-1, 1), dtype=np.float32)\nloss = gluon.loss.L2Loss()\ndef get_net():\n net = nn.Sequential()\n net.add(nn.Dense(1))\n net.initialize()\n return net\ndef log_rmse(net, features, labels):\n 
clipped_preds = np.clip(net(features), 1, float('inf'))\n return np.sqrt(2 * loss(np.log(clipped_preds), np.log(labels)).mean())\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': learning_rate, 'wd': weight_decay})\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = np.concatenate([X_train, X_part], 0)\n y_train = np.concatenate([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).asnumpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)", "paddle": "%matplotlib inline\nimport warnings\nimport numpy as np\nimport pandas as pd\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\nn_train = train_data.shape[0]\ntrain_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32)\ntest_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32)\ntrain_labels = paddle.to_tensor(\n train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32)\nloss = nn.MSELoss()\nin_features = train_features.shape[1]\ndef get_net():\n net = nn.Sequential(nn.Linear(in_features,1))\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = paddle.clip(net(features), 1, float('inf'))\n rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, parameters=net.parameters(), weight_decay=weight_decay*1.0)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n optimizer.clear_grad()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n 
X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = paddle.concat([X_train, X_part], 0)\n y_train = paddle.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).detach().numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)"} +{"id": 69, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation='relu'))\nnet.add(nn.Dense(10))\nnet.initialize()\nX = np.random.uniform(size=(2, 20))\nnet(X)\nclass MLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.hidden = nn.Dense(256, activation='relu')\n self.out = nn.Dense(10)\n def forward(self, X):\n return self.out(self.hidden(X))\nnet = MLP()\nnet.initialize()\nnet(X)\nclass MySequential(nn.Block):\n def add(self, block):\n\n self._children[block.name] = block\n def forward(self, X):\n for block in self._children.values():\n X = block(X)\n return X\nnet = MySequential()\nnet.add(nn.Dense(256, activation='relu'))\nnet.add(nn.Dense(10))\nnet.initialize()\nnet(X)\nclass FixedHiddenMLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.rand_weight = self.params.get_constant('rand_weight', np.random.uniform(size=(20, 20)))\n self.dense = nn.Dense(20, activation='relu')\n def forward(self, X):\n X = self.dense(X)\n X = npx.relu(np.dot(X, self.rand_weight.data()) + 1)\n X = self.dense(X)\n while np.abs(X).sum() > 1:\n X /= 2\n return X.sum()\nnet = FixedHiddenMLP()\nnet.initialize()\nnet(X)\nclass NestMLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.net = nn.Sequential()\n self.net.add(nn.Dense(64, activation='relu'), nn.Dense(32, activation='relu'))\n self.dense = nn.Dense(16, activation='relu')\n def forward(self, X):\n return self.dense(self.net(X))\nchimera = nn.Sequential()\nchimera.add(NestMLP(), nn.Dense(20), FixedHiddenMLP())\nchimera.initialize()\nchimera(X)", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = paddle.rand([2, 20])\nnet(X)\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nnet = MLP()\nnet(X)\nclass MySequential(nn.Layer):\n def __init__(self, *layers):\n super(MySequential, self).__init__()\n if len(layers) > 0 and isinstance(layers[0], tuple):\n for name, layer in layers:\n self.add_sublayer(name, layer)\n else:\n for idx, layer in enumerate(layers):\n self.add_sublayer(str(idx), layer)\n def forward(self, X):\n for layer in self._sub_layers.values():\n X = layer(X)\n return X\nnet = 
MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nnet(X)\nclass FixedHiddenMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.rand_weight = paddle.rand([20, 20])\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nnet = FixedHiddenMLP()\nnet(X)\nclass NestMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),\n nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)"} +{"id": 70, "mxnet": "from mxnet import init, np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(8, activation='relu'))\nnet.add(nn.Dense(1))\nnet.initialize()\nX = np.random.uniform(size=(2, 4))\nnet(X)\nnet.collect_params()['dense1_bias'].data()\ndef block1():\n net = nn.Sequential()\n net.add(nn.Dense(32, activation='relu'))\n net.add(nn.Dense(16, activation='relu'))\n return net\ndef block2():\n net = nn.Sequential()\n for _ in range(4):\n net.add(block1())\n return net\nrgnet = nn.Sequential()\nrgnet.add(block2())\nrgnet.add(nn.Dense(10))\nrgnet.initialize()\nrgnet(X)\nnet.initialize(init=init.Normal(sigma=0.01), force_reinit=True)\nnet[0].weight.data()[0]\nnet.initialize(init=init.Constant(1), force_reinit=True)\nnet[0].weight.data()[0]\nnet[0].weight.initialize(init=init.Xavier(), force_reinit=True)\nnet[1].initialize(init=init.Constant(42), force_reinit=True)\nclass MyInit(init.Initializer):\n def _init_weight(self, name, data):\n data[:] = np.random.uniform(-10, 10, data.shape)\n data *= np.abs(data) >= 5\nnet.initialize(MyInit(), force_reinit=True)\nnet[0].weight.data()[:2]\nnet[0].weight.data()[:] += 1\nnet[0].weight.data()[0, 0] = 42\nnet[0].weight.data()[0]\nlayer = CenteredLayer()\nlayer(np.array([1, 2, 3, 4, 5]))\nnet = nn.Sequential()\nnet.add(nn.Dense(128), CenteredLayer())\nnet.initialize()", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = paddle.rand([2, 4])\nnet(X)\nnet.state_dict()['2.bias']\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_sublayer(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Normal(mean=0.0, std=0.01)\n paddle.zeros(m.bias)\nnet.apply(init_normal)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef init_constant(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(value = 1)\n paddle.zeros(m.bias)\nnet.apply(init_constant)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef xavier(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.XavierUniform(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(42)\nnet[0].apply(xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n print(\"Init\", *[(name, param.shape) for name, param in m.named_parameters()][0])\n paddle.nn.initializer.XavierUniform(m.weight, -10, 10)\n h = paddle.abs(m.weight) >= 5\n h = paddle.to_tensor(h)\n m = paddle.to_tensor(m.weight)\n m *= 
h\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.set_value(net[0].weight.numpy() + 1)\nval = net[0].weight.numpy()\nval[0, 0] = 42\nnet[0].weight.set_value(val)\nnet[0].weight[0]\nlayer = CenteredLayer()\nlayer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32'))\nnet = nn.Sequential(nn.Linear(8, 128), CenteredLayer())"} +{"id": 71, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nclass CenteredLayer(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n def forward(self, X):\n return X - X.mean()\nY = net(np.random.uniform(size=(4, 8)))\nY.mean()\nclass MyDense(nn.Block):\n def __init__(self, units, in_units, **kwargs):\n super().__init__(**kwargs)\n self.weight = self.params.get('weight', shape=(in_units, units))\n self.bias = self.params.get('bias', shape=(units,))\n def forward(self, x):\n linear = np.dot(x, self.weight.data(ctx=x.ctx)) + self.bias.data(\n ctx=x.ctx)\n return npx.relu(linear)\ndense = MyDense(units=3, in_units=5)\ndense.params\ndense.initialize()\ndense(np.random.uniform(size=(2, 5)))\nnet = nn.Sequential()\nnet.add(MyDense(8, in_units=64), MyDense(1, in_units=8))\nnet.initialize()\nnet(np.random.uniform(size=(2, 64)))", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nclass CenteredLayer(nn.Layer):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(paddle.rand([4, 8]))\nY.mean()\nclass MyLinear(nn.Layer):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = paddle.create_parameter(shape=(in_units, units), dtype='float32')\n self.bias = paddle.create_parameter(shape=(units,), dtype='float32')\n def forward(self, X):\n linear = paddle.matmul(X, self.weight) + self.bias\n return F.relu(linear)\nlinear = MyLinear(5, 3)\nlinear.weight\nlinear(paddle.randn([2, 5]))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(paddle.rand([2, 64]))"} +{"id": 72, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nx = np.arange(4)\nnpx.save('x-file', x)\nx2 = npx.load('x-file')\ny = np.zeros(4)\nnpx.save('x-files', [x, y])\nx2, y2 = npx.load('x-files')\nmydict = {'x': x, 'y': y}\nnpx.save('mydict', mydict)\nmydict2 = npx.load('mydict')\nclass MLP(nn.Block):\n def __init__(self, **kwargs):\n super(MLP, self).__init__(**kwargs)\n self.hidden = nn.Dense(256, activation='relu')\n self.output = nn.Dense(10)\n def forward(self, x):\n return self.output(self.hidden(x))\nnet = MLP()\nnet.initialize()\nX = np.random.uniform(size=(2, 20))\nY = net(X)\nnet.save_parameters('mlp.params')\nclone = MLP()\nclone.load_parameters('mlp.params')", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nx = paddle.arange(4)\npaddle.save(x, 'x-file')\nx2 = paddle.load('x-file')\ny = paddle.zeros([4])\npaddle.save([x,y], 'x-file')\nx2, y2 = paddle.load('x-file')\nmydict = {'x': x, 'y': y}\npaddle.save(mydict, 'mydict')\nmydict2 = paddle.load('mydict')\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = paddle.randn(shape=[2, 20])\nY = net(X)\npaddle.save(net.state_dict(), 'mlp.pdparams')\nclone = MLP()\nclone.set_state_dict(paddle.load('mlp.pdparams'))\nclone.eval()"} +{"id": 73, "mxnet": "from mxnet 
import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnpx.cpu(), npx.gpu(), npx.gpu(1)\nnpx.num_gpus()\ndef try_gpu(i=0):\n return npx.gpu(i) if npx.num_gpus() >= i + 1 else npx.cpu()\ndef try_all_gpus():\n devices = [npx.gpu(i) for i in range(npx.num_gpus())]\n return devices if devices else [npx.cpu()]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = np.array([1, 2, 3])\nx.ctx\nX = np.ones((2, 3), ctx=try_gpu())\nY = np.random.uniform(size=(2, 3), ctx=try_gpu(1))\nZ = X.copyto(try_gpu(1))\nZ.as_in_ctx(try_gpu(1)) is Z\nnet = nn.Sequential()\nnet.add(nn.Dense(1))\nnet.initialize(ctx=try_gpu())\nnet[0].weight.data().ctx", "paddle": "import paddle\nfrom paddle import nn\npaddle.device.set_device(\"cpu\"), paddle.CUDAPlace(0), paddle.CUDAPlace(1)\npaddle.device.cuda.device_count()\ndef try_gpu(i=0):\n if paddle.device.cuda.device_count() >= i + 1:\n return paddle.CUDAPlace(i)\n return paddle.CPUPlace()\ndef try_all_gpus():\n devices = [paddle.CUDAPlace(i) for i in range(paddle.device.cuda.device_count())]\n return devices if devices else paddle.CPUPlace()\ntry_gpu(),try_gpu(10),try_all_gpus()\nx = paddle.to_tensor([1, 2, 3])\nx.place\nX = paddle.to_tensor(paddle.ones(shape=[2, 3]), place=try_gpu())\nY = paddle.to_tensor(paddle.rand([2, 3]), place=try_gpu(1))\nZ = X.cuda(1)\nZ.cuda(1) is Z\nnet = nn.Sequential(nn.Linear(3, 1))\nnet=net.to(try_gpu())\nnet[0].weight.place"} +{"id": 74, "mxnet": "from mxnet import autograd, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef corr2d(X, K):\n h, w = K.shape\n Y = np.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = np.array([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Block):\n def __init__(self, kernel_size, **kwargs):\n super().__init__(**kwargs)\n self.weight = self.params.get('weight', shape=kernel_size)\n self.bias = self.params.get('bias', shape=(1,))\n def forward(self, x):\n return corr2d(x, self.weight.data()) + self.bias.data()\nX = np.ones((6, 8))\nX[:, 2:6] = 0\nK = np.array([[1.0, -1.0]])\nY = corr2d(X, K)\ncorr2d(d2l.transpose(X), K)\nconv2d = nn.Conv2D(1, kernel_size=(1, 2), use_bias=False)\nconv2d.initialize()\n\nX = X.reshape(1, 1, 6, 8)\nY = Y.reshape(1, 1, 6, 7)\nlr = 3e-2\nfor i in range(10):\n with autograd.record():\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n l.backward()\n conv2d.weight.data()[:] -= lr * conv2d.weight.grad()\nconv2d.weight.data().reshape((1, 2))", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef corr2d(X, K):\n h, w = K.shape\n Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Layer):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = paddle.ParamAttr(paddle.rand(kernel_size))\n self.bias = paddle.ParamAttr(paddle.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = paddle.ones((6, 8))\nX[:, 2:6] = 0\nK = paddle.to_tensor([[1.0, -1.0]])\nY = corr2d(X, K)\ncorr2d(X.t(), K)\nconv2d = nn.Conv2D(1, 1, kernel_size=(1, 2))\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l 
= (Y_hat - Y) ** 2\n conv2d.clear_gradients()\n l.sum().backward()\n with paddle.no_grad():\n conv2d.weight[:] -= lr * conv2d.weight.grad\nconv2d.weight.reshape((1, 2))"} +{"id": 75, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\ndef comp_conv2d(conv2d, X):\n conv2d.initialize()\n X = X.reshape((1, 1) + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(1, kernel_size=3, padding=1)\nX = np.random.uniform(size=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=3, padding=1, strides=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=(3, 5), padding=(0, 1), strides=(3, 4))\ncomp_conv2d(conv2d, X).shape", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\n\ndef comp_conv2d(conv2d, X):\n X = paddle.reshape(X, [1, 1] + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1)\nX = paddle.rand((8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape"} +{"id": 76, "mxnet": "from mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = np.array([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return np.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = np.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = np.dot(K, X)\n return Y.reshape((c_o, h, w))\nX = np.random.normal(0, 1, (3, 3, 3))\nK = np.random.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(np.abs(Y1 - Y2).sum()) < 1e-6", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = paddle.to_tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = paddle.to_tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return paddle.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = paddle.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = paddle.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = paddle.normal(0, 1, (3, 3, 3))\nK = paddle.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(paddle.abs(Y1 - Y2).sum()) < 1e-6"} +{"id": 77, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = np.zeros((X.shape[0] - p_h + 1, X.shape[1] - 
p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = np.arange(16, dtype=np.float32).reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, strides=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), strides=(2, 3))\npool2d(X)\nX = np.concatenate((X, X + 1), 1)\npool2d = nn.MaxPool2D(3, padding=1, strides=2)\npool2d(X)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = paddle.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = paddle.arange(16, dtype=\"float32\").reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3, stride=3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), stride=(2, 3))\npool2d(X)\nX = paddle.concat((X, X + 1), 1)\npool2d = paddle.nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)"} +{"id": 78, "mxnet": "from mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Conv2D(channels=6, kernel_size=5, padding=2, activation='sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120, activation='sigmoid'),\n nn.Dense(84, activation='sigmoid'),\n nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 28, 28))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n net.initialize(force_reinit=True, ctx=device, init=init.Xavier())\n loss = gluon.loss.SoftmaxCrossEntropyLoss()\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n X, y = X.as_in_ctx(device), y.as_in_ctx(device)\n with autograd.record():\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n trainer.step(X.shape[0])\n metric.add(l.sum(), d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn, optimizer\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.Sigmoid(),\n 
nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = paddle.rand((1, 1, 28, 28), 'float32')\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2D:\n nn.initializer.XavierUniform(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.clear_grad()\n X, y = paddle.to_tensor(X, place=device), paddle.to_tensor(y, place=device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with paddle.no_grad():\n metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))"} +{"id": 79, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(\n nn.Conv2D(96, kernel_size=11, strides=4, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Conv2D(256, kernel_size=5, padding=2, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),\n nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),\n nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nnet = nn.Sequential(\n nn.Conv2D(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2), nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)"} +{"id": 80, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef vgg_block(num_convs, num_channels):\n blk = nn.Sequential()\n for _ in range(num_convs):\n blk.add(nn.Conv2D(num_channels, kernel_size=3, padding=1, activation='relu'))\n 
blk.add(nn.MaxPool2D(pool_size=2, strides=2))\n return blk\ndef vgg(conv_arch):\n net = nn.Sequential()\n for (num_convs, num_channels) in conv_arch:\n net.add(vgg_block(num_convs, num_channels))\n net.add(nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(10))\n return net\nnet = vgg(conv_arch)\nnet.initialize()\nX = np.random.uniform(size=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.name, 'output shape:\t', X.shape)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2D(kernel_size=2, stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(*conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)"} +{"id": 81, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef nin_block(num_channels, kernel_size, strides, padding):\n blk = nn.Sequential()\n blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding, activation='relu'),\n nn.Conv2D(num_channels, kernel_size=1, activation='relu'),\n nn.Conv2D(num_channels, kernel_size=1, activation='relu'))\n return blk\nnet = nn.Sequential()\nnet.add(nin_block(96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(pool_size=3, strides=2),\n nin_block(256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(pool_size=3, strides=2),\n nin_block(384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Dropout(0.5),\n nin_block(10, kernel_size=3, strides=1, padding=1),\n nn.GlobalAvgPool2D(),\n nn.Flatten())\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2D(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(3, stride=2), nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 82, "mxnet": "from mxnet import np, 
npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nclass Inception(nn.Block):\n def __init__(self, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(c1, kernel_size=1, activation='relu')\n self.p2_1 = nn.Conv2D(c2[0], kernel_size=1, activation='relu')\n self.p2_2 = nn.Conv2D(c2[1], kernel_size=3, padding=1, activation='relu')\n self.p3_1 = nn.Conv2D(c3[0], kernel_size=1, activation='relu')\n self.p3_2 = nn.Conv2D(c3[1], kernel_size=5, padding=2, activation='relu')\n self.p4_1 = nn.MaxPool2D(pool_size=3, strides=1, padding=1)\n self.p4_2 = nn.Conv2D(c4, kernel_size=1, activation='relu')\n def forward(self, x):\n p1 = self.p1_1(x)\n p2 = self.p2_2(self.p2_1(x))\n p3 = self.p3_2(self.p3_1(x))\n p4 = self.p4_2(self.p4_1(x))\n return np.concatenate((p1, p2, p3, p4), axis=1)\nb1 = nn.Sequential()\nb1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb2 = nn.Sequential()\nb2.add(nn.Conv2D(64, kernel_size=1, activation='relu'),\n nn.Conv2D(192, kernel_size=3, padding=1, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb3 = nn.Sequential()\nb3.add(Inception(64, (96, 128), (16, 32), 32),\n Inception(128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb4 = nn.Sequential()\nb4.add(Inception(192, (96, 208), (16, 48), 64),\n Inception(160, (112, 224), (24, 64), 64),\n Inception(128, (128, 256), (24, 64), 64),\n Inception(112, (144, 288), (32, 64), 64),\n Inception(256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb5 = nn.Sequential()\nb5.add(Inception(256, (160, 320), (32, 128), 128),\n Inception(384, (192, 384), (48, 128), 128),\n nn.GlobalAvgPool2D())\nnet = nn.Sequential()\nnet.add(b1, b2, b3, b4, b5, nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 96, 96))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nclass Inception(nn.Layer):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(in_channels, c1, kernel_size=1)\n self.p2_1 = nn.Conv2D(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2D(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2D(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2D(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2D(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2D(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return paddle.concat(x=[p1, p2, p3, p4], axis=1)\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2,padding=1))\nb2 = nn.Sequential(nn.Conv2D(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 
160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = paddle.rand(shape=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 83, "mxnet": "from mxnet import autograd, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n if not autograd.is_training():\n X_hat = (X - moving_mean) / np.sqrt(moving_var + eps)\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = X.mean(axis=0)\n var = ((X - mean) ** 2).mean(axis=0)\n else:\n mean = X.mean(axis=(0, 2, 3), keepdims=True)\n var = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True)\n X_hat = (X - mean) / np.sqrt(var + eps)\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Block):\n def __init__(self, num_features, num_dims, **kwargs):\n super().__init__(**kwargs)\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.params.get('gamma', shape=shape, init=init.One())\n self.beta = self.params.get('beta', shape=shape, init=init.Zero())\n self.moving_mean = np.zeros(shape)\n self.moving_var = np.ones(shape)\n def forward(self, X):\n if self.moving_mean.ctx != X.ctx:\n self.moving_mean = self.moving_mean.copyto(X.ctx)\n self.moving_var = self.moving_var.copyto(X.ctx)\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma.data(), self.beta.data(), self.moving_mean,\n self.moving_var, eps=1e-12, momentum=0.9)\n return Y\nnet = nn.Sequential()\nnet.add(nn.Conv2D(6, kernel_size=5),\n BatchNorm(6, num_dims=4),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(16, kernel_size=5),\n BatchNorm(16, num_dims=4),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120),\n BatchNorm(120, num_dims=2),\n nn.Activation('sigmoid'),\n nn.Dense(84),\n BatchNorm(84, num_dims=2),\n nn.Activation('sigmoid'),\n nn.Dense(10))\nnet[1].gamma.data().reshape(-1,), net[1].beta.data().reshape(-1,)\nnet = nn.Sequential()\nnet.add(nn.Conv2D(6, kernel_size=5),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(16, kernel_size=5),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.Dense(84),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.Dense(10))", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True):\n if not is_training:\n X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = paddle.mean(X)\n var = paddle.mean(((X - mean) ** 2))\n else:\n mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True)\n var = 
paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / (var + eps) ** 0.5\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Layer):\n def __init__(self, num_features, num_dims=4):\n super(BatchNorm, self).__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32')))\n self.beta = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32')))\n self.moving_mean = paddle.zeros(shape=shape, dtype='float32')\n self.moving_var = paddle.zeros(shape=shape, dtype='float32')\n def forward(self, X):\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9, is_training=self.training)\n return Y\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(), nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nparam = net.parameters()\nprint('gamma:', param[2].numpy().reshape(-1))\nprint('beta:', param[3].numpy().reshape(-1))\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), nn.BatchNorm2D(6, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(),\n nn.Linear(84, 10))"} +{"id": 84, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nclass Residual(nn.Block):\n def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):\n super().__init__(**kwargs)\n self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1, strides=strides)\n self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(num_channels, kernel_size=1, strides=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm()\n self.bn2 = nn.BatchNorm()\n def forward(self, X):\n Y = npx.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n return npx.relu(Y + X)\nblk = Residual(3)\nblk.initialize()\nX = np.random.uniform(size=(4, 3, 6, 6))\nblk(X).shape\nblk = Residual(6, use_1x1conv=True, strides=2)\nblk.initialize()\nblk(X).shape\nnet = nn.Sequential()\nnet.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\ndef resnet_block(num_channels, num_residuals, first_block=False):\n blk = nn.Sequential()\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.add(Residual(num_channels, use_1x1conv=True, strides=2))\n else:\n blk.add(Residual(num_channels))\n return 
blk\nnet.add(resnet_block(64, 2, first_block=True),\n resnet_block(128, 2),\n resnet_block(256, 2),\n resnet_block(512, 2))\nnet.add(nn.GlobalAvgPool2D(), nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nfrom paddle.nn import functional as F\nclass Residual(nn.Layer):\n def __init__(self, input_channels, num_channels, use_1x1conv=False,\n strides=1):\n super(Residual, self).__init__()\n self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2D(num_channels)\n self.bn2 = nn.BatchNorm2D(num_channels)\n self.relu = nn.ReLU()\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3, 3)\nX = paddle.rand([4, 3, 6, 6])\nY = blk(X)\nY.shape\nblk = Residual(3, 6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\ndef resnet_block(input_channels, num_channels, num_residuals, first_block=False):\n blk = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))\n else:\n blk.append(Residual(num_channels, num_channels))\n return blk\nb2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))\nb3 = nn.Sequential(*resnet_block(64, 128, 2))\nb4 = nn.Sequential(*resnet_block(128, 256, 2))\nb5 = nn.Sequential(*resnet_block(256, 512, 2))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 85, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef conv_block(num_channels):\n blk = nn.Sequential()\n blk.add(nn.BatchNorm(),\n nn.Activation('relu'),\n nn.Conv2D(num_channels, kernel_size=3, padding=1))\n return blk\nclass DenseBlock(nn.Block):\n def __init__(self, num_convs, num_channels, **kwargs):\n super().__init__(**kwargs)\n self.net = nn.Sequential()\n for _ in range(num_convs):\n self.net.add(conv_block(num_channels))\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = np.concatenate((X, Y), axis=1)\n return X\nblk = DenseBlock(2, 10)\nblk.initialize()\nX = np.random.uniform(size=(4, 3, 8, 8))\nY = blk(X)\nY.shape\ndef transition_block(num_channels):\n blk = nn.Sequential()\n blk.add(nn.BatchNorm(), nn.Activation('relu'),\n nn.Conv2D(num_channels, kernel_size=1),\n nn.AvgPool2D(pool_size=2, strides=2))\n return blk\nblk = transition_block(10)\nblk.initialize()\nblk(Y).shape\nnet = nn.Sequential()\nnet.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nfor i, num_convs in 
enumerate(num_convs_in_dense_blocks):\n net.add(DenseBlock(num_convs, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n num_channels //= 2\n net.add(transition_block(num_channels))\nnet.add(nn.BatchNorm(),\n nn.Activation('relu'),\n nn.GlobalAvgPool2D(),\n nn.Dense(10))", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Layer):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = paddle.concat(x=[X, Y], axis=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = paddle.randn([4, 3, 8, 8])\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2D(kernel_size=2, stride=2))\nblk = transition_block(23, 10)\nblk(Y).shape\nb1 = nn.Sequential(\n nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nblks = []\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n blks.append(DenseBlock(num_convs, num_channels, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n blks.append(transition_block(num_channels, num_channels // 2))\n num_channels = num_channels // 2\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2D(num_channels), nn.ReLU(),\n nn.AdaptiveMaxPool2D((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))"} +{"id": 86, "mxnet": "%matplotlib inline\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nT = 1000\ntime = np.arange(1, T + 1, dtype=np.float32)\nx = np.sin(0.01 * time) + np.random.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = np.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef get_net():\n net = nn.Sequential()\n net.add(nn.Dense(10, activation='relu'),\n nn.Dense(1))\n net.initialize(init.Xavier())\n return net\nloss = gluon.loss.L2Loss()\ndef train(net, train_iter, loss, epochs, lr):\n trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr})\n for epoch in range(epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.asnumpy(), onestep_preds.asnumpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = np.zeros(T)\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = 
net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.asnumpy(), onestep_preds.asnumpy(),\n multistep_preds[n_train + tau:].asnumpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = np.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape(-1)\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].asnumpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nT = 1000\ntime = paddle.arange(1, T + 1, dtype=paddle.float32)\nx = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = paddle.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.XavierUniform(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters())\n for epoch in range(epochs):\n for i,(X, y) in enumerate (train_iter()):\n trainer.clear_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = paddle.zeros([T])\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = paddle.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape([-1])\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"} +{"id": 87, "mxnet": "import collections\nimport re\nfrom d2l import mxnet as d2l", "paddle": "import collections\nimport re\nfrom d2l import paddle as d2l"} +{"id": 88, "mxnet": "import random\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = 
d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield np.array(X), np.array(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = np.array(corpus[offset: offset + num_tokens])\n Ys = np.array(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield paddle.to_tensor(X), paddle.to_tensor(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = paddle.to_tensor(corpus[offset: offset + num_tokens])\n Ys = paddle.to_tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1))\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"} +{"id": 89, "mxnet": "from mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nX, W_xh = np.random.normal(0, 1, (3, 1)), np.random.normal(0, 1, (1, 4))\nH, W_hh = np.random.normal(0, 1, (3, 4)), np.random.normal(0, 1, (4, 4))\nnp.dot(X, W_xh) + np.dot(H, W_hh)\nnp.dot(np.concatenate((X, H), 1), np.concatenate((W_xh, W_hh), 0))", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nX, W_xh = paddle.normal(0, 1, (3, 1)), paddle.normal(0, 1, (1, 4))\nH, W_hh = paddle.normal(0, 1, (3, 4)), paddle.normal(0, 1, (4, 4))\npaddle.matmul(X, W_xh) + paddle.matmul(H, W_hh)\npaddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0))"} +{"id": 90, "mxnet": "%matplotlib inline\nimport math\nfrom mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 
35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnpx.one_hot(np.array([0, 2]), len(vocab))\nX = np.arange(10).reshape((2, 5))\nnpx.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n W_xh = normal((num_inputs, num_hiddens))\n W_hh = normal((num_hiddens, num_hiddens))\n b_h = np.zeros(num_hiddens, ctx=device)\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_rnn_state(batch_size, num_hiddens, device):\n return (np.zeros((batch_size, num_hiddens), ctx=device), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = np.tanh(np.dot(X, W_xh) + np.dot(H, W_hh) + b_h)\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens, device)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = npx.one_hot(X.T, self.vocab_size)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size, ctx):\n return self.init_state(batch_size, self.num_hiddens, ctx)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0], d2l.try_gpu())\nY, new_state = net(X.as_in_context(d2l.try_gpu()), state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1, ctx=device)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: np.array([outputs[-1]], ctx=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.argmax(axis=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\ndef grad_clipping(net, theta):\n if isinstance(net, gluon.Block):\n params = [p.data() for p in net.collect_params().values()]\n else:\n params = net.params\n norm = math.sqrt(sum((p.grad ** 2).sum() for p in params))\n if norm > theta:\n for param in params:\n param.grad[:] *= theta / norm\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], ctx=device)\n else:\n for s in state:\n s.detach()\n y = Y.T.reshape(-1)\n X, y = X.as_in_ctx(device), y.as_in_ctx(device)\n with autograd.record():\n y_hat, state = net(X, state)\n l = loss(y_hat, y).mean()\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * d2l.size(y), d2l.size(y))\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = gluon.loss.SoftmaxCrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, gluon.Block):\n net.initialize(ctx=device, 
force_reinit=True, init=init.Normal(0.01))\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n updater = lambda batch_size: trainer.step(batch_size)\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nF.one_hot(paddle.to_tensor([0, 2]), len(vocab))\nX = paddle.arange(10).reshape((2, 5))\nF.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)* 0.01\n W_xh = normal([num_inputs, num_hiddens])\n W_hh = normal([num_hiddens, num_hiddens])\n b_h = paddle.zeros(shape=[num_hiddens])\n W_hq = normal([num_hiddens, num_outputs])\n b_q = paddle.zeros(shape=[num_outputs])\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient=False\n return params\ndef init_rnn_state(batch_size, num_hiddens):\n return (paddle.zeros(shape=[batch_size, num_hiddens]), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = paddle.tanh(paddle.mm(X, W_xh) + paddle.mm(H, W_hh) + b_h)\n Y = paddle.mm(H, W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(x=outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = F.one_hot(X.T, self.vocab_size)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size):\n return self.init_state(batch_size, self.num_hiddens)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0])\nY, new_state = net(X, state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: paddle.to_tensor(outputs[-1], place=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(paddle.reshape(paddle.argmax(y,axis=1),shape=[1])))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\ndef grad_clipping(net, theta):\n if isinstance(net, nn.Layer):\n params = [p for p in net.parameters() if not p.stop_gradient]\n else:\n params = net.params\n norm = paddle.sqrt(sum(paddle.sum((p.grad ** 2)) for p in params))\n if norm > theta:\n with paddle.no_grad():\n for param in params:\n param.grad.set_value(param.grad * theta / norm)\ndef train_epoch_ch8(net, train_iter, loss, 
updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0])\n else:\n if isinstance(net, nn.Layer) and not isinstance(state, tuple):\n state.stop_gradient=True\n else:\n for s in state:\n s.stop_gradient=True\n y = paddle.reshape(Y.T,shape=[-1])\n X = paddle.to_tensor(X, place=device)\n y = paddle.to_tensor(y, place=device)\n y_hat, state = net(X, state)\n l = loss(y_hat, y).mean()\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.backward()\n grad_clipping(net, 1)\n updater.step()\n else:\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * y.numel(), y.numel())\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, nn.Layer):\n updater = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\nnet = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)"} +{"id": 91, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn, rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = rnn.RNN(num_hiddens)\nrnn_layer.initialize()\nstate = rnn_layer.begin_state(batch_size=batch_size)\nlen(state), state[0].shape\nX = np.random.uniform(size=(num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, len(state_new), state_new[0].shape\nclass RNNModel(nn.Block):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.dense = nn.Dense(vocab_size)\n def forward(self, inputs, state):\n X = npx.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.dense(Y.reshape(-1, Y.shape[-1]))\n return output, state\n def begin_state(self, *args, **kwargs):\n return self.rnn.begin_state(*args, **kwargs)\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, len(vocab))\nnet.initialize(force_reinit=True, ctx=device)\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = nn.SimpleRNN(len(vocab), num_hiddens, time_major=True)\nstate = paddle.zeros(shape=[1, batch_size, num_hiddens])\nstate.shape\nX = paddle.rand(shape=[num_steps, batch_size, len(vocab)])\nY, state_new = rnn_layer(X, state)\nY.shape, state_new.shape\nclass RNNModel(nn.Layer):\n def __init__(self, rnn_layer, 
vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.num_hiddens = self.rnn.hidden_size\n if self.rnn.num_directions==1:\n self.num_directions = 1\n self.linear = nn.Linear(self.num_hiddens, self.vocab_size)\n else:\n self.num_directions = 2\n self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)\n def forward(self, inputs, state):\n X = F.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.linear(Y.reshape((-1, Y.shape[-1])))\n return output, state\n def begin_state(self, batch_size=1):\n if not isinstance(self.rnn, nn.LSTM):\n return paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens])\n else:\n return (paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]),\n paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]))\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, vocab_size=len(vocab))\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1.0\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)"} +{"id": 92, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_gru_state(batch_size, num_hiddens, device):\n return (np.zeros(shape=(batch_size, num_hiddens), ctx=device), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = npx.sigmoid(np.dot(X, W_xz) + np.dot(H, W_hz) + b_z)\n R = npx.sigmoid(np.dot(X, W_xr) + np.dot(H, W_hr) + b_r)\n H_tilda = np.tanh(np.dot(X, W_xh) + np.dot(R * H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H,)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\ngru_layer = rnn.GRU(num_hiddens)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xz, W_hz, b_z 
= three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_gru_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H,*_ = state\n outputs = []\n for X in inputs:\n Z = F.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)\n R = F.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)\n H_tilda = paddle.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = H @ W_hq + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H,*_)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\ngru_layer = nn.GRU(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"} +{"id": 93, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_lstm_state(batch_size, num_hiddens, device):\n return (np.zeros((batch_size, num_hiddens), ctx=device), np.zeros((batch_size, num_hiddens), ctx=device))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = npx.sigmoid(np.dot(X, W_xi) + np.dot(H, W_hi) + b_i)\n F = npx.sigmoid(np.dot(X, W_xf) + np.dot(H, W_hf) + b_f)\n O = npx.sigmoid(np.dot(X, W_xo) + np.dot(H, W_ho) + b_o)\n C_tilda = np.tanh(np.dot(X, W_xc) + np.dot(H, W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * np.tanh(C)\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nlstm_layer = rnn.LSTM(num_hiddens)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as Function\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, 
num_steps)\ndef get_lstm_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_lstm_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), paddle.zeros([batch_size, num_hiddens]))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = Function.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)\n F = Function.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)\n O = Function.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)\n C_tilda = paddle.tanh((X @ W_xc) + (H @ W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * paddle.tanh(C)\n Y = (H @ W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\nlstm_layer = nn.LSTM(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"} +{"id": 94, "mxnet": "import os\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = np.array([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = (array != vocab['<pad>']).astype(np.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(np.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.astype(np.int32))\n print('Valid length of Y:', Y_valid_len)\n break", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport os\nimport paddle\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = paddle.to_tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = (array != vocab['<pad>']).astype(paddle.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(paddle.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.astype(paddle.int32))\n print('Valid length of Y:', Y_valid_len)\n break"} +{"id": 95, "pytorch": "x = torch.arange(12)\nx.numel()\ntorch.zeros((2, 3, 4))\ntorch.ones((2, 3, 4))\ntorch.randn(3, 4)\ntorch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = torch.tensor([1.0, 2, 4, 8])\ny = torch.tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\ntorch.exp(x)\nX = torch.arange(12, dtype=torch.float32).reshape((3,4))\nY = torch.tensor([[2.0, 1, 4, 
3], [1, 2, 3, 4], [4, 3, 2, 1]])\ntorch.cat((X, Y), dim=0), torch.cat((X, Y), dim=1)\na = torch.arange(3).reshape((3, 1))\nb = torch.arange(2).reshape((1, 2))\nZ = torch.zeros_like(Y)\nZ[:] = X + Y\nA = X.numpy()\nB = torch.tensor(A)\na = torch.tensor([3.5])\nprint(a, a.item(), float(a), int(a))", "mxnet": "x = np.arange(12)\nx.size\nnp.zeros((2, 3, 4))\nnp.ones((2, 3, 4))\nnp.random.normal(0, 1, size=(3, 4))\nnp.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = np.array([1, 2, 4, 8])\ny = np.array([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\nnp.exp(x)\nX = np.arange(12).reshape(3, 4)\nY = np.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nnp.concatenate([X, Y], axis=0), np.concatenate([X, Y], axis=1)\na = np.arange(3).reshape(3, 1)\nb = np.arange(2).reshape(1, 2)\nZ = np.zeros_like(Y)\nZ[:] = X + Y\nA = X.asnumpy()\nB = np.array(A)\na = np.array([3.5])\nprint(a, a.item(), float(a), int(a))"} +{"id": 96, "pytorch": "import torch\nX, y = torch.tensor(inputs.values), torch.tensor(outputs.values)", "mxnet": "from mxnet import np\nX, y = np.array(inputs.values), np.array(outputs.values)"} +{"id": 97, "pytorch": "import torch\nx = torch.tensor(3.0)\ny = torch.tensor(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = torch.arange(4)\nA = torch.arange(20).reshape(5, 4)\nB = torch.tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nX = torch.arange(24).reshape(2, 3, 4)\nA = torch.arange(20, dtype=torch.float32).reshape(5, 4)\nB = A.clone()\nprint(A, A + B)\na = 2\nX = torch.arange(24).reshape(2, 3, 4)\nprint(a + X, (a * X).shape)\nx = torch.arange(4, dtype=torch.float32)\nprint(x, x.sum())\nA.mean()\nA.sum() / A.numel()\ny = torch.ones(4, dtype = torch.float32)\nprint(torch.dot(x, y))\ntorch.sum(x * y)\nA.shape, x.shape, torch.mv(A, x)\nB = torch.ones(4, 3)\ntorch.mm(A, B)\nu = torch.tensor([3.0, -4.0])\ntorch.norm(u)\ntorch.abs(u).sum()\ntorch.norm(torch.ones((4, 9)))", "mxnet": "from mxnet import np, npx\nnpx.set_np()\nx = np.array(3.0)\ny = np.array(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = np.arange(4)\nA = np.arange(20).reshape(5, 4)\nB = np.array([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nX = np.arange(24).reshape(2, 3, 4)\nA = np.arange(20).reshape(5, 4)\nB = A.copy()\nprint(A, A + B)\na = 2\nX = np.arange(24).reshape(2, 3, 4)\nprint(a + X, (a * X).shape)\nx = np.arange(4)\nprint(x, x.sum())\nA.mean()\nA.sum() / A.size\ny = np.ones(4)\nprint(np.dot(x, y))\nnp.sum(x * y)\nA.shape, x.shape, np.dot(A, x)\nB = np.ones(shape=(4, 3))\nnp.dot(A, B)\nu = np.array([3, -4])\nnp.linalg.norm(u)\nnp.abs(u).sum()\nnp.linalg.norm(np.ones((4, 9)))"} +{"id": 98, "pytorch": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import torch as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x", "mxnet": "%matplotlib inline\nfrom matplotlib_inline import backend_inline\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef f(x):\n return 3 * x ** 2 - 4 * x"} +{"id": 99, "pytorch": "import torch\nx = torch.arange(4.0)\nx.requires_grad_(True)\nx.grad\ny = 2 * torch.dot(x, x)\nx.grad.zero_()\ny = x.sum()\ny.backward()\nx.grad\nx.grad.zero_()\ny = x * x\ny.sum().backward()\nx.grad\nx.grad.zero_()\ny = x * x\nu = y.detach()\nz = u * x\nz.sum().backward()\nx.grad == u\nx.grad.zero_()\ny.sum().backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while b.norm() < 1000:\n b = b * 2\n if b.sum() > 0:\n c = b\n else:\n c = 100 * b\n return c\na = torch.randn(size=(), requires_grad=True)\nd = f(a)\nd.backward()", "mxnet": "from mxnet import autograd, np, 
npx\nnpx.set_np()\nx = np.arange(4.0)\nx.attach_grad()\nx.grad\nwith autograd.record():\n y = 2 * np.dot(x, x)\nwith autograd.record():\n y = x.sum()\ny.backward()\nx.grad\nwith autograd.record():\n y = x * x\ny.backward()\nx.grad\nwith autograd.record():\n y = x * x\n u = y.detach()\n z = u * x\nz.backward()\nx.grad == u\ny.backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while np.linalg.norm(b) < 1000:\n b = b * 2\n if b.sum() > 0:\n c = b\n else:\n c = 100 * b\n return c\na = np.random.normal()\na.attach_grad()\nwith autograd.record():\n d = f(a)\nd.backward()"} +{"id": 100, "pytorch": "%matplotlib inline\nimport torch\nfrom torch.distributions import multinomial\nfrom d2l import torch as d2l\nfair_probs = torch.ones([6]) / 6\nmultinomial.Multinomial(1, fair_probs).sample()\nmultinomial.Multinomial(10, fair_probs).sample()\ncounts = multinomial.Multinomial(1000, fair_probs).sample()", "mxnet": "%matplotlib inline\nimport random\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nfair_probs = [1.0 / 6] * 6\nnp.random.multinomial(1, fair_probs)\nnp.random.multinomial(10, fair_probs)\ncounts = np.random.multinomial(1000, fair_probs).astype(np.float32)"} +{"id": 101, "pytorch": "counts = multinomial.Multinomial(10, fair_probs).sample((500,))\ncum_counts = counts.cumsum(dim=0)\nestimates = cum_counts / cum_counts.sum(dim=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].numpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nimport torch\na = dir(torch.distributions)\nhelp(torch.ones)\ntorch.ones(4)", "mxnet": "counts = np.random.multinomial(10, fair_probs, size=500)\ncum_counts = counts.astype(np.float32).cumsum(axis=0)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].asnumpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nfrom mxnet import np\na = dir(np.random)\nhelp(np.ones)\nnp.ones(4)"} +{"id": 102, "pytorch": "%matplotlib inline\nimport math\nimport time\nimport numpy as np\nimport torch\nfrom d2l import torch as d2l\nn = 10000\na = torch.ones(n)\nb = torch.ones(n)\nc = torch.zeros(n)\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])", "mxnet": "%matplotlib inline\nimport math\nimport time\nfrom mxnet import np\nfrom d2l import mxnet as d2l\nn = 10000\na = np.ones(n)\nb = np.ones(n)\nc = np.zeros(n)\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x.asnumpy(), [normal(x, mu, sigma).asnumpy() for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"} +{"id": 103, "pytorch": "%matplotlib inline\nimport random\nimport torch\nfrom d2l import torch as d2l\ndef synthetic_data(w, b, num_examples):\n X = torch.normal(0, 1, (num_examples, len(w)))\n y = torch.matmul(X, w) + b\n y += 
torch.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = torch.tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = torch.tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nw = torch.normal(0, 0.01, size=(2,1), requires_grad=True)\nb = torch.zeros(1, requires_grad=True)\ndef linreg(X, w, b):\n return torch.matmul(X, w) + b\ndef sgd(params, lr, batch_size):\n with torch.no_grad():\n for param in params:\n param -= lr * param.grad / batch_size\n param.grad.zero_()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with torch.no_grad():\n train_l = loss(net(features, w, b), labels)", "mxnet": "%matplotlib inline\nimport random\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef synthetic_data(w, b, num_examples):\n X = np.random.normal(0, 1, (num_examples, len(w)))\n y = np.dot(X, w) + b\n y += np.random.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = np.array([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].asnumpy(), labels.asnumpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = np.array(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nw = np.random.normal(0, 0.01, (2, 1))\nb = np.zeros(1)\nw.attach_grad()\nb.attach_grad()\ndef linreg(X, w, b):\n return np.dot(X, w) + b\ndef sgd(params, lr, batch_size):\n for param in params:\n param[:] = param - lr * param.grad / batch_size\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n with autograd.record():\n l = loss(net(X, w, b), y)\n l.backward()\n sgd([w, b], lr, batch_size)\n train_l = loss(net(features, w, b), labels)"} +{"id": 104, "pytorch": "import numpy as np\nimport torch\nfrom torch.utils import data\nfrom d2l import torch as d2l\ntrue_w = torch.tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = data.TensorDataset(*data_arrays)\n return data.DataLoader(dataset, batch_size, shuffle=is_train)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom torch import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nnet[0].weight.data.normal_(0, 0.01)\nnet[0].bias.data.fill_(0)\nloss = nn.MSELoss()\ntrainer = torch.optim.SGD(net.parameters(), lr=0.03)\nw = net[0].weight.data\nb = net[0].bias.data", "mxnet": "from mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ntrue_w = np.array([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = 
gluon.data.ArrayDataset(*data_arrays)\n return gluon.data.DataLoader(dataset, batch_size, shuffle=is_train)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom mxnet.gluon import nn\nnet = nn.Sequential()\nnet.add(nn.Dense(1))\nfrom mxnet import init\nnet.initialize(init.Normal(sigma=0.01))\nloss = gluon.loss.L2Loss()\nfrom mxnet import gluon\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03})\nw = net[0].weight.data()\nb = net[0].bias.data()"} +{"id": 105, "pytorch": "%matplotlib inline\nimport torch\nimport torchvision\nfrom torch.utils import data\nfrom torchvision import transforms\nfrom d2l import torch as d2l\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = torchvision.datasets.FashionMNIST(\n root=\"../data\", train=True, transform=trans, download=True)\nmnist_test = torchvision.datasets.FashionMNIST(\n root=\"../data\", train=False, transform=trans, download=True)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if torch.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(data.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape(18, 28, 28), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ndef get_dataloader_workers():\n return 4\ntrain_iter = data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = torchvision.datasets.FashionMNIST(root=\"../data\", train=True, transform=trans, download=True)\n mnist_test = torchvision.datasets.FashionMNIST(root=\"../data\", train=False, transform=trans, download=True)\n return (data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),\n data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))", "mxnet": "%matplotlib inline\nimport sys\nfrom mxnet import gluon\nfrom d2l import mxnet as d2l\nd2l.use_svg_display()\nmnist_train = gluon.data.vision.FashionMNIST(train=True)\nmnist_test = gluon.data.vision.FashionMNIST(train=False)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(img.asnumpy())\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = mnist_train[:18]\nshow_images(X.squeeze(axis=-1), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ndef get_dataloader_workers():\n return 0 if sys.platform.startswith('win') else 4\ntransformer = gluon.data.vision.transforms.ToTensor()\ntrain_iter = gluon.data.DataLoader(mnist_train.transform_first(transformer), batch_size, shuffle=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n dataset = gluon.data.vision\n trans = [dataset.transforms.ToTensor()]\n if resize:\n trans.insert(0, dataset.transforms.Resize(resize))\n trans = 
dataset.transforms.Compose(trans)\n mnist_train = dataset.FashionMNIST(train=True).transform_first(trans)\n mnist_test = dataset.FashionMNIST(train=False).transform_first(trans)\n return (gluon.data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),\n gluon.data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))"} +{"id": 106, "pytorch": "import torch\nfrom IPython import display\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True)\nb = torch.zeros(num_outputs, requires_grad=True)\nX = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = torch.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = torch.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = torch.tensor([0, 2])\ny_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - torch.log(y_hat[range(len(y_hat)), y])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n cmp = y_hat.type(y.dtype) == y\n return float(cmp.type(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, torch.nn.Module):\n net.eval()\n metric = Accumulator(2)\n with torch.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, torch.nn.Module):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, torch.optim.Optimizer):\n updater.zero_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]", "mxnet": "from IPython import display\nfrom mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = np.random.normal(0, 0.01, (num_inputs, num_outputs))\nb = np.zeros(num_outputs)\nW.attach_grad()\nb.attach_grad()\nX = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdims=True), X.sum(1, keepdims=True)\ndef softmax(X):\n X_exp = np.exp(X)\n partition = X_exp.sum(1, keepdims=True)\n return X_exp / partition\nX = np.random.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(np.dot(X.reshape((-1, W.shape[0])), W) + b)\ny = np.array([0, 2])\ny_hat = np.array([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - np.log(y_hat[range(len(y_hat)), y])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n metric = Accumulator(2)\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), d2l.size(y))\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n metric = 
Accumulator(3)\n if isinstance(updater, gluon.Trainer):\n updater = updater.step\n for X, y in train_iter:\n with autograd.record():\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.size)\n return metric[0] / metric[2], metric[1] / metric[2]"} +{"id": 107, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = torch.optim.SGD(net.parameters(), lr=0.1)", "mxnet": "from mxnet import gluon, init, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential()\nnet.add(nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})"} +{"id": 108, "pytorch": "%matplotlib inline\nimport torch\nfrom d2l import torch as d2l\nx = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)\ny = torch.relu(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(torch.ones_like(x), retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of relu', figsize=(5, 2.5))\ny = torch.sigmoid(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = torch.tanh(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))", "mxnet": "%matplotlib inline\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nx = np.arange(-8.0, 8.0, 0.1)\nx.attach_grad()\nwith autograd.record():\n y = npx.relu(x)\nd2l.plot(x, y, 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of relu', figsize=(5, 2.5))\nwith autograd.record():\n y = npx.sigmoid(x)\nd2l.plot(x, y, 'x', 'sigmoid(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))\nwith autograd.record():\n y = np.tanh(x)\nd2l.plot(x, y, 'x', 'tanh(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))"} +{"id": 109, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = nn.Parameter(torch.randn(\n num_inputs, num_hiddens, requires_grad=True) * 0.01)\nb1 = nn.Parameter(torch.zeros(num_hiddens, requires_grad=True))\nW2 = nn.Parameter(torch.randn(\n num_hiddens, num_outputs, requires_grad=True) * 0.01)\nb2 = nn.Parameter(torch.zeros(num_outputs, requires_grad=True))\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = torch.zeros_like(X)\n return torch.max(X, a)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(X@W1 + b1)\n return (H@W2 + b2)\nloss = nn.CrossEntropyLoss(reduction='none')\nnum_epochs, lr = 10, 0.1\nupdater = torch.optim.SGD(params, lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, 
updater)", "mxnet": "from mxnet import gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens))\nb1 = np.zeros(num_hiddens)\nW2 = np.random.normal(scale=0.01, size=(num_hiddens, num_outputs))\nb2 = np.zeros(num_outputs)\nparams = [W1, b1, W2, b2]\nfor param in params:\n param.attach_grad()\ndef relu(X):\n return np.maximum(X, 0)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(np.dot(X, W1) + b1)\n return np.dot(H, W2) + b2\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\nnum_epochs, lr = 10, 0.1\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))"} +{"id": 110, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "mxnet": "from mxnet import gluon, init, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation='relu'), nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 111, "pytorch": "import math\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\ntrue_w, features, poly_features, labels = [torch.tensor(x, dtype=torch.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n out = net(X)\n y = y.reshape(out.shape)\n l = loss(out, y)\n metric.add(l.sum(), l.numel())\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = nn.MSELoss(reduction='none')\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels.reshape(-1,1)), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape(-1,1)), batch_size, is_train=False)\n trainer = torch.optim.SGD(net.parameters(), lr=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))", "mxnet": "import math\nfrom mxnet import gluon, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, 
data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(l.sum(), d2l.size(l))\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = gluon.loss.L2Loss()\n net = nn.Sequential()\n net.add(nn.Dense(1, use_bias=False))\n net.initialize()\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False)\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01})\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))"} +{"id": 112, "pytorch": "%matplotlib inline\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = torch.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = torch.normal(0, 1, size=(num_inputs, 1), requires_grad=True)\n b = torch.zeros(1, requires_grad=True)\n return [w, b]\ndef l2_penalty(w):\n return torch.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = nn.Sequential(nn.Linear(num_inputs, 1))\n for param in net.parameters():\n param.data.normal_()\n loss = nn.MSELoss(reduction='none')\n num_epochs, lr = 100, 0.003\n trainer = torch.optim.SGD([{\"params\":net[0].weight,'weight_decay': wd}, {\"params\":net[0].bias}], lr=lr)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n trainer.zero_grad()\n l = loss(net(X), y)\n l.mean().backward()\n trainer.step()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1,\n (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))", "mxnet": "%matplotlib inline\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = np.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = np.random.normal(scale=1, size=(num_inputs, 1))\n b = np.zeros(1)\n w.attach_grad()\n 
b.attach_grad()\n return [w, b]\ndef l2_penalty(w):\n return (w**2).sum() / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = nn.Sequential()\n net.add(nn.Dense(1))\n net.initialize(init.Normal(sigma=1))\n loss = gluon.loss.L2Loss()\n num_epochs, lr = 100, 0.003\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr, 'wd': wd})\n net.collect_params('.*bias').setattr('wd_mult', 0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"} +{"id": 113, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return torch.zeros_like(X)\n if dropout == 0:\n return X\n mask = (torch.rand(X.shape) > dropout).float()\n return mask * X / (1.0 - dropout)\nX= torch.arange(16, dtype = torch.float32).reshape((2, 8))\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Module):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2, is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "mxnet": "from mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return np.zeros_like(X)\n if dropout == 0:\n return X\n mask = 
np.random.uniform(0, 1, X.shape) > dropout\n return mask.astype(np.float32) * X / (1.0 - dropout)\nX = np.arange(16).reshape(2, 8)\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\nW1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens1))\nb1 = np.zeros(num_hiddens1)\nW2 = np.random.normal(scale=0.01, size=(num_hiddens1, num_hiddens2))\nb2 = np.zeros(num_hiddens2)\nW3 = np.random.normal(scale=0.01, size=(num_hiddens2, num_outputs))\nb3 = np.zeros(num_outputs)\nparams = [W1, b1, W2, b2, W3, b3]\nfor param in params:\n param.attach_grad()\ndropout1, dropout2 = 0.2, 0.5\ndef net(X):\n X = X.reshape(-1, num_inputs)\n H1 = npx.relu(np.dot(X, W1) + b1)\n if autograd.is_training():\n H1 = dropout_layer(H1, dropout1)\n H2 = npx.relu(np.dot(H1, W2) + b2)\n if autograd.is_training():\n H2 = dropout_layer(H2, dropout2)\n return np.dot(H2, W3) + b3\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation=\"relu\"),\n nn.Dropout(dropout1),\n nn.Dense(256, activation=\"relu\"),\n nn.Dropout(dropout2),\n nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 114, "pytorch": "trainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport torch\nfrom d2l import torch as d2l\nx = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)\ny = torch.sigmoid(x)\ny.backward(torch.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = torch.normal(0, 1, size=(4,4))\nfor i in range(100):\n M = torch.mm(M,torch.normal(0, 1, size=(4, 4)))", "mxnet": "trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nx = np.arange(-8.0, 8.0, 0.1)\nx.attach_grad()\nwith autograd.record():\n y = npx.sigmoid(x)\ny.backward()\nd2l.plot(x, [y, x.grad], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = np.random.normal(size=(4, 4))\nfor i in range(100):\n M = np.dot(M, np.random.normal(size=(4, 4)))"} +{"id": 115, "pytorch": "%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nn_train = train_data.shape[0]\ntrain_features = torch.tensor(all_features[:n_train].values, dtype=torch.float32)\ntest_features = torch.tensor(all_features[n_train:].values, dtype=torch.float32)\ntrain_labels = torch.tensor(train_data.SalePrice.values.reshape(-1, 1), dtype=torch.float32)\nloss = nn.MSELoss()\nin_features = train_features.shape[1]\ndef get_net():\n net = nn.Sequential(nn.Linear(in_features,1))\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = torch.clamp(net(features), 1, float('inf'))\n rmse = torch.sqrt(loss(torch.log(clipped_preds), torch.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = 
d2l.load_array((train_features, train_labels), batch_size)\n optimizer = torch.optim.Adam(net.parameters(), lr = learning_rate, weight_decay = weight_decay)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n optimizer.zero_grad()\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = torch.cat([X_train, X_part], 0)\n y_train = torch.cat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).detach().numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)", "mxnet": "%matplotlib inline\nimport pandas as pd\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nn_train = train_data.shape[0]\ntrain_features = np.array(all_features[:n_train].values, dtype=np.float32)\ntest_features = np.array(all_features[n_train:].values, dtype=np.float32)\ntrain_labels = np.array(train_data.SalePrice.values.reshape(-1, 1), dtype=np.float32)\nloss = gluon.loss.L2Loss()\ndef get_net():\n net = nn.Sequential()\n net.add(nn.Dense(1))\n net.initialize()\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = np.clip(net(features), 1, float('inf'))\n return np.sqrt(2 * loss(np.log(clipped_preds), np.log(labels)).mean())\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': learning_rate, 'wd': weight_decay})\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = np.concatenate([X_train, X_part], 0)\n y_train = np.concatenate([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, 
batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).asnumpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)"} +{"id": 116, "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = torch.rand(2, 20)\nnet(X)\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nnet = MLP()\nnet(X)\nclass MySequential(nn.Module):\n def __init__(self, *args):\n super().__init__()\n for idx, module in enumerate(args):\n self._modules[str(idx)] = module\n def forward(self, X):\n for block in self._modules.values():\n X = block(X)\n return X\nnet = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nnet(X)\nclass FixedHiddenMLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.rand_weight = torch.rand((20, 20), requires_grad=False)\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(torch.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nnet = FixedHiddenMLP()\nnet(X)\nclass NestMLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation='relu'))\nnet.add(nn.Dense(10))\nnet.initialize()\nX = np.random.uniform(size=(2, 20))\nnet(X)\nclass MLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.hidden = nn.Dense(256, activation='relu')\n self.out = nn.Dense(10)\n def forward(self, X):\n return self.out(self.hidden(X))\nnet = MLP()\nnet.initialize()\nnet(X)\nclass MySequential(nn.Block):\n def add(self, block):\n\n self._children[block.name] = block\n def forward(self, X):\n for block in self._children.values():\n X = block(X)\n return X\nnet = MySequential()\nnet.add(nn.Dense(256, activation='relu'))\nnet.add(nn.Dense(10))\nnet.initialize()\nnet(X)\nclass FixedHiddenMLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.rand_weight = self.params.get_constant('rand_weight', np.random.uniform(size=(20, 20)))\n self.dense = nn.Dense(20, activation='relu')\n def forward(self, X):\n X = self.dense(X)\n X = npx.relu(np.dot(X, self.rand_weight.data()) + 1)\n X = self.dense(X)\n while np.abs(X).sum() > 1:\n X /= 2\n return X.sum()\nnet = FixedHiddenMLP()\nnet.initialize()\nnet(X)\nclass NestMLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.net = nn.Sequential()\n self.net.add(nn.Dense(64, activation='relu'), nn.Dense(32, activation='relu'))\n self.dense = nn.Dense(16, activation='relu')\n def forward(self, X):\n return self.dense(self.net(X))\nchimera = nn.Sequential()\nchimera.add(NestMLP(), 
nn.Dense(20), FixedHiddenMLP())\nchimera.initialize()\nchimera(X)"} +{"id": 117, "pytorch": "import torch\nfrom torch import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = torch.rand(size=(2, 4))\nnet(X)\nnet.state_dict()['2.bias'].data\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_module(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, mean=0, std=0.01)\n nn.init.zeros_(m.bias)\nnet.apply(init_normal)\nnet[0].weight.data[0], net[0].bias.data[0]\ndef init_constant(m):\n if type(m) == nn.Linear:\n nn.init.constant_(m.weight, 1)\n nn.init.zeros_(m.bias)\nnet.apply(init_constant)\nnet[0].weight.data[0], net[0].bias.data[0]\ndef init_xavier(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n nn.init.constant_(m.weight, 42)\nnet[0].apply(init_xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n nn.init.uniform_(m.weight, -10, 10)\n m.weight.data *= m.weight.data.abs() >= 5\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.data[:] += 1\nnet[0].weight.data[0, 0] = 42\nnet[0].weight.data[0]\nlayer = CenteredLayer()\nlayer(torch.FloatTensor([1, 2, 3, 4, 5]))\nnet = nn.Sequential(nn.Linear(8, 128), CenteredLayer())", "mxnet": "from mxnet import init, np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(8, activation='relu'))\nnet.add(nn.Dense(1))\nnet.initialize()\nX = np.random.uniform(size=(2, 4))\nnet(X)\nnet.collect_params()['dense1_bias'].data()\ndef block1():\n net = nn.Sequential()\n net.add(nn.Dense(32, activation='relu'))\n net.add(nn.Dense(16, activation='relu'))\n return net\ndef block2():\n net = nn.Sequential()\n for _ in range(4):\n net.add(block1())\n return net\nrgnet = nn.Sequential()\nrgnet.add(block2())\nrgnet.add(nn.Dense(10))\nrgnet.initialize()\nrgnet(X)\nnet.initialize(init=init.Normal(sigma=0.01), force_reinit=True)\nnet[0].weight.data()[0]\nnet.initialize(init=init.Constant(1), force_reinit=True)\nnet[0].weight.data()[0]\nnet[0].weight.initialize(init=init.Xavier(), force_reinit=True)\nnet[1].initialize(init=init.Constant(42), force_reinit=True)\nclass MyInit(init.Initializer):\n def _init_weight(self, name, data):\n data[:] = np.random.uniform(-10, 10, data.shape)\n data *= np.abs(data) >= 5\nnet.initialize(MyInit(), force_reinit=True)\nnet[0].weight.data()[:2]\nnet[0].weight.data()[:] += 1\nnet[0].weight.data()[0, 0] = 42\nnet[0].weight.data()[0]\nlayer = CenteredLayer()\nlayer(np.array([1, 2, 3, 4, 5]))\nnet = nn.Sequential()\nnet.add(nn.Dense(128), CenteredLayer())\nnet.initialize()"} +{"id": 118, "pytorch": "import torch\nimport torch.nn.functional as F\nfrom torch import nn\nclass CenteredLayer(nn.Module):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(torch.rand(4, 8))\nY.mean()\nclass MyLinear(nn.Module):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = nn.Parameter(torch.randn(in_units, units))\n self.bias = nn.Parameter(torch.randn(units,))\n def forward(self, X):\n linear = torch.matmul(X, self.weight.data) + self.bias.data\n return F.relu(linear)\nlinear = MyLinear(5, 3)\nlinear.weight\nlinear(torch.rand(2, 5))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(torch.rand(2, 64))", "mxnet": 
"from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nclass CenteredLayer(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n def forward(self, X):\n return X - X.mean()\nY = net(np.random.uniform(size=(4, 8)))\nY.mean()\nclass MyDense(nn.Block):\n def __init__(self, units, in_units, **kwargs):\n super().__init__(**kwargs)\n self.weight = self.params.get('weight', shape=(in_units, units))\n self.bias = self.params.get('bias', shape=(units,))\n def forward(self, x):\n linear = np.dot(x, self.weight.data(ctx=x.ctx)) + self.bias.data(\n ctx=x.ctx)\n return npx.relu(linear)\ndense = MyDense(units=3, in_units=5)\ndense.params\ndense.initialize()\ndense(np.random.uniform(size=(2, 5)))\nnet = nn.Sequential()\nnet.add(MyDense(8, in_units=64), MyDense(1, in_units=8))\nnet.initialize()\nnet(np.random.uniform(size=(2, 64)))"} +{"id": 119, "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nx = torch.arange(4)\ntorch.save(x, 'x-file')\nx2 = torch.load('x-file')\ny = torch.zeros(4)\ntorch.save([x, y],'x-files')\nx2, y2 = torch.load('x-files')\nmydict = {'x': x, 'y': y}\ntorch.save(mydict, 'mydict')\nmydict2 = torch.load('mydict')\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = torch.randn(size=(2, 20))\nY = net(X)\ntorch.save(net.state_dict(), 'mlp.params')\nclone = MLP()\nclone.load_state_dict(torch.load('mlp.params'))\nclone.eval()", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nx = np.arange(4)\nnpx.save('x-file', x)\nx2 = npx.load('x-file')\ny = np.zeros(4)\nnpx.save('x-files', [x, y])\nx2, y2 = npx.load('x-files')\nmydict = {'x': x, 'y': y}\nnpx.save('mydict', mydict)\nmydict2 = npx.load('mydict')\nclass MLP(nn.Block):\n def __init__(self, **kwargs):\n super(MLP, self).__init__(**kwargs)\n self.hidden = nn.Dense(256, activation='relu')\n self.output = nn.Dense(10)\n def forward(self, x):\n return self.output(self.hidden(x))\nnet = MLP()\nnet.initialize()\nX = np.random.uniform(size=(2, 20))\nY = net(X)\nnet.save_parameters('mlp.params')\nclone = MLP()\nclone.load_parameters('mlp.params')"} +{"id": 120, "pytorch": "import torch\nfrom torch import nn\ntorch.device('cpu'), torch.device('cuda'), torch.device('cuda:1')\ntorch.cuda.device_count()\ndef try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return devices = [torch.device(f'cuda:{i}')\n return torch.device('cpu')\ndef try_all_gpus(): devices = [torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count())]\n return devices if devices else [torch.device('cpu')]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = torch.tensor([1, 2, 3])\nx.device\nX = torch.ones(2, 3, device=try_gpu())\nY = torch.rand(2, 3, device=try_gpu(1))\nZ = X.cuda(1)\nZ.cuda(1) is Z\nnet = nn.Sequential(nn.Linear(3, 1))\nnet = net.to(device=try_gpu())\nnet[0].weight.data.device", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnpx.cpu(), npx.gpu(), npx.gpu(1)\nnpx.num_gpus()\ndef try_gpu(i=0):\n return npx.gpu(i) if npx.num_gpus() >= i + 1 else npx.cpu()\n def try_all_gpus():\ndevices = [npx.gpu(i) for i in range(npx.num_gpus())]\n return devices if devices else [npx.cpu()]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = np.array([1, 2, 3])\nx.ctx\nX = np.ones((2, 3), ctx=try_gpu())\nY = np.random.uniform(size=(2, 3), ctx=try_gpu(1))\nZ = 
X.copyto(try_gpu(1))\nZ.as_in_ctx(try_gpu(1)) is Z\nnet = nn.Sequential()\nnet.add(nn.Dense(1))\nnet.initialize(ctx=try_gpu())\nnet[0].weight.data().ctx"} +{"id": 121, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef corr2d(X, K):\n h, w = K.shape\n Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = torch.tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Module):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = nn.Parameter(torch.rand(kernel_size))\n self.bias = nn.Parameter(torch.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = torch.ones((6, 8))\nX[:, 2:6] = 0\nK = torch.tensor([[1.0, -1.0]])\ncorr2d(X.t(), K)\nconv2d = nn.Conv2d(1,1, kernel_size=(1, 2), bias=False)\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.zero_grad()\n l.sum().backward()\n conv2d.weight.data[:] -= lr * conv2d.weight.grad\nconv2d.weight.data.reshape((1, 2))", "mxnet": "from mxnet import autograd, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef corr2d(X, K):\n h, w = K.shape\n Y = np.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = np.array([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Block):\n def __init__(self, kernel_size, **kwargs):\n super().__init__(**kwargs)\n self.weight = self.params.get('weight', shape=kernel_size)\n self.bias = self.params.get('bias', shape=(1,))\n def forward(self, x):\n return corr2d(x, self.weight.data()) + self.bias.data()\nX = np.ones((6, 8))\nX[:, 2:6] = 0\nK = np.array([[1.0, -1.0]])\ncorr2d(d2l.transpose(X), K)\nconv2d = nn.Conv2D(1, kernel_size=(1, 2), use_bias=False)\nconv2d.initialize()\n\nX = X.reshape(1, 1, 6, 8)\nY = Y.reshape(1, 1, 6, 7)\nlr = 3e-2\nfor i in range(10):\n with autograd.record():\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n l.backward()\n conv2d.weight.data()[:] -= lr * conv2d.weight.grad()\nconv2d.weight.data().reshape((1, 2))", "pytorch": "import torch\nfrom torch import nn\n\ndef comp_conv2d(conv2d, X):\n X = X.reshape((1, 1) + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1)\nX = torch.rand(size=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\ndef comp_conv2d(conv2d, X):\n conv2d.initialize()\n X = X.reshape((1, 1) + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(1, kernel_size=3, padding=1)\nX = np.random.uniform(size=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=3, padding=1, strides=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=(3, 5), 
padding=(0, 1), strides=(3, 4))\ncomp_conv2d(conv2d, X).shape"} +{"id": 123, "pytorch": "import torch\nfrom d2l import torch as d2l\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = torch.tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = torch.tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return torch.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = torch.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = torch.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = torch.normal(0, 1, (3, 3, 3))\nK = torch.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(torch.abs(Y1 - Y2).sum()) < 1e-6", "mxnet": "from mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = np.array([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return np.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = np.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = np.dot(K, X)\n return Y.reshape((c_o, h, w))\nX = np.random.normal(0, 1, (3, 3, 3))\nK = np.random.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(np.abs(Y1 - Y2).sum()) < 1e-6"} +{"id": 124, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = torch.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = torch.arange(16, dtype=torch.float32).reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2d(3)\npool2d(X)\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2d((2, 3), stride=(2, 3), padding=(0, 1))\npool2d(X)\nX = torch.cat((X, X + 1), 1)\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = np.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = np.arange(16, dtype=np.float32).reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, strides=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), strides=(2, 3))\npool2d(X)\nX = np.concatenate((X, X + 1), 1)\npool2d = nn.MaxPool2D(3, padding=1, strides=2)\npool2d(X)"} +{"id": 125, "pytorch": "import 
torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32)\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape: \t',X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2d:\n nn.init.xavier_uniform_(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = torch.optim.SGD(net.parameters(), lr=lr)\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.zero_grad()\n X, y = X.to(device), y.to(device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with torch.no_grad():\n metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))", "mxnet": "from mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Conv2D(channels=6, kernel_size=5, padding=2, activation='sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120, activation='sigmoid'),\n nn.Dense(84, activation='sigmoid'),\n nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 28, 28))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n net.initialize(force_reinit=True, ctx=device, init=init.Xavier())\n loss = gluon.loss.SoftmaxCrossEntropyLoss()\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n X, y = X.as_in_ctx(device), y.as_in_ctx(device)\n with autograd.record():\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n trainer.step(X.shape[0])\n metric.add(l.sum(), d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))"} +{"id": 126, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(\n nn.Conv2d(1, 96, kernel_size=11, stride=4, 
padding=1), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = torch.randn(1, 1, 224, 224)\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(\n nn.Conv2D(96, kernel_size=11, strides=4, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Conv2D(256, kernel_size=5, padding=2, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),\n nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),\n nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)"} +{"id": 127, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2d(kernel_size=2,stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(\n *conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5),\n nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = torch.randn(size=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef vgg_block(num_convs, num_channels):\n blk = nn.Sequential()\n for _ in range(num_convs):\n blk.add(nn.Conv2D(num_channels, kernel_size=3, padding=1, activation='relu'))\n blk.add(nn.MaxPool2D(pool_size=2, strides=2))\n return blk\ndef vgg(conv_arch):\n net = nn.Sequential()\n for (num_convs, num_channels) in conv_arch:\n net.add(vgg_block(num_convs, num_channels))\n net.add(nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(10))\n return net\nnet = vgg(conv_arch)\nnet.initialize()\nX = np.random.uniform(size=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.name, 'output shape:\t', X.shape)"} +{"id": 128, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n 
nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU(),\n nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2d(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2d(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2d(3, stride=2),\n nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten())\nX = torch.rand(size=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef nin_block(num_channels, kernel_size, strides, padding):\n blk = nn.Sequential()\n blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding, activation='relu'),\n nn.Conv2D(num_channels, kernel_size=1, activation='relu'),\n nn.Conv2D(num_channels, kernel_size=1, activation='relu'))\n return blk\nnet = nn.Sequential()\nnet.add(nin_block(96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(pool_size=3, strides=2),\n nin_block(256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(pool_size=3, strides=2),\n nin_block(384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Dropout(0.5),\n nin_block(10, kernel_size=3, strides=1, padding=1),\n nn.GlobalAvgPool2D(),\n nn.Flatten())\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)"} +{"id": 129, "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nclass Inception(nn.Module):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)\n self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return torch.cat((p1, p2, p3, p4), dim=1)\nb1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2d(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2d((1,1)),\n nn.Flatten())\nnet 
= nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = torch.rand(size=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nclass Inception(nn.Block):\n def __init__(self, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(c1, kernel_size=1, activation='relu')\n self.p2_1 = nn.Conv2D(c2[0], kernel_size=1, activation='relu')\n self.p2_2 = nn.Conv2D(c2[1], kernel_size=3, padding=1, activation='relu')\n self.p3_1 = nn.Conv2D(c3[0], kernel_size=1, activation='relu')\n self.p3_2 = nn.Conv2D(c3[1], kernel_size=5, padding=2, activation='relu')\n self.p4_1 = nn.MaxPool2D(pool_size=3, strides=1, padding=1)\n self.p4_2 = nn.Conv2D(c4, kernel_size=1, activation='relu')\n def forward(self, x):\n p1 = self.p1_1(x)\n p2 = self.p2_2(self.p2_1(x))\n p3 = self.p3_2(self.p3_1(x))\n p4 = self.p4_2(self.p4_1(x))\n return np.concatenate((p1, p2, p3, p4), axis=1)\nb1 = nn.Sequential()\nb1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb2 = nn.Sequential()\nb2.add(nn.Conv2D(64, kernel_size=1, activation='relu'),\n nn.Conv2D(192, kernel_size=3, padding=1, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb3 = nn.Sequential()\nb3.add(Inception(64, (96, 128), (16, 32), 32),\n Inception(128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb4 = nn.Sequential()\nb4.add(Inception(192, (96, 208), (16, 48), 64),\n Inception(160, (112, 224), (24, 64), 64),\n Inception(128, (128, 256), (24, 64), 64),\n Inception(112, (144, 288), (32, 64), 64),\n Inception(256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb5 = nn.Sequential()\nb5.add(Inception(256, (160, 320), (32, 128), 128),\n Inception(384, (192, 384), (48, 128), 128),\n nn.GlobalAvgPool2D())\nnet = nn.Sequential()\nnet.add(b1, b2, b3, b4, b5, nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 96, 96))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 130, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n if not torch.is_grad_enabled():\n X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps)\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = X.mean(dim=0)\n var = ((X - mean) ** 2).mean(dim=0)\n else:\n mean = X.mean(dim=(0, 2, 3), keepdim=True)\n var = ((X - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / torch.sqrt(var + eps)\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean.data, moving_var.data\nclass BatchNorm(nn.Module):\n def __init__(self, num_features, num_dims):\n super().__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = nn.Parameter(torch.ones(shape))\n self.beta = nn.Parameter(torch.zeros(shape))\n self.moving_mean = torch.zeros(shape)\n self.moving_var = torch.ones(shape)\n def forward(self, X):\n if self.moving_mean.device != X.device:\n self.moving_mean = self.moving_mean.to(X.device)\n self.moving_var = self.moving_var.to(X.device)\n Y, self.moving_mean, self.moving_var 
= batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9)\n return Y\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),\n nn.Linear(16*4*4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nnet[1].gamma.reshape((-1,)), net[1].beta.reshape((-1,))\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5), nn.BatchNorm2d(6), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1d(120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1d(84), nn.Sigmoid(),\n nn.Linear(84, 10))", "mxnet": "from mxnet import autograd, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n if not autograd.is_training():\n X_hat = (X - moving_mean) / np.sqrt(moving_var + eps)\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = X.mean(axis=0)\n var = ((X - mean) ** 2).mean(axis=0)\n else:\n mean = X.mean(axis=(0, 2, 3), keepdims=True)\n var = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True)\n X_hat = (X - mean) / np.sqrt(var + eps)\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Block):\n def __init__(self, num_features, num_dims, **kwargs):\n super().__init__(**kwargs)\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.params.get('gamma', shape=shape, init=init.One())\n self.beta = self.params.get('beta', shape=shape, init=init.Zero())\n self.moving_mean = np.zeros(shape)\n self.moving_var = np.ones(shape)\n def forward(self, X):\n if self.moving_mean.ctx != X.ctx:\n self.moving_mean = self.moving_mean.copyto(X.ctx)\n self.moving_var = self.moving_var.copyto(X.ctx)\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma.data(), self.beta.data(), self.moving_mean,\n self.moving_var, eps=1e-12, momentum=0.9)\n return Y\nnet = nn.Sequential()\nnet.add(nn.Conv2D(6, kernel_size=5),\n BatchNorm(6, num_dims=4),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(16, kernel_size=5),\n BatchNorm(16, num_dims=4),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120),\n BatchNorm(120, num_dims=2),\n nn.Activation('sigmoid'),\n nn.Dense(84),\n BatchNorm(84, num_dims=2),\n nn.Activation('sigmoid'),\n nn.Dense(10))\nnet[1].gamma.data().reshape(-1,), net[1].beta.data().reshape(-1,)\nnet = nn.Sequential()\nnet.add(nn.Conv2D(6, kernel_size=5),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(16, kernel_size=5),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.Dense(84),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.Dense(10))"} +{"id": 131, "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nclass 
Residual(nn.Module):\n def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1):\n super().__init__()\n self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2d(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2d(num_channels)\n self.bn2 = nn.BatchNorm2d(num_channels)\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3,3)\nX = torch.rand(4, 3, 6, 6)\nY = blk(X)\nY.shape\nblk = Residual(3,6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\ndef resnet_block(input_channels, num_channels, num_residuals, first_block=False):\n blk = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))\n else:\n blk.append(Residual(num_channels, num_channels))\n return blk\nb2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))\nb3 = nn.Sequential(*resnet_block(64, 128, 2))\nb4 = nn.Sequential(*resnet_block(128, 256, 2))\nb5 = nn.Sequential(*resnet_block(256, 512, 2))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2d((1,1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = torch.rand(size=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nclass Residual(nn.Block):\n def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):\n super().__init__(**kwargs)\n self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1, strides=strides)\n self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(num_channels, kernel_size=1, strides=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm()\n self.bn2 = nn.BatchNorm()\n def forward(self, X):\n Y = npx.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n return npx.relu(Y + X)\nblk = Residual(3)\nblk.initialize()\nX = np.random.uniform(size=(4, 3, 6, 6))\nblk(X).shape\nblk = Residual(6, use_1x1conv=True, strides=2)\nblk.initialize()\nblk(X).shape\nnet = nn.Sequential()\nnet.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\ndef resnet_block(num_channels, num_residuals, first_block=False):\n blk = nn.Sequential()\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.add(Residual(num_channels, use_1x1conv=True, strides=2))\n else:\n blk.add(Residual(num_channels))\n return blk\nnet.add(resnet_block(64, 2, first_block=True),\n resnet_block(128, 2),\n resnet_block(256, 2),\n resnet_block(512, 2))\nnet.add(nn.GlobalAvgPool2D(), nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 132, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef conv_block(input_channels, num_channels):\n return 
nn.Sequential(\n nn.BatchNorm2d(input_channels), nn.ReLU(),\n nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Module):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = torch.cat((X, Y), dim=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = torch.randn(4, 3, 8, 8)\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2d(input_channels), nn.ReLU(),\n nn.Conv2d(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2d(kernel_size=2, stride=2))\nblk = transition_block(23, 10)\nblk(Y).shape\nb1 = nn.Sequential(\n nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nblks = []\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n blks.append(DenseBlock(num_convs, num_channels, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n blks.append(transition_block(num_channels, num_channels // 2))\n num_channels = num_channels // 2\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2d(num_channels), nn.ReLU(),\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef conv_block(num_channels):\n blk = nn.Sequential()\n blk.add(nn.BatchNorm(),\n nn.Activation('relu'),\n nn.Conv2D(num_channels, kernel_size=3, padding=1))\n return blk\nclass DenseBlock(nn.Block):\n def __init__(self, num_convs, num_channels, **kwargs):\n super().__init__(**kwargs)\n self.net = nn.Sequential()\n for _ in range(num_convs):\n self.net.add(conv_block(num_channels))\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = np.concatenate((X, Y), axis=1)\n return X\nblk = DenseBlock(2, 10)\nblk.initialize()\nX = np.random.uniform(size=(4, 3, 8, 8))\nY = blk(X)\nY.shape\ndef transition_block(num_channels):\n blk = nn.Sequential()\n blk.add(nn.BatchNorm(), nn.Activation('relu'),\n nn.Conv2D(num_channels, kernel_size=1),\n nn.AvgPool2D(pool_size=2, strides=2))\n return blk\nblk = transition_block(10)\nblk.initialize()\nblk(Y).shape\nnet = nn.Sequential()\nnet.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n net.add(DenseBlock(num_convs, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n num_channels //= 2\n net.add(transition_block(num_channels))\nnet.add(nn.BatchNorm(),\n nn.Activation('relu'),\n nn.GlobalAvgPool2D(),\n nn.Dense(10))"} +{"id": 133, "pytorch": "%matplotlib inline\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nT = 1000\ntime = torch.arange(1, T + 1, dtype=torch.float32)\nx = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = torch.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + 
i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = torch.optim.Adam(net.parameters(), lr)\n for epoch in range(epochs):\n for X, y in train_iter:\n trainer.zero_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = torch.zeros(T)\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = torch.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape(-1)\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))", "mxnet": "%matplotlib inline\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nT = 1000\ntime = np.arange(1, T + 1, dtype=np.float32)\nx = np.sin(0.01 * time) + np.random.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = np.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef get_net():\n net = nn.Sequential()\n net.add(nn.Dense(10, activation='relu'),\n nn.Dense(1))\n net.initialize(init.Xavier())\n return net\nloss = gluon.loss.L2Loss()\ndef train(net, train_iter, loss, epochs, lr):\n trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr})\n for epoch in range(epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.asnumpy(), onestep_preds.asnumpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = np.zeros(T)\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.asnumpy(), onestep_preds.asnumpy(),\n multistep_preds[n_train + tau:].asnumpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n 
xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = np.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape(-1)\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].asnumpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"} +{"id": 134, "pytorch": "import collections\nimport re\nfrom d2l import torch as d2l", "mxnet": "import collections\nimport re\nfrom d2l import mxnet as d2l"} +{"id": 135, "pytorch": "import random\nimport torch\nfrom d2l import torch as d2l\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield torch.tensor(X), torch.tensor(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = torch.tensor(corpus[offset: offset + num_tokens])\n Ys = torch.tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y", "mxnet": "import random\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield np.array(X), np.array(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = np.array(corpus[offset: offset + num_tokens])\n Ys = np.array(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"} +{"id": 136, "pytorch": "import torch\nfrom d2l import torch as d2l\nX, 
W_xh = torch.normal(0, 1, (3, 1)), torch.normal(0, 1, (1, 4))\nH, W_hh = torch.normal(0, 1, (3, 4)), torch.normal(0, 1, (4, 4))\ntorch.matmul(X, W_xh) + torch.matmul(H, W_hh)\ntorch.matmul(torch.cat((X, H), 1), torch.cat((W_xh, W_hh), 0))", "mxnet": "from mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nX, W_xh = np.random.normal(0, 1, (3, 1)), np.random.normal(0, 1, (1, 4))\nH, W_hh = np.random.normal(0, 1, (3, 4)), np.random.normal(0, 1, (4, 4))\nnp.dot(X, W_xh) + np.dot(H, W_hh)\nnp.dot(np.concatenate((X, H), 1), np.concatenate((W_xh, W_hh), 0))"} +{"id": 137, "pytorch": "%matplotlib inline\nimport math\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nF.one_hot(torch.tensor([0, 2]), len(vocab))\nX = torch.arange(10).reshape((2, 5))\nF.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device) * 0.01\n W_xh = normal((num_inputs, num_hiddens))\n W_hh = normal((num_hiddens, num_hiddens))\n b_h = torch.zeros(num_hiddens, device=device)\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_rnn_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = torch.tanh(torch.mm(X, W_xh) + torch.mm(H, W_hh) + b_h)\n Y = torch.mm(H, W_hq) + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens, device)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = F.one_hot(X.T, self.vocab_size).type(torch.float32)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size, device):\n return self.init_state(batch_size, self.num_hiddens, device)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0], d2l.try_gpu())\nY, new_state = net(X.to(d2l.try_gpu()), state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1, device=device)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.argmax(dim=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\ndef grad_clipping(net, theta):\n if isinstance(net, nn.Module):\n params = [p for p in net.parameters() if p.requires_grad]\n else:\n params = net.params\n norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))\n if norm > theta:\n for param in params:\n param.grad[:] *= theta / norm\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = 
d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], device=device)\n else:\n if isinstance(net, nn.Module) and not isinstance(state, tuple):\n state.detach_()\n else:\n for s in state:\n s.detach_()\n y = Y.T.reshape(-1)\n X, y = X.to(device), y.to(device)\n y_hat, state = net(X, state)\n l = loss(y_hat, y.long()).mean()\n if isinstance(updater, torch.optim.Optimizer):\n updater.zero_grad()\n l.backward()\n grad_clipping(net, 1)\n updater.step()\n else:\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * y.numel(), y.numel())\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, nn.Module):\n updater = torch.optim.SGD(net.parameters(), lr)\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])", "mxnet": "%matplotlib inline\nimport math\nfrom mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnpx.one_hot(np.array([0, 2]), len(vocab))\nX = np.arange(10).reshape((2, 5))\nnpx.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n W_xh = normal((num_inputs, num_hiddens))\n W_hh = normal((num_hiddens, num_hiddens))\n b_h = np.zeros(num_hiddens, ctx=device)\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_rnn_state(batch_size, num_hiddens, device):\n return (np.zeros((batch_size, num_hiddens), ctx=device), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = np.tanh(np.dot(X, W_xh) + np.dot(H, W_hh) + b_h)\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens, device)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = npx.one_hot(X.T, self.vocab_size)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size, ctx):\n return self.init_state(batch_size, self.num_hiddens, ctx)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0], d2l.try_gpu())\nY, new_state = net(X.as_in_context(d2l.try_gpu()), state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1, ctx=device)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: np.array([outputs[-1]], ctx=device).reshape((1, 
1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.argmax(axis=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\ndef grad_clipping(net, theta):\n if isinstance(net, gluon.Block):\n params = [p.data() for p in net.collect_params().values()]\n else:\n params = net.params\n norm = math.sqrt(sum((p.grad ** 2).sum() for p in params))\n if norm > theta:\n for param in params:\n param.grad[:] *= theta / norm\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], ctx=device)\n else:\n for s in state:\n s.detach()\n y = Y.T.reshape(-1)\n X, y = X.as_in_ctx(device), y.as_in_ctx(device)\n with autograd.record():\n y_hat, state = net(X, state)\n l = loss(y_hat, y).mean()\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * d2l.size(y), d2l.size(y))\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = gluon.loss.SoftmaxCrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, gluon.Block):\n net.initialize(ctx=device, force_reinit=True, init=init.Normal(0.01))\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n updater = lambda batch_size: trainer.step(batch_size)\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])"} +{"id": 138, "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = nn.RNN(len(vocab), num_hiddens)\nstate = torch.zeros((1, batch_size, num_hiddens))\nstate.shape\nX = torch.rand(size=(num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, state_new.shape\nclass RNNModel(nn.Module):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.num_hiddens = self.rnn.hidden_size\n if not self.rnn.bidirectional:\n self.num_directions = 1\n self.linear = nn.Linear(self.num_hiddens, self.vocab_size)\n else:\n self.num_directions = 2\n self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)\n def forward(self, inputs, state):\n X = F.one_hot(inputs.T.long(), self.vocab_size)\n X = X.to(torch.float32)\n Y, state = self.rnn(X, state)\n output = self.linear(Y.reshape((-1, Y.shape[-1])))\n return output, state\n def begin_state(self, device, batch_size=1):\n if not isinstance(self.rnn, nn.LSTM):\n return torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device)\n else:\n return (torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device),\n torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, 
self.num_hiddens), device=device))\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, vocab_size=len(vocab))\nnet = net.to(device)\nd2l.predict_ch8('time traveller', 10, net, vocab, device)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn, rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = rnn.RNN(num_hiddens)\nrnn_layer.initialize()\nstate = rnn_layer.begin_state(batch_size=batch_size)\nlen(state), state[0].shape\nX = np.random.uniform(size=(num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, len(state_new), state_new[0].shape\nclass RNNModel(nn.Block):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.dense = nn.Dense(vocab_size)\n def forward(self, inputs, state):\n X = npx.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.dense(Y.reshape(-1, Y.shape[-1]))\n return output, state\n def begin_state(self, *args, **kwargs):\n return self.rnn.begin_state(*args, **kwargs)\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, len(vocab))\nnet.initialize(force_reinit=True, ctx=device)\nd2l.predict_ch8('time traveller', 10, net, vocab, device)"} +{"id": 139, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_gru_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = torch.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)\n R = torch.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)\n H_tilda = torch.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = H @ W_hq + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H,)\nnum_inputs = vocab_size\ngru_layer = nn.GRU(num_inputs, num_hiddens)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nmodel = model.to(device)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = 
normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_gru_state(batch_size, num_hiddens, device):\n return (np.zeros(shape=(batch_size, num_hiddens), ctx=device), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = npx.sigmoid(np.dot(X, W_xz) + np.dot(H, W_hz) + b_z)\n R = npx.sigmoid(np.dot(X, W_xr) + np.dot(H, W_hr) + b_r)\n H_tilda = np.tanh(np.dot(X, W_xh) + np.dot(R * H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H,)\ngru_layer = rnn.GRU(num_hiddens)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"} +{"id": 140, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_lstm_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), torch.zeros((batch_size, num_hiddens), device=device))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = torch.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)\n F = torch.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)\n O = torch.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)\n C_tilda = torch.tanh((X @ W_xc) + (H @ W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * torch.tanh(C)\n Y = (H @ W_hq) + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H, C)\nnum_inputs = vocab_size\nlstm_layer = nn.LSTM(num_inputs, num_hiddens)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nmodel = model.to(device)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n 
param.attach_grad()\n return params\ndef init_lstm_state(batch_size, num_hiddens, device):\n return (np.zeros((batch_size, num_hiddens), ctx=device), np.zeros((batch_size, num_hiddens), ctx=device))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = npx.sigmoid(np.dot(X, W_xi) + np.dot(H, W_hi) + b_i)\n F = npx.sigmoid(np.dot(X, W_xf) + np.dot(H, W_hf) + b_f)\n O = npx.sigmoid(np.dot(X, W_xo) + np.dot(H, W_ho) + b_o)\n C_tilda = np.tanh(np.dot(X, W_xc) + np.dot(H, W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * np.tanh(C)\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H, C)\nlstm_layer = rnn.LSTM(num_hiddens)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"}
+{"id": 141, "pytorch": "import os\nimport torch\nfrom d2l import torch as d2l\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = torch.tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = (array != vocab['<pad>']).type(torch.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.type(torch.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.type(torch.int32))\n print('Valid length of Y:', Y_valid_len)\n break", "mxnet": "import os\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = np.array([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = (array != vocab['<pad>']).astype(np.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(np.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.astype(np.int32))\n print('Valid length of Y:', Y_valid_len)\n break"}
+{"id": 142, "pytorch": "x = torch.arange(12)\nX = x.reshape(3, 4)\ntorch.zeros((2, 3, 4))\ntorch.ones((2, 3, 4))\ntorch.randn(3, 4)\ntorch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = torch.tensor([1.0, 2, 4, 8])\ny = torch.tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\ntorch.exp(x)\nX = torch.arange(12, dtype=torch.float32).reshape((3,4))\nY = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\ntorch.cat((X, Y), dim=0), torch.cat((X, Y), dim=1)\na = torch.arange(3).reshape((3, 1))\nb = torch.arange(2).reshape((1, 2))\nZ = torch.zeros_like(Y)\nZ[:] = X + Y\nA = X.numpy()\nB = torch.tensor(A)\na = torch.tensor([3.5])\nprint(a, a.item(), float(a), int(a))", "paddle": "x = paddle.arange(12)\nX = paddle.reshape(x, (3, 4))\npaddle.zeros((2, 3, 4))\npaddle.ones((2, 3, 4))\npaddle.randn((3, 4),'float32')\npaddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = paddle.to_tensor([1.0, 2, 4, 8])\ny = paddle.to_tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x**y\npaddle.exp(x)\nX = paddle.arange(12, dtype='float32').reshape((3, 4))\nY = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\npaddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1)\na = paddle.reshape(paddle.arange(3), (3, 1))\nb = paddle.reshape(paddle.arange(2), (1, 2))\nZ = 
paddle.zeros_like(Y)\nZ = X + Y\nA = X.numpy()\nB = paddle.to_tensor(A)\ntype(A), type(B)\na = paddle.to_tensor([3.5])\na, a.item(), float(a), int(a)"} +{"id": 143, "pytorch": "import torch\nX, y = torch.tensor(inputs.values), torch.tensor(outputs.values)", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nX, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values)"} +{"id": 144, "pytorch": "import torch\nx = torch.tensor(3.0)\ny = torch.tensor(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = torch.arange(4)\nA = torch.arange(20).reshape(5, 4)\nA.T\nB = torch.tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == B.T\nX = torch.arange(24).reshape(2, 3, 4)\nA = torch.arange(20, dtype=torch.float32).reshape(5, 4)\nB = A.clone()\nprint(A, A + B)\na = 2\nX = torch.arange(24).reshape(2, 3, 4)\nprint(a + X, (a * X).shape)\nx = torch.arange(4, dtype=torch.float32)\nprint(x, x.sum())\na = A.sum()\nA.mean()\nA.sum() / A.numel()\nA.mean(axis=0)\nA.sum(axis=0) / A.shape[0]\nsum_A = A.sum(axis=1, keepdims=True)\ny = torch.ones(4, dtype = torch.float32)\nprint(torch.dot(x, y))\ntorch.sum(x * y)\nA.shape, x.shape, torch.mv(A, x)\nB = torch.ones(4, 3)\ntorch.mm(A, B)\nu = torch.tensor([3.0, -4.0])\ntorch.norm(u)\ntorch.abs(u).sum()\ntorch.norm(torch.ones((4, 9)))", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nx = paddle.to_tensor([3.0])\ny = paddle.to_tensor([2.0])\nx + y, x * y, x / y, x**y\nx = paddle.arange(4)\nA = paddle.reshape(paddle.arange(20), (5, 4))\npaddle.transpose(A, perm=[1, 0])\nB = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == paddle.transpose(B, perm=[1, 0])\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\nA = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4))\nB = A.clone()\nA, A + B\na = 2\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\na + X, (a * X).shape\nx = paddle.arange(4, dtype=paddle.float32)\nprint(x, x.sum())\nA.shape, A.sum()\nA.mean(), A.sum() / A.numel()\nA.mean(axis=0), A.sum(axis=0) / A.shape[0]\nsum_A = paddle.sum(A, axis=1, keepdim=True)\ny = paddle.ones(shape=[4], dtype='float32')\nx, y, paddle.dot(x, y)\npaddle.sum(x * y)\nA.shape, x.shape, paddle.mv(A, x)\nB = paddle.ones(shape=[4, 3], dtype='float32')\npaddle.mm(A, B)\nu = paddle.to_tensor([3.0, -4.0])\npaddle.norm(u)\npaddle.abs(u).sum()\npaddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))"} +{"id": 145, "pytorch": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import torch as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1", "paddle": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import paddle as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1"} +{"id": 146, "pytorch": "import torch\nx = torch.arange(4.0)\nx.requires_grad_(True)\nx.grad\ny = 2 * torch.dot(x, x)\nx.grad.zero_()\ny = x.sum()\ny.backward()\nx.grad\nx.grad.zero_()\ny = x * x\ny.sum().backward()\nx.grad\nx.grad.zero_()\ny = x * x\nu = y.detach()\nz = u * x\nz.sum().backward()\nx.grad == u\nx.grad.zero_()\ny.sum().backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while b.norm() < 1000:\n b = b * 2\n if b.sum() > 0:\n c = b\n 
else:\n c = 100 * b\n return c\na = torch.randn(size=(), requires_grad=True)\nd = f(a)\nd.backward()", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nx = paddle.arange(4, dtype='float32')\nx = paddle.to_tensor(x, stop_gradient=False)\ny = 2 * paddle.dot(x, x)\nx.clear_gradient()\ny = paddle.sum(x)\ny.backward()\nx.grad\nx.clear_gradient()\ny = x * x\npaddle.sum(y).backward()\nx.grad\nx.clear_gradient()\ny = x * x\nu = y.detach()\nz = u * x\npaddle.sum(z).backward()\nx.grad == u\nx.clear_gradient()\npaddle.sum(y).backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while paddle.norm(b) < 1000:\n b = b * 2\n if paddle.sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False)\nd = f(a)\nd.backward()"} +{"id": 147, "pytorch": "%matplotlib inline\nimport torch\nfrom torch.distributions import multinomial\nfrom d2l import torch as d2l\nfair_probs = torch.ones([6]) / 6\nmultinomial.Multinomial(1, fair_probs).sample()\nmultinomial.Multinomial(10, fair_probs).sample()\ncounts = multinomial.Multinomial(1000, fair_probs).sample()", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport numpy as np\nimport paddle\nfair_probs = [1.0 / 6] * 6\npaddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample()\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000"} +{"id": 148, "pytorch": "counts = multinomial.Multinomial(10, fair_probs).sample((500,))\ncum_counts = counts.cumsum(dim=0)\nestimates = cum_counts / cum_counts.sum(dim=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].numpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nimport torch\na = dir(torch.distributions)\nhelp(torch.ones)\ntorch.ones(4)", "paddle": "counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1))\ncum_counts = counts.cumsum(axis=0)\ncum_counts = cum_counts.squeeze(axis=1)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdim=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i],\n label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend()\nimport warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nhelp(paddle.ones)\npaddle.ones([4], dtype='float32')"} +{"id": 149, "pytorch": "%matplotlib inline\nimport math\nimport time\nimport numpy as np\nimport torch\nfrom d2l import torch as d2l\nn = 10000\na = torch.ones(n)\nb = torch.ones(n)\nc = torch.zeros(n)\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport time\nimport numpy as np\nimport paddle\nn = 10000\na = 
paddle.ones([n])\nb = paddle.ones([n])\nc = paddle.zeros([n])\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x',\n ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"} +{"id": 150, "pytorch": "%matplotlib inline\nimport random\nimport torch\nfrom d2l import torch as d2l\ndef synthetic_data(w, b, num_examples):\n X = torch.normal(0, 1, (num_examples, len(w)))\n y = torch.matmul(X, w) + b\n y += torch.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = torch.tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = torch.tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n print(X, '\n', y)\n break\nw = torch.normal(0, 0.01, size=(2,1), requires_grad=True)\nb = torch.zeros(1, requires_grad=True)\ndef linreg(X, w, b):\n return torch.matmul(X, w) + b\ndef sgd(params, lr, batch_size):\n with torch.no_grad():\n for param in params:\n param -= lr * param.grad / batch_size\n param.grad.zero_()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with torch.no_grad():\n train_l = loss(net(features, w, b), labels)", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ndef synthetic_data(w, b, num_examples):\n X = paddle.normal(0, 1, (num_examples, len(w)))\n y = paddle.matmul(X, w) + b\n y += paddle.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n break\nw = paddle.normal(0, 0.01, shape=(2,1))\nb = paddle.zeros(shape=[1])\nw.stop_gradient = False\nb.stop_gradient = False\ndef linreg(X, w, b):\n return paddle.matmul(X, w) + b\n with paddle.no_grad():\n for i, param in enumerate(params):\n param -= lr * params[i].grad / batch_size\n params[i].set_value(param)\n params[i].clear_gradient()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with paddle.no_grad():\n train_l = loss(net(features, w, b), labels)"} +{"id": 151, "pytorch": "import numpy as np\nimport torch\nfrom torch.utils import data\nfrom d2l 
import torch as d2l\ntrue_w = torch.tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = data.TensorDataset(*data_arrays)\n return data.DataLoader(dataset, batch_size, shuffle=is_train)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom torch import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nnet[0].weight.data.normal_(0, 0.01)\nnet[0].bias.data.fill_(0)\ntrainer = torch.optim.SGD(net.parameters(), lr=0.03)\nw = net[0].weight.data\nb = net[0].bias.data", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport paddle\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = paddle.io.TensorDataset(data_arrays)\n return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01))\nbias_attr = paddle.ParamAttr(initializer=None)\nnet = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters())\nw = net[0].weight\nb = net[0].bias"} +{"id": 152, "pytorch": "%matplotlib inline\nimport torch\nimport torchvision\nfrom torch.utils import data\nfrom torchvision import transforms\nfrom d2l import torch as d2l\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = torchvision.datasets.FashionMNIST(\n root=\"../data\", train=True, transform=trans, download=True)\nmnist_test = torchvision.datasets.FashionMNIST(\n root=\"../data\", train=False, transform=trans, download=True)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if torch.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(data.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape(18, 28, 28), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\n return 4\ntrain_iter = data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = torchvision.datasets.FashionMNIST(root=\"../data\", train=True, transform=trans, download=True)\n mnist_test = torchvision.datasets.FashionMNIST(root=\"../data\", train=False, transform=trans, download=True)\n return (data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),\n data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport sys\nimport paddle\nfrom paddle.vision import transforms\nd2l.use_svg_display()\ntrans = 
transforms.ToTensor()\nmnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\nmnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if paddle.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\n return 4\ntrain_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers())\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\n mnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\n return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()),\n paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, num_workers=get_dataloader_workers()))"} +{"id": 153, "pytorch": "import torch\nfrom IPython import display\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True)\nb = torch.zeros(num_outputs, requires_grad=True)\nX = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = torch.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = torch.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = torch.tensor([0, 2])\ny_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - torch.log(y_hat[range(len(y_hat)), y])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n cmp = y_hat.type(y.dtype) == y\n return float(cmp.type(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, torch.nn.Module):\n net.eval()\n metric = Accumulator(2)\n with torch.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, torch.nn.Module):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, torch.optim.Optimizer):\n updater.zero_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom IPython import display\nbatch_size = 256\ntrain_iter, 
test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs))\nb = paddle.zeros(shape=(num_outputs,))\nW.stop_gradient=False\nb.stop_gradient=False\nX = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = paddle.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = paddle.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = paddle.to_tensor([0, 2])\ny_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n if len(y_hat.shape) < len(y.shape):\n cmp = y_hat.astype(y.dtype) == y.squeeze()\n else:\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, paddle.nn.Layer):\n net.eval()\n metric = Accumulator(2)\n with paddle.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, paddle.nn.Layer):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]"} +{"id": 154, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\ntrainer = torch.optim.SGD(net.parameters(), lr=0.1)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.Normal(m.weight, std=0.01)\nnet.apply(init_weights);\ntrainer = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())"} +{"id": 155, "pytorch": "%matplotlib inline\nimport torch\nfrom d2l import torch as d2l\nx = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)\ny = torch.relu(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(torch.ones_like(x), retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of relu', figsize=(5, 2.5))\ny = torch.sigmoid(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = torch.tanh(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of 
tanh', figsize=(5, 2.5))", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(-8.0, 8.0, 0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.relu(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = paddle.nn.functional.sigmoid(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = paddle.tanh(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))"} +{"id": 156, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = nn.Parameter(torch.randn(\n num_inputs, num_hiddens, requires_grad=True) * 0.01)\nb1 = nn.Parameter(torch.zeros(num_hiddens, requires_grad=True))\nW2 = nn.Parameter(torch.randn(\n num_hiddens, num_outputs, requires_grad=True) * 0.01)\nb2 = nn.Parameter(torch.zeros(num_outputs, requires_grad=True))\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = torch.zeros_like(X)\n return torch.max(X, a)\nnum_epochs, lr = 10, 0.1\nupdater = torch.optim.SGD(params, lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = paddle.randn([num_inputs, num_hiddens]) * 0.01\nW1.stop_gradient = False\nb1 = paddle.zeros([num_hiddens])\nb1.stop_gradient = False\nW2 = paddle.randn([num_hiddens, num_outputs]) * 0.01\nW2.stop_gradient = False\nb2 = paddle.zeros([num_outputs])\nb2.stop_gradient = False\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = paddle.zeros_like(X)\n return paddle.maximum(X, a)\nnum_epochs, lr = 10, 0.1\nupdater = paddle.optimizer.SGD(learning_rate=lr, parameters=params)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)"} +{"id": 157, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Linear(256, 10))\nfor layer in net:\n if type(layer) == nn.Linear:\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, 
std=0.01))\n layer.weight_attr = weight_attr\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 158, "pytorch": "import math\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\ntrue_w, features, poly_features, labels = [torch.tensor(x, dtype=torch.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = nn.MSELoss(reduction='none')\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels.reshape(-1,1)), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape(-1,1)), batch_size, is_train=False)\n trainer = torch.optim.SGD(net.parameters(), lr=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:], num_epochs=1500)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport numpy as np\nimport paddle\nfrom paddle import nn\ntrue_w, features, poly_features, labels = [paddle.to_tensor(x, dtype=\n paddle.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef train(train_features, test_features, train_labels, test_labels,\n num_epochs=400):\n loss = nn.MSELoss()\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array(((train_features, train_labels.reshape([-1,1]))), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, is_train=False)\n trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2],\n labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :],\n labels[:n_train], labels[n_train:], num_epochs=1500)"} +{"id": 159, "pytorch": "%matplotlib inline\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = torch.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = 
d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = torch.normal(0, 1, size=(num_inputs, 1), requires_grad=True)\n b = torch.zeros(1, requires_grad=True)\n return [w, b]\ndef l2_penalty(w):\n return torch.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = nn.Sequential(nn.Linear(num_inputs, 1))\n for param in net.parameters():\n param.data.normal_()\n loss = nn.MSELoss(reduction='none')\n num_epochs, lr = 100, 0.003\n trainer = torch.optim.SGD([{\"params\":net[0].weight,'weight_decay': wd}, {\"params\":net[0].bias}], lr=lr)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n trainer.zero_grad()\n l = loss(net(X), y)\n l.mean().backward()\n trainer.step()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1,\n (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = paddle.normal(0, 1, shape=(num_inputs, 1))\n w.stop_gradient = False\n b = paddle.zeros(shape=[1])\n b.stop_gradient = False\n return [w, b]\ndef l2_penalty(w):\n return paddle.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter():\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n bias_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n net = nn.Sequential(nn.Linear(num_inputs, 1, weight_attr=weight_attr, bias_attr=bias_attr))\n loss = nn.MSELoss()\n num_epochs, lr = 100, 0.003\n trainer = paddle.optimizer.SGD(parameters=net[0].parameters(), learning_rate=lr, weight_decay=wd*1.0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in 
train_iter:\n l = loss(net(X), y)\n l.backward()\n trainer.step()\n trainer.clear_grad()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"} +{"id": 160, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return torch.zeros_like(X)\n if dropout == 0:\n return X\n mask = (torch.rand(X.shape) > dropout).float()\n return mask * X / (1.0 - dropout)\nX= torch.arange(16, dtype = torch.float32).reshape((2, 8))\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Module):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2, is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport random\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return paddle.zeros_like(X)\n if dropout == 0:\n return X\n mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32')\n return mask * X / (1.0 - dropout)\nX= paddle.arange(16, dtype = paddle.float32).reshape((2, 8))\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Layer):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,\n is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = paddle.optimizer.SGD(learning_rate=lr, 
parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01))\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10, weight_attr=weight_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 161, "pytorch": "trainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport torch\nfrom d2l import torch as d2l\nx = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)\ny = torch.sigmoid(x)\ny.backward(torch.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = torch.normal(0, 1, size=(4,4))\nfor i in range(100):\n M = torch.mm(M,torch.normal(0, 1, size=(4, 4)))", "paddle": "trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.sigmoid(x)\ny.backward(paddle.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()],\n legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = paddle.normal(0, 1, shape=(4,4))\nfor i in range(100):\n M = paddle.mm(M, paddle.normal(0, 1, shape=(4, 4)))"} +{"id": 162, "pytorch": "%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nn_train = train_data.shape[0]\ntrain_features = torch.tensor(all_features[:n_train].values, dtype=torch.float32)\ntest_features = torch.tensor(all_features[n_train:].values, dtype=torch.float32)\ntrain_labels = torch.tensor(train_data.SalePrice.values.reshape(-1, 1), dtype=torch.float32)\ndef log_rmse(net, features, labels):\n clipped_preds = torch.clamp(net(features), 1, float('inf'))\n rmse = torch.sqrt(loss(torch.log(clipped_preds), torch.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = torch.optim.Adam(net.parameters(), lr = learning_rate, weight_decay = weight_decay)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n optimizer.zero_grad()\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = torch.cat([X_train, X_part], 0)\n y_train = torch.cat([y_train, y_part], 0)\n return X_train, y_train, 
X_valid, y_valid", "paddle": "%matplotlib inline\nimport warnings\nimport numpy as np\nimport pandas as pd\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\nn_train = train_data.shape[0]\ntrain_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32)\ntest_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32)\ntrain_labels = paddle.to_tensor(\n train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32)\ndef log_rmse(net, features, labels):\n clipped_preds = paddle.clip(net(features), 1, float('inf'))\n rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, parameters=net.parameters(), weight_decay=weight_decay*1.0)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n optimizer.clear_grad()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = paddle.concat([X_train, X_part], 0)\n y_train = paddle.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid"} +{"id": 163, "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = torch.rand(2, 20)\nnet(X)\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nclass MySequential(nn.Module):\n def __init__(self, *args):\n super().__init__()\n for idx, module in enumerate(args):\n self._modules[str(idx)] = module\n def forward(self, X):\n for block in self._modules.values():\n X = block(X)\n return X\nclass FixedHiddenMLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.rand_weight = torch.rand((20, 20), requires_grad=False)\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(torch.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nclass NestMLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = paddle.rand([2, 20])\nnet(X)\nclass MLP(nn.Layer):\n def 
__init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nclass MySequential(nn.Layer):\n def __init__(self, *layers):\n super(MySequential, self).__init__()\n if len(layers) > 0 and isinstance(layers[0], tuple):\n for name, layer in layers:\n self.add_sublayer(name, layer)\n else:\n for idx, layer in enumerate(layers):\n self.add_sublayer(str(idx), layer)\n def forward(self, X):\n for layer in self._sub_layers.values():\n X = layer(X)\n return X\nclass FixedHiddenMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.rand_weight = paddle.rand([20, 20])\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nclass NestMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),\n nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)"} +{"id": 164, "pytorch": "import torch\nfrom torch import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = torch.rand(size=(2, 4))\nnet(X)\nnet.state_dict()['2.bias'].data\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_module(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, mean=0, std=0.01)\n nn.init.zeros_(m.bias)\nnet.apply(init_normal)\nnet[0].weight.data[0], net[0].bias.data[0]\ndef init_constant(m):\n if type(m) == nn.Linear:\n nn.init.constant_(m.weight, 1)\n nn.init.zeros_(m.bias)\nnet.apply(init_constant)\nnet[0].weight.data[0], net[0].bias.data[0]\ndef init_xavier(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n nn.init.constant_(m.weight, 42)\nnet[0].apply(init_xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n nn.init.uniform_(m.weight, -10, 10)\n m.weight.data *= m.weight.data.abs() >= 5\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.data[:] += 1\nnet[0].weight.data[0, 0] = 42\nnet[0].weight.data[0]\nlayer = CenteredLayer()\nlayer(torch.FloatTensor([1, 2, 3, 4, 5]))", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = paddle.rand([2, 4])\nnet(X)\nnet.state_dict()['2.bias']\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_sublayer(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Normal(mean=0.0, std=0.01)\n paddle.zeros(m.bias)\nnet.apply(init_normal)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef init_constant(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(value = 1)\n paddle.zeros(m.bias)\nnet.apply(init_constant)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef xavier(m):\n if type(m) == nn.Linear:\n 
paddle.nn.initializer.XavierUniform(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(42)\nnet[0].apply(xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.XavierUniform(m.weight, -10, 10)\n h = paddle.abs(m.weight) >= 5\n h = paddle.to_tensor(h)\n m = paddle.to_tensor(m.weight)\n m *= h\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.set_value(net[0].weight.numpy() + 1)\nval = net[0].weight.numpy()\nval[0, 0] = 42\nnet[0].weight.set_value(val)\nnet[0].weight[0]\nlayer = CenteredLayer()\nlayer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32'))"} +{"id": 165, "pytorch": "import torch\nimport torch.nn.functional as F\nfrom torch import nn\nclass CenteredLayer(nn.Module):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(torch.rand(4, 8))\nY.mean()\nclass MyLinear(nn.Module):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = nn.Parameter(torch.randn(in_units, units))\n self.bias = nn.Parameter(torch.randn(units,))\n def forward(self, X):\n linear = torch.matmul(X, self.weight.data) + self.bias.data\n return F.relu(linear)\nlinear(torch.rand(2, 5))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(torch.rand(2, 64))", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nclass CenteredLayer(nn.Layer):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(paddle.rand([4, 8]))\nY.mean()\nclass MyLinear(nn.Layer):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = paddle.create_parameter(shape=(in_units, units), dtype='float32')\n self.bias = paddle.create_parameter(shape=(units,), dtype='float32')\n def forward(self, X):\n linear = paddle.matmul(X, self.weight) + self.bias\n return F.relu(linear)\nlinear(paddle.randn([2, 5]))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(paddle.rand([2, 64]))"} +{"id": 166, "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nx = torch.arange(4)\ntorch.save(x, 'x-file')\nx2 = torch.load('x-file')\ny = torch.zeros(4)\ntorch.save([x, y],'x-files')\nx2, y2 = torch.load('x-files')\nmydict = {'x': x, 'y': y}\ntorch.save(mydict, 'mydict')\nmydict2 = torch.load('mydict')\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = torch.randn(size=(2, 20))\nY = net(X)\ntorch.save(net.state_dict(), 'mlp.params')\nclone = MLP()\nclone.load_state_dict(torch.load('mlp.params'))\nclone.eval()", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nx = paddle.arange(4)\npaddle.save(x, 'x-file')\nx2 = paddle.load('x-file')\ny = paddle.zeros([4])\npaddle.save([x,y], 'x-file')\nx2, y2 = paddle.load('x-file')\nmydict = {'x': x, 'y': y}\npaddle.save(mydict, 'mydict')\nmydict2 = paddle.load('mydict')\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = paddle.randn(shape=[2, 20])\nY = net(X)\npaddle.save(net.state_dict(), 'mlp.pdparams')\nclone = 
MLP()\nclone.set_state_dict(paddle.load('mlp.pdparams'))\nclone.eval()"} +{"id": 167, "pytorch": "import torch\nfrom torch import nn\ntorch.device('cpu'), torch.device('cuda'), torch.device('cuda:1')\ntorch.cuda.device_count()\ndef try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return torch.device(f'cuda:{i}')\n return torch.device('cpu')\ndef try_all_gpus():\n devices = [torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count())]\n return devices if devices else [torch.device('cpu')]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = torch.tensor([1, 2, 3])\nx.device\nX = torch.ones(2, 3, device=try_gpu())\nY = torch.rand(2, 3, device=try_gpu(1))\nnet = nn.Sequential(nn.Linear(3, 1))\nnet = net.to(device=try_gpu())\nnet[0].weight.data.device", "paddle": "import paddle\nfrom paddle import nn\npaddle.device.set_device(\"cpu\"), paddle.CUDAPlace(0), paddle.CUDAPlace(1)\npaddle.device.cuda.device_count()\ndef try_gpu(i=0):\n if paddle.device.cuda.device_count() >= i + 1:\n return paddle.CUDAPlace(i)\n return paddle.CPUPlace()\ndef try_all_gpus():\n devices = [paddle.CUDAPlace(i) for i in range(paddle.device.cuda.device_count())]\n return devices if devices else paddle.CPUPlace()\ntry_gpu(),try_gpu(10),try_all_gpus()\nx = paddle.to_tensor([1, 2, 3])\nx.place\nX = paddle.to_tensor(paddle.ones(shape=[2, 3]), place=try_gpu())\nY = paddle.to_tensor(paddle.rand([2, 3]), place=try_gpu(1))\nnet = nn.Sequential(nn.Linear(3, 1))\nnet=net.to(try_gpu())\nnet[0].weight.place"} +{"id": 168, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef corr2d(X, K):\n h, w = K.shape\n Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = torch.tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Module):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = nn.Parameter(torch.rand(kernel_size))\n self.bias = nn.Parameter(torch.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = torch.ones((6, 8))\nX[:, 2:6] = 0\nK = torch.tensor([[1.0, -1.0]])\nconv2d = nn.Conv2d(1,1, kernel_size=(1, 2), bias=False)\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.zero_grad()\n l.sum().backward()\n conv2d.weight.data[:] -= lr * conv2d.weight.grad\nconv2d.weight.data.reshape((1, 2))", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef corr2d(X, K):\n h, w = K.shape\n Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Layer):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = paddle.ParamAttr(paddle.rand(kernel_size))\n self.bias = paddle.ParamAttr(paddle.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = paddle.ones((6, 8))\nX[:, 2:6] = 0\nK = paddle.to_tensor([[1.0, -1.0]])\nconv2d = nn.Conv2D(1, 1, kernel_size=(1, 2))\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n 
conv2d.clear_gradients()\n l.sum().backward()\n with paddle.no_grad():\n conv2d.weight[:] -= lr * conv2d.weight.grad\nconv2d.weight.reshape((1, 2))"} +{"id": 169, "pytorch": "import torch\nfrom torch import nn\n\ndef comp_conv2d(conv2d, X):\n X = X.reshape((1, 1) + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1)\nX = torch.rand(size=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape", "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\n\ndef comp_conv2d(conv2d, X):\n X = paddle.reshape(X, [1, 1] + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1)\nX = paddle.rand((8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape"} +{"id": 170, "pytorch": "import torch\nfrom d2l import torch as d2l\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = torch.tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = torch.tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return torch.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = torch.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = torch.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = torch.normal(0, 1, (3, 3, 3))\nK = torch.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(torch.abs(Y1 - Y2).sum()) < 1e-6", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = paddle.to_tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = paddle.to_tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return paddle.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = paddle.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = paddle.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = paddle.normal(0, 1, (3, 3, 3))\nK = paddle.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(paddle.abs(Y1 - Y2).sum()) < 1e-6"} +{"id": 171, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = torch.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = 
X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = torch.arange(16, dtype=torch.float32).reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2d(3)\npool2d(X)\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2d((2, 3), stride=(2, 3), padding=(0, 1))\npool2d(X)\nX = torch.cat((X, X + 1), 1)\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = paddle.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = paddle.arange(16, dtype=\"float32\").reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3, stride=3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), stride=(2, 3))\npool2d(X)\nX = paddle.concat((X, X + 1), 1)\npool2d = paddle.nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)"} +{"id": 172, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32)\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape: \t',X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2d:\n nn.init.xavier_uniform_(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = torch.optim.SGD(net.parameters(), lr=lr)\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.zero_grad()\n X, y = X.to(device), y.to(device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with torch.no_grad():\n metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn, optimizer\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), 
nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = paddle.rand((1, 1, 28, 28), 'float32')\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2D:\n nn.initializer.XavierUniform(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.clear_grad()\n X, y = paddle.to_tensor(X, place=device), paddle.to_tensor(y, place=device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with paddle.no_grad():\n metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))"} +{"id": 173, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(\n nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = torch.randn(1, 1, 224, 224)\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nnet = nn.Sequential(\n nn.Conv2D(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2), nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)"} +{"id": 174, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2d(kernel_size=2,stride=2))\n return nn.Sequential(*layers)\ndef 
vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(\n *conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5),\n nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = torch.randn(size=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2D(kernel_size=2, stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(*conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)"} +{"id": 175, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU(),\n nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2d(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2d(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2d(3, stride=2),\n nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten())\nX = torch.rand(size=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2D(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(3, stride=2), nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 176, "pytorch": "import torch\nfrom torch 
import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nclass Inception(nn.Module):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)\n self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return torch.cat((p1, p2, p3, p4), dim=1)\nb1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2d(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2d((1,1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = torch.rand(size=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nclass Inception(nn.Layer):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(in_channels, c1, kernel_size=1)\n self.p2_1 = nn.Conv2D(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2D(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2D(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2D(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2D(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2D(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return paddle.concat(x=[p1, p2, p3, p4], axis=1)\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2,padding=1))\nb2 = nn.Sequential(nn.Conv2D(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 
160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = paddle.rand(shape=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 177, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n if not torch.is_grad_enabled():\n X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps)\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = X.mean(dim=0)\n var = ((X - mean) ** 2).mean(dim=0)\n else:\n mean = X.mean(dim=(0, 2, 3), keepdim=True)\n var = ((X - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / torch.sqrt(var + eps)\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean.data, moving_var.data\nclass BatchNorm(nn.Module):\n def __init__(self, num_features, num_dims):\n super().__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = nn.Parameter(torch.ones(shape))\n self.beta = nn.Parameter(torch.zeros(shape))\n self.moving_mean = torch.zeros(shape)\n self.moving_var = torch.ones(shape)\n def forward(self, X):\n if self.moving_mean.device != X.device:\n self.moving_mean = self.moving_mean.to(X.device)\n self.moving_var = self.moving_var.to(X.device)\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9)\n return Y\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),\n nn.Linear(16*4*4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nnet[1].gamma.reshape((-1,)), net[1].beta.reshape((-1,))\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5), nn.BatchNorm2d(6), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1d(120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1d(84), nn.Sigmoid(),\n nn.Linear(84, 10))", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True):\n if not is_training:\n X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = paddle.mean(X)\n var = paddle.mean(((X - mean) ** 2))\n else:\n mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True)\n var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / (var + eps) ** 0.5\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n 
moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Layer):\n def __init__(self, num_features, num_dims=4):\n super(BatchNorm, self).__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32')))\n self.beta = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32')))\n self.moving_mean = paddle.zeros(shape=shape, dtype='float32')\n self.moving_var = paddle.zeros(shape=shape, dtype='float32')\n def forward(self, X):\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9, is_training=self.training)\n return Y\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(), nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nparam = net.parameters()\nprint('gamma:', param[2].numpy().reshape(-1))\nprint('beta:', param[3].numpy().reshape(-1))\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), nn.BatchNorm2D(6, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(),\n nn.Linear(84, 10))"} +{"id": 178, "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nclass Residual(nn.Module):\n def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1):\n super().__init__()\n self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2d(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2d(num_channels)\n self.bn2 = nn.BatchNorm2d(num_channels)\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3,3)\nX = torch.rand(4, 3, 6, 6)\nY = blk(X)\nY.shape\nblk = Residual(3,6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2d((1,1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = torch.rand(size=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nfrom paddle.nn import functional as F\nclass 
Residual(nn.Layer):\n def __init__(self, input_channels, num_channels, use_1x1conv=False,\n strides=1):\n super(Residual, self).__init__()\n self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2D(num_channels)\n self.bn2 = nn.BatchNorm2D(num_channels)\n self.relu = nn.ReLU()\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3, 3)\nX = paddle.rand([4, 3, 6, 6])\nY = blk(X)\nY.shape\nblk = Residual(3, 6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 179, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2d(input_channels), nn.ReLU(),\n nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Module):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = torch.cat((X, Y), dim=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = torch.randn(4, 3, 8, 8)\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2d(input_channels), nn.ReLU(),\n nn.Conv2d(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2d(kernel_size=2, stride=2))\nb1 = nn.Sequential(\n nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2d(num_channels), nn.ReLU(),\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Layer):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = paddle.concat(x=[X, Y], axis=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = paddle.randn([4, 3, 8, 8])\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2D(kernel_size=2, stride=2))\nb1 = 
nn.Sequential(\n nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2D(num_channels), nn.ReLU(),\n nn.AdaptiveMaxPool2D((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))"} +{"id": 180, "pytorch": "%matplotlib inline\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nT = 1000\ntime = torch.arange(1, T + 1, dtype=torch.float32)\nx = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = torch.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = torch.optim.Adam(net.parameters(), lr)\n for epoch in range(epochs):\n for X, y in train_iter:\n trainer.zero_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nmultistep_preds = torch.zeros(T)\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = torch.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape(-1)\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nT = 1000\ntime = paddle.arange(1, T + 1, dtype=paddle.float32)\nx = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = paddle.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.XavierUniform(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters())\n for epoch in range(epochs):\n for i,(X, y) in enumerate (train_iter()):\n trainer.clear_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = 
get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nmultistep_preds = paddle.zeros([T])\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = paddle.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape([-1])\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"} +{"id": 181, "pytorch": "import collections\nimport re\nfrom d2l import torch as d2l", "paddle": "import collections\nimport re\nfrom d2l import paddle as d2l"} +{"id": 182, "pytorch": "import random\nimport torch\nfrom d2l import torch as d2l\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield torch.tensor(X), torch.tensor(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = torch.tensor(corpus[offset: offset + num_tokens])\n Ys = torch.tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield paddle.to_tensor(X), paddle.to_tensor(Y)\ndef seq_data_iter_sequential(corpus, batch_size, 
num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = paddle.to_tensor(corpus[offset: offset + num_tokens])\n Ys = paddle.to_tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1))\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"} +{"id": 183, "pytorch": "import torch\nfrom d2l import torch as d2l\nX, W_xh = torch.normal(0, 1, (3, 1)), torch.normal(0, 1, (1, 4))\nH, W_hh = torch.normal(0, 1, (3, 4)), torch.normal(0, 1, (4, 4))\ntorch.matmul(X, W_xh) + torch.matmul(H, W_hh)\ntorch.matmul(torch.cat((X, H), 1), torch.cat((W_xh, W_hh), 0))", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nX, W_xh = paddle.normal(0, 1, (3, 1)), paddle.normal(0, 1, (1, 4))\nH, W_hh = paddle.normal(0, 1, (3, 4)), paddle.normal(0, 1, (4, 4))\npaddle.matmul(X, W_xh) + paddle.matmul(H, W_hh)\npaddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0))"} +{"id": 184, "pytorch": "%matplotlib inline\nimport math\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nF.one_hot(torch.tensor([0, 2]), len(vocab))\nX = torch.arange(10).reshape((2, 5))\nF.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device) * 0.01\n W_xh = normal((num_inputs, num_hiddens))\n W_hh = normal((num_hiddens, num_hiddens))\n b_h = torch.zeros(num_hiddens, device=device)\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_rnn_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = torch.tanh(torch.mm(X, W_xh) + torch.mm(H, W_hh) + b_h)\n Y = torch.mm(H, W_hq) + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens, device)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = F.one_hot(X.T, self.vocab_size).type(torch.float32)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size, device):\n return self.init_state(batch_size, self.num_hiddens, device)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0], d2l.try_gpu())\nY, new_state = net(X.to(d2l.try_gpu()), state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1, device=device)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n 
outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.argmax(dim=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\ndef grad_clipping(net, theta):\n if isinstance(net, nn.Module):\n params = [p for p in net.parameters() if p.requires_grad]\n else:\n params = net.params\n norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))\n if norm > theta:\n for param in params:\n param.grad[:] *= theta / norm\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], device=device)\n else:\n if isinstance(net, nn.Module) and not isinstance(state, tuple):\n state.detach_()\n else:\n for s in state:\n s.detach_()\n y = Y.T.reshape(-1)\n X, y = X.to(device), y.to(device)\n y_hat, state = net(X, state)\n l = loss(y_hat, y.long()).mean()\n if isinstance(updater, torch.optim.Optimizer):\n updater.zero_grad()\n l.backward()\n grad_clipping(net, 1)\n updater.step()\n else:\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * y.numel(), y.numel())\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, nn.Module):\n updater = torch.optim.SGD(net.parameters(), lr)\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)", "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nF.one_hot(paddle.to_tensor([0, 2]), len(vocab))\nX = paddle.arange(10).reshape((2, 5))\nF.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)* 0.01\n W_xh = normal([num_inputs, num_hiddens])\n W_hh = normal([num_hiddens, num_hiddens])\n b_h = paddle.zeros(shape=[num_hiddens])\n W_hq = normal([num_hiddens, num_outputs])\n b_q = paddle.zeros(shape=[num_outputs])\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient=False\n return params\ndef init_rnn_state(batch_size, num_hiddens):\n return (paddle.zeros(shape=[batch_size, num_hiddens]), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = paddle.tanh(paddle.mm(X, W_xh) + paddle.mm(H, W_hh) + b_h)\n Y = paddle.mm(H, W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(x=outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, get_params, init_state, forward_fn):\n self.vocab_size, 
self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = F.one_hot(X.T, self.vocab_size)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size):\n return self.init_state(batch_size, self.num_hiddens)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0])\nY, new_state = net(X, state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: paddle.to_tensor(outputs[-1], place=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(paddle.reshape(paddle.argmax(y,axis=1),shape=[1])))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\ndef grad_clipping(net, theta):\n if isinstance(net, nn.Layer):\n params = [p for p in net.parameters() if not p.stop_gradient]\n else:\n params = net.params\n norm = paddle.sqrt(sum(paddle.sum((p.grad ** 2)) for p in params))\n if norm > theta:\n with paddle.no_grad():\n for param in params:\n param.grad.set_value(param.grad * theta / norm)\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0])\n else:\n if isinstance(net, nn.Layer) and not isinstance(state, tuple):\n state.stop_gradient=True\n else:\n for s in state:\n s.stop_gradient=True\n y = paddle.reshape(Y.T,shape=[-1])\n X = paddle.to_tensor(X, place=device)\n y = paddle.to_tensor(y, place=device)\n y_hat, state = net(X, state)\n l = loss(y_hat, y).mean()\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.backward()\n grad_clipping(net, 1)\n updater.step()\n else:\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * y.numel(), y.numel())\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, nn.Layer):\n updater = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\nnet = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)"} +{"id": 185, "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = nn.RNN(len(vocab), num_hiddens)\nstate = torch.zeros((1, batch_size, num_hiddens))\nstate.shape\nX = torch.rand(size=(num_steps, batch_size, 
len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, state_new.shape\nclass RNNModel(nn.Module):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.num_hiddens = self.rnn.hidden_size\n if not self.rnn.bidirectional:\n self.num_directions = 1\n self.linear = nn.Linear(self.num_hiddens, self.vocab_size)\n else:\n self.num_directions = 2\n self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)\n def forward(self, inputs, state):\n X = F.one_hot(inputs.T.long(), self.vocab_size)\n X = X.to(torch.float32)\n Y, state = self.rnn(X, state)\n output = self.linear(Y.reshape((-1, Y.shape[-1])))\n return output, state\n def begin_state(self, device, batch_size=1):\n if not isinstance(self.rnn, nn.LSTM):\n return torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device)\n else:\n return (torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device),\n torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device))\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, vocab_size=len(vocab))\nnet = net.to(device)\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = nn.SimpleRNN(len(vocab), num_hiddens, time_major=True)\nstate = paddle.zeros(shape=[1, batch_size, num_hiddens])\nstate.shape\nX = paddle.rand(shape=[num_steps, batch_size, len(vocab)])\nY, state_new = rnn_layer(X, state)\nY.shape, state_new.shape\nclass RNNModel(nn.Layer):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.num_hiddens = self.rnn.hidden_size\n if self.rnn.num_directions==1:\n self.num_directions = 1\n self.linear = nn.Linear(self.num_hiddens, self.vocab_size)\n else:\n self.num_directions = 2\n self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)\n def forward(self, inputs, state):\n X = F.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.linear(Y.reshape((-1, Y.shape[-1])))\n return output, state\n def begin_state(self, batch_size=1):\n if not isinstance(self.rnn, nn.LSTM):\n return paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens])\n else:\n return (paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]),\n paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]))\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, vocab_size=len(vocab))\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1.0\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)"} +{"id": 186, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device)*0.01\n def three():\n 
return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_gru_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = torch.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)\n R = torch.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)\n H_tilda = torch.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = H @ W_hq + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H,)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\ngru_layer = nn.GRU(num_inputs, num_hiddens)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nmodel = model.to(device)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_gru_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H,*_ = state\n outputs = []\n for X in inputs:\n Z = F.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)\n R = F.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)\n H_tilda = paddle.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = H @ W_hq + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H,*_)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\ngru_layer = nn.GRU(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"} +{"id": 187, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n 
def normal(shape):\n return torch.randn(size=shape, device=device)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_lstm_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), torch.zeros((batch_size, num_hiddens), device=device))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = torch.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)\n F = torch.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)\n O = torch.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)\n C_tilda = torch.tanh((X @ W_xc) + (H @ W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * torch.tanh(C)\n Y = (H @ W_hq) + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\nlstm_layer = nn.LSTM(num_inputs, num_hiddens)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nmodel = model.to(device)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as Function\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_lstm_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), paddle.zeros([batch_size, num_hiddens]))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = Function.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)\n F = Function.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)\n O = Function.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)\n C_tilda = paddle.tanh((X @ W_xc) + (H @ W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * paddle.tanh(C)\n Y = (H @ W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, 
num_epochs, device)\nnum_inputs = vocab_size\nlstm_layer = nn.LSTM(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"} +{"id": 188, "pytorch": "import os\nimport torch\nfrom d2l import torch as d2l\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['']] for l in lines]\n array = torch.tensor([truncate_pad(l, num_steps, vocab['']) for l in lines])\n valid_len = (array != vocab['']).type(torch.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.type(torch.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.type(torch.int32))\n print('Valid length of Y:', Y_valid_len)\n break", "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport os\nimport paddle\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['']] for l in lines]\n array = paddle.to_tensor([truncate_pad(l, num_steps, vocab['']) for l in lines])\n valid_len = (array != vocab['']).astype(paddle.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(paddle.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.astype(paddle.int32))\n print('Valid length of Y:', Y_valid_len)\n break"} +{"id": 189, "pytorch": "x = torch.arange(12)\nx.numel()\nX = x.reshape(3, 4)\ntorch.zeros((2, 3, 4))\ntorch.ones((2, 3, 4))\ntorch.randn(3, 4)\ntorch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = torch.tensor([1.0, 2, 4, 8])\ny = torch.tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\ntorch.exp(x)\nX = torch.arange(12, dtype=torch.float32).reshape((3,4))\nY = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\ntorch.cat((X, Y), dim=0), torch.cat((X, Y), dim=1)\nX.sum()\na = torch.arange(3).reshape((3, 1))\nb = torch.arange(2).reshape((1, 2))\nX[1, 2] = 9\nX[0:2, :] = 12\nZ = torch.zeros_like(Y)\nZ[:] = X + Y\nbefore = id(X)\nX += Y\nid(X) == before\nA = X.numpy()\nB = torch.tensor(A)\na = torch.tensor([3.5])\nprint(a, a.item(), float(a), int(a))", "tensorflow": "x = tf.range(12)\ntf.size(x)\nX = tf.reshape(x, (3, 4))\ntf.zeros((2, 3, 4))\ntf.ones((2, 3, 4))\ntf.random.normal(shape=[3, 4])\ntf.constant([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = tf.constant([1.0, 2, 4, 8])\ny = tf.constant([2.0, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\ntf.exp(x)\nX = tf.reshape(tf.range(12, dtype=tf.float32), (3, 4))\nY = tf.constant([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\ntf.concat([X, Y], axis=0), tf.concat([X, Y], axis=1)\ntf.reduce_sum(X)\na = tf.reshape(tf.range(3), (3, 1))\nb = tf.reshape(tf.range(2), (1, 2))\nX_var = tf.Variable(X)\nX_var[1, 2].assign(9)\nX_var = tf.Variable(X)\nX_var[0:2, :].assign(tf.ones(X_var[0:2,:].shape, dtype = tf.float32) * 12)\nZ = tf.Variable(tf.zeros_like(Y))\nZ.assign(X + Y)\n@tf.function\ndef computation(X, Y):\n Z = tf.zeros_like(Y)\n A = X + Y\n B = A + Y\n C = B + Y\n return C + Y\ncomputation(X, Y)\nA = X.numpy()\nB = tf.constant(A)\na = tf.constant([3.5]).numpy()\nprint(a, a.item(), float(a), int(a))"} +{"id": 190, "pytorch": "import torch\nX, y = torch.tensor(inputs.values), torch.tensor(outputs.values)", "tensorflow": "import tensorflow as 
tf\nX, y = tf.constant(inputs.values), tf.constant(outputs.values)"} +{"id": 191, "pytorch": "import torch\nx = torch.tensor(3.0)\ny = torch.tensor(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = torch.arange(4)\nA = torch.arange(20).reshape(5, 4)\nA.T\nB = torch.tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == B.T\nX = torch.arange(24).reshape(2, 3, 4)\nA = torch.arange(20, dtype=torch.float32).reshape(5, 4)\nB = A.clone()\nprint(A, A + B)\na = 2\nX = torch.arange(24).reshape(2, 3, 4)\nprint(a + X, (a * X).shape)\nx = torch.arange(4, dtype=torch.float32)\nprint(x, x.sum())\na = A.sum()\nA_sum_axis0 = A.sum(axis=0)\nA_sum_axis1 = A.sum(axis=1)\nA.sum(axis=[0, 1])\nA.mean()\nA.sum() / A.numel()\nA.mean(axis=0)\nA.sum(axis=0) / A.shape[0]\nsum_A = A.sum(axis=1, keepdims=True)\nA.cumsum(axis=0)\ny = torch.ones(4, dtype = torch.float32)\nprint(torch.dot(x, y))\ntorch.sum(x * y)\nA.shape, x.shape, torch.mv(A, x)\nB = torch.ones(4, 3)\ntorch.mm(A, B)\nu = torch.tensor([3.0, -4.0])\ntorch.norm(u)\ntorch.abs(u).sum()\ntorch.norm(torch.ones((4, 9)))", "tensorflow": "import tensorflow as tf\nx = tf.constant(3.0)\ny = tf.constant(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = tf.range(4)\nA = tf.reshape(tf.range(20), (5, 4))\ntf.transpose(A)\nB = tf.constant([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == tf.transpose(B)\nX = tf.reshape(tf.range(24), (2, 3, 4))\nA = tf.reshape(tf.range(20, dtype=tf.float32), (5, 4))\nB = A\nprint(A, A + B)\na = 2\nX = tf.reshape(tf.range(24), (2, 3, 4))\nprint(a + X, (a * X).shape)\nx = tf.range(4, dtype=tf.float32)\nprint(x, tf.reduce_sum(x))\na = tf.reduce_sum(A)\nA_sum_axis0 = tf.reduce_sum(A, axis=0)\nA_sum_axis1 = tf.reduce_sum(A, axis=1)\ntf.reduce_sum(A, axis=[0, 1])\ntf.reduce_mean(A)\ntf.reduce_sum(A) / tf.size(A).numpy()\ntf.reduce_mean(A, axis=0)\ntf.reduce_sum(A, axis=0) / A.shape[0]\nsum_A = tf.reduce_sum(A, axis=1, keepdims=True)\ntf.cumsum(A, axis=0)\ny = tf.ones(4, dtype=tf.float32)\nprint(tf.tensordot(x, y, axes=1))\ntf.reduce_sum(x * y)\nA.shape, x.shape, tf.linalg.matvec(A, x)\nB = tf.ones((4, 3), tf.float32)\ntf.matmul(A, B)\nu = tf.constant([3.0, -4.0])\ntf.norm(u)\ntf.reduce_sum(tf.abs(u))\ntf.norm(tf.ones((4, 9)))"} +{"id": 192, "pytorch": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import torch as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x", "tensorflow": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import tensorflow as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x"} +{"id": 193, "pytorch": "import torch\nx = torch.arange(4.0)\nx.requires_grad_(True)\nx.grad\ny = 2 * torch.dot(x, x)\ny.backward()\nx.grad\nx.grad == 4 * x\nx.grad.zero_()\ny = x.sum()\ny.backward()\nx.grad\nx.grad.zero_()\ny = x * x\ny.sum().backward()\nx.grad\nx.grad.zero_()\ny = x * x\nu = y.detach()\nz = u * x\nz.sum().backward()\nx.grad == u\nx.grad.zero_()\ny.sum().backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while b.norm() < 1000:\n b = b * 2\n if b.sum() > 0:\n c = b\n else:\n c = 100 * b\n return c\na = torch.randn(size=(), requires_grad=True)\nd = f(a)\nd.backward()\na.grad == d / a", "tensorflow": "import tensorflow as tf\nx = tf.range(4, dtype=tf.float32)\nx = tf.Variable(x)\nwith tf.GradientTape() as t:\n y = 2 * tf.tensordot(x, x, axes=1)\nx_grad = t.gradient(y, x)\nx_grad\nx_grad == 4 * x\nwith tf.GradientTape() as t:\n y = tf.reduce_sum(x)\nt.gradient(y, x)\nwith tf.GradientTape() as t:\n y = x * x\nt.gradient(y, x)\nwith tf.GradientTape(persistent=True) as t:\n y = x * x\n 
u = tf.stop_gradient(y)\n z = u * x\nx_grad = t.gradient(z, x)\nx_grad == u\nt.gradient(y, x) == 2 * x\ndef f(a):\n b = a * 2\n while tf.norm(b) < 1000:\n b = b * 2\n if tf.reduce_sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = tf.Variable(tf.random.normal(shape=()))\nwith tf.GradientTape() as t:\n d = f(a)\nd_grad = t.gradient(d, a)\nd_grad\nd_grad == d / a"} +{"id": 194, "pytorch": "%matplotlib inline\nimport torch\nfrom torch.distributions import multinomial\nfrom d2l import torch as d2l\nfair_probs = torch.ones([6]) / 6\nmultinomial.Multinomial(1, fair_probs).sample()\nmultinomial.Multinomial(10, fair_probs).sample()\ncounts = multinomial.Multinomial(1000, fair_probs).sample()", "tensorflow": "%matplotlib inline\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom d2l import tensorflow as d2l\nfair_probs = tf.ones(6) / 6\ntfp.distributions.Multinomial(1, fair_probs).sample()\ntfp.distributions.Multinomial(10, fair_probs).sample()\ncounts = tfp.distributions.Multinomial(1000, fair_probs).sample()"} +{"id": 195, "pytorch": "counts = multinomial.Multinomial(10, fair_probs).sample((500,))\ncum_counts = counts.cumsum(dim=0)\nestimates = cum_counts / cum_counts.sum(dim=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].numpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nimport torch\na = dir(torch.distributions)\nhelp(torch.ones)\ntorch.ones(4)", "tensorflow": "counts = tfp.distributions.Multinomial(10, fair_probs).sample(500)\ncum_counts = tf.cumsum(counts, axis=0)\nestimates = cum_counts / tf.reduce_sum(cum_counts, axis=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].numpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nimport tensorflow as tf\na = dir(tf.random)\nhelp(tf.ones)\ntf.ones(4)"} +{"id": 196, "pytorch": "%matplotlib inline\nimport math\nimport time\nimport numpy as np\nimport torch\nfrom d2l import torch as d2l\nn = 10000\na = torch.ones(n)\nb = torch.ones(n)\nc = torch.zeros(n)\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]", "tensorflow": "%matplotlib inline\nimport math\nimport time\nimport numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn = 10000\na = tf.ones(n)\nb = tf.ones(n)\nc = tf.Variable(tf.zeros(n))\ntimer = Timer()\nfor i in range(n):\n c[i].assign(a[i] + b[i])"} +{"id": 197, "pytorch": "%matplotlib inline\nimport random\nimport torch\nfrom d2l import torch as d2l\ndef synthetic_data(w, b, num_examples):\n X = torch.normal(0, 1, (num_examples, len(w)))\n y = torch.matmul(X, w) + b\n y += torch.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = torch.tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = torch.tensor(indices[i: min(i + batch_size, num_examples)])\n yield 
features[batch_indices], labels[batch_indices]\nw = torch.normal(0, 0.01, size=(2,1), requires_grad=True)\nb = torch.zeros(1, requires_grad=True)\ndef linreg(X, w, b):\n return torch.matmul(X, w) + b\ndef squared_loss(y_hat, y):\n return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2\ndef sgd(params, lr, batch_size):\n with torch.no_grad():\n for param in params:\n param -= lr * param.grad / batch_size\n param.grad.zero_()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with torch.no_grad():\n train_l = loss(net(features, w, b), labels)", "tensorflow": "%matplotlib inline\nimport random\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef synthetic_data(w, b, num_examples):\n X = tf.zeros((num_examples, w.shape[0]))\n X += tf.random.normal(shape=X.shape)\n y = tf.matmul(X, tf.reshape(w, (-1, 1))) + b\n y += tf.random.normal(shape=y.shape, stddev=0.01)\n y = tf.reshape(y, (-1, 1))\n return X, y\ntrue_w = tf.constant([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].numpy(), labels.numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n j = tf.constant(indices[i: min(i + batch_size, num_examples)])\n yield tf.gather(features, j), tf.gather(labels, j)\nw = tf.Variable(tf.random.normal(shape=(2, 1), mean=0, stddev=0.01), trainable=True)\nb = tf.Variable(tf.zeros(1), trainable=True)\ndef linreg(X, w, b):\n return tf.matmul(X, w) + b\ndef squared_loss(y_hat, y):\n return (y_hat - tf.reshape(y, y_hat.shape)) ** 2 / 2\ndef sgd(params, grads, lr, batch_size):\n for param, grad in zip(params, grads):\n param.assign_sub(lr*grad/batch_size)\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n with tf.GradientTape() as g:\n l = loss(net(X, w, b), y)\n dw, db = g.gradient(l, [w, b])\n sgd([w, b], [dw, db], lr, batch_size)\n train_l = loss(net(features, w, b), labels)"} +{"id": 198, "pytorch": "import numpy as np\nimport torch\nfrom torch.utils import data\nfrom d2l import torch as d2l\ntrue_w = torch.tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = data.TensorDataset(*data_arrays)\n return data.DataLoader(dataset, batch_size, shuffle=is_train)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom torch import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nnet[0].weight.data.normal_(0, 0.01)\nnet[0].bias.data.fill_(0)\nloss = nn.MSELoss()\ntrainer = torch.optim.SGD(net.parameters(), lr=0.03)\nw = net[0].weight.data\nb = net[0].bias.data", "tensorflow": "import numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntrue_w = tf.constant([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = tf.data.Dataset.from_tensor_slices(data_arrays)\n if is_train:\n dataset = dataset.shuffle(buffer_size=1000)\n dataset = dataset.batch(batch_size)\n return dataset\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nnet = 
tf.keras.Sequential()\nnet.add(tf.keras.layers.Dense(1))\ninitializer = tf.initializers.RandomNormal(stddev=0.01)\nnet = tf.keras.Sequential()\nnet.add(tf.keras.layers.Dense(1, kernel_initializer=initializer))\nloss = tf.keras.losses.MeanSquaredError()\ntrainer = tf.keras.optimizers.SGD(learning_rate=0.03)\nw = net.get_weights()[0]\nb = net.get_weights()[1]"} +{"id": 199, "pytorch": "%matplotlib inline\nimport torch\nimport torchvision\nfrom torch.utils import data\nfrom torchvision import transforms\nfrom d2l import torch as d2l\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = torchvision.datasets.FashionMNIST(\n root=\"../data\", train=True, transform=trans, download=True)\nmnist_test = torchvision.datasets.FashionMNIST(\n root=\"../data\", train=False, transform=trans, download=True)\nlen(mnist_train), len(mnist_test)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if torch.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(data.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape(18, 28, 28), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ndef get_dataloader_workers():\n return 4\ntrain_iter = data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = torchvision.datasets.FashionMNIST(root=\"../data\", train=True, transform=trans, download=True)\n mnist_test = torchvision.datasets.FashionMNIST(root=\"../data\", train=False, transform=trans, download=True)\n return (data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),\n data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))", "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nd2l.use_svg_display()\nmnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()\nlen(mnist_train[0]), len(mnist_test[0])\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(img.numpy())\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX = tf.constant(mnist_train[0][:18])\ny = tf.constant(mnist_train[1][:18])\nshow_images(X, 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ntrain_iter = tf.data.Dataset.from_tensor_slices(mnist_train).batch(batch_size).shuffle(len(mnist_train[0]))\ndef load_data_fashion_mnist(batch_size, resize=None):\n mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()\n process = lambda X, y: (tf.expand_dims(X, axis=3) / 255, tf.cast(y, dtype='int32'))\n resize_fn = lambda X, y: (tf.image.resize_with_pad(X, resize, resize) if resize else X, y)\n return 
(tf.data.Dataset.from_tensor_slices(process(*mnist_train)).batch(batch_size).shuffle(len(mnist_train[0])).map(resize_fn),\n tf.data.Dataset.from_tensor_slices(process(*mnist_test)).batch(batch_size).map(resize_fn))"} +{"id": 200, "pytorch": "import torch\nfrom IPython import display\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True)\nb = torch.zeros(num_outputs, requires_grad=True)\nX = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = torch.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = torch.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = torch.tensor([0, 2])\ny_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - torch.log(y_hat[range(len(y_hat)), y])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n cmp = y_hat.type(y.dtype) == y\n return float(cmp.type(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, torch.nn.Module):\n net.eval()\n metric = Accumulator(2)\n with torch.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, torch.nn.Module):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, torch.optim.Optimizer):\n updater.zero_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]\nlr = 0.1\ndef updater(batch_size):\n return d2l.sgd([W, b], lr, batch_size)\ndef predict_ch3(net, test_iter, n=6):\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))\n titles = [true +'\\n' + pred for true, pred in zip(trues, preds)]\n d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])\npredict_ch3(net, test_iter)", "tensorflow": "import tensorflow as tf\nfrom IPython import display\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = tf.Variable(tf.random.normal(shape=(num_inputs, num_outputs), mean=0, stddev=0.01))\nb = tf.Variable(tf.zeros(num_outputs))\nX = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\ntf.reduce_sum(X, 0, keepdims=True), tf.reduce_sum(X, 1, keepdims=True)\ndef softmax(X):\n X_exp = tf.exp(X)\n partition = tf.reduce_sum(X_exp, 1, keepdims=True)\n return X_exp / partition\nX = tf.random.normal((2, 5), 0, 1)\nX_prob = softmax(X)\nX_prob, tf.reduce_sum(X_prob, 1)\ndef net(X):\n return softmax(tf.matmul(tf.reshape(X, (-1, W.shape[0])), W) + b)\ny_hat = tf.constant([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny = tf.constant([0, 2])\ntf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1]))\ndef cross_entropy(y_hat, y):\n return -tf.math.log(tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1])))\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 
and y_hat.shape[1] > 1:\n y_hat = tf.argmax(y_hat, axis=1)\n cmp = tf.cast(y_hat, y.dtype) == y\n return float(tf.reduce_sum(tf.cast(cmp, y.dtype)))\ndef evaluate_accuracy(net, data_iter):\n metric = Accumulator(2)\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), d2l.size(y))\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n metric = Accumulator(3)\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n y_hat = net(X)\n if isinstance(loss, tf.keras.losses.Loss):\n l = loss(y, y_hat)\n else:\n l = loss(y_hat, y)\n if isinstance(updater, tf.keras.optimizers.Optimizer):\n params = net.trainable_variables\n grads = tape.gradient(l, params)\n updater.apply_gradients(zip(grads, params))\n else:\n updater(X.shape[0], tape.gradient(l, updater.params))\n l_sum = l * float(tf.size(y)) if isinstance(loss, tf.keras.losses.Loss) else tf.reduce_sum(l)\n metric.add(l_sum, accuracy(y_hat, y), tf.size(y))\n return metric[0] / metric[2], metric[1] / metric[2]\nclass Updater():\n def __init__(self, params, lr):\n self.params = params\n self.lr = lr\n def __call__(self, batch_size, grads):\n d2l.sgd(self.params, grads, self.lr, batch_size)\nupdater = Updater([W, b], lr=0.1)\ndef predict_ch3(net, test_iter, n=6):\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(tf.argmax(net(X), axis=1))\n titles = [true +'\\n' + pred for true, pred in zip(trues, preds)]\n d2l.show_images(tf.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n])\npredict_ch3(net, test_iter)"} +{"id": 201, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = torch.optim.SGD(net.parameters(), lr=0.1)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = tf.keras.models.Sequential()\nnet.add(tf.keras.layers.Flatten(input_shape=(28, 28)))\nweight_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.01)\nnet.add(tf.keras.layers.Dense(10, kernel_initializer=weight_initializer))\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrainer = tf.keras.optimizers.SGD(learning_rate=.1)"} +{"id": 202, "pytorch": "%matplotlib inline\nimport torch\nfrom d2l import torch as d2l\nx = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)\ny = torch.relu(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(torch.ones_like(x), retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of relu', figsize=(5, 2.5))\ny = torch.sigmoid(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = torch.tanh(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))", "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nx = tf.Variable(tf.range(-8.0, 8.0, 0.1), dtype=tf.float32)\ny = 
tf.nn.relu(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.relu(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of sigmoid',\n figsize=(5, 2.5))\ny = tf.nn.tanh(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.tanh(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))"} +{"id": 203, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = nn.Parameter(torch.randn(\n num_inputs, num_hiddens, requires_grad=True) * 0.01)\nb1 = nn.Parameter(torch.zeros(num_hiddens, requires_grad=True))\nW2 = nn.Parameter(torch.randn(\n num_hiddens, num_outputs, requires_grad=True) * 0.01)\nb2 = nn.Parameter(torch.zeros(num_outputs, requires_grad=True))\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = torch.zeros_like(X)\n return torch.max(X, a)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(X@W1 + b1)\n return (H@W2 + b2)\nloss = nn.CrossEntropyLoss(reduction='none')\nnum_epochs, lr = 10, 0.1\nupdater = torch.optim.SGD(params, lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = tf.Variable(tf.random.normal(shape=(num_inputs, num_hiddens), mean=0, stddev=0.01))\nb1 = tf.Variable(tf.zeros(num_hiddens))\nW2 = tf.Variable(tf.random.normal(shape=(num_hiddens, num_outputs), mean=0, stddev=0.01))\nb2 = tf.Variable(tf.zeros(num_outputs))\nparams = [W1, b1, W2, b2]\ndef relu(X):\n return tf.math.maximum(X, 0)\ndef net(X):\n X = tf.reshape(X, (-1, num_inputs))\n H = relu(tf.matmul(X, W1) + b1)\n return tf.matmul(H, W2) + b2\ndef loss(y_hat, y):\n return tf.losses.sparse_categorical_crossentropy(y, y_hat, from_logits=True)\nnum_epochs, lr = 10, 0.1\nupdater = d2l.Updater([W1, W2, b1, b2], lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)"} +{"id": 204, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nnet = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(10)])\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, 
trainer)"} +{"id": 205, "pytorch": "import math\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\ntrue_w, features, poly_features, labels = [torch.tensor(x, dtype=torch.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n out = net(X)\n y = y.reshape(out.shape)\n l = loss(out, y)\n metric.add(l.sum(), l.numel())\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = nn.MSELoss(reduction='none')\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels.reshape(-1,1)), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape(-1,1)), batch_size, is_train=False)\n trainer = torch.optim.SGD(net.parameters(), lr=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))", "tensorflow": "import math\nimport numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntrue_w, features, poly_features, labels = [tf.constant(x, dtype=tf.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(tf.reduce_sum(l), d2l.size(l))\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = tf.losses.MeanSquaredError()\n input_shape = train_features.shape[-1]\n net = tf.keras.Sequential()\n net.add(tf.keras.layers.Dense(1, use_bias=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False)\n trainer = tf.keras.optimizers.SGD(learning_rate=.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))"} +{"id": 206, "pytorch": "%matplotlib inline\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = torch.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = torch.normal(0, 1, size=(num_inputs, 1), requires_grad=True)\n b = torch.zeros(1, requires_grad=True)\n return [w, b]\ndef l2_penalty(w):\n return torch.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n 
num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = nn.Sequential(nn.Linear(num_inputs, 1))\n for param in net.parameters():\n param.data.normal_()\n loss = nn.MSELoss(reduction='none')\n num_epochs, lr = 100, 0.003\n trainer = torch.optim.SGD([{\"params\":net[0].weight,'weight_decay': wd}, {\"params\":net[0].bias}], lr=lr)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n trainer.zero_grad()\n l = loss(net(X), y)\n l.mean().backward()\n trainer.step()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1,\n (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))", "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = tf.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = tf.Variable(tf.random.normal(mean=1, shape=(num_inputs, 1)))\n b = tf.Variable(tf.zeros(shape=(1, )))\n return [w, b]\ndef l2_penalty(w):\n return tf.reduce_sum(tf.pow(w, 2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n l = loss(net(X), y) + lambd * l2_penalty(w)\n grads = tape.gradient(l, [w, b])\n d2l.sgd([w, b], grads, lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = tf.keras.models.Sequential()\n net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(wd)))\n net.build(input_shape=(1, num_inputs))\n w, b = net.trainable_variables\n loss = tf.keras.losses.MeanSquaredError()\n num_epochs, lr = 100, 0.003\n trainer = tf.keras.optimizers.SGD(learning_rate=lr)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n l = loss(net(X), y) + net.losses\n grads = tape.gradient(l, net.trainable_variables)\n trainer.apply_gradients(zip(grads, net.trainable_variables))\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"} +{"id": 207, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return torch.zeros_like(X)\n if dropout == 0:\n return X\n mask = (torch.rand(X.shape) > dropout).float()\n return 
mask * X / (1.0 - dropout)\nX= torch.arange(16, dtype = torch.float32).reshape((2, 8))\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Module):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2, is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return tf.zeros_like(X)\n if dropout == 0:\n return X\n mask = tf.random.uniform(shape=tf.shape(X), minval=0, maxval=1) < 1 - dropout\n return tf.cast(mask, dtype=tf.float32) * X / (1.0 - dropout)\nX = tf.reshape(tf.range(16, dtype=tf.float32), (2, 8))\nnum_outputs, num_hiddens1, num_hiddens2 = 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(tf.keras.Model):\n def __init__(self, num_outputs, num_hiddens1, num_hiddens2):\n super().__init__()\n self.input_layer = tf.keras.layers.Flatten()\n self.hidden1 = tf.keras.layers.Dense(num_hiddens1, activation='relu')\n self.hidden2 = tf.keras.layers.Dense(num_hiddens2, activation='relu')\n self.output_layer = tf.keras.layers.Dense(num_outputs)\n def call(self, inputs, training=None):\n x = self.input_layer(inputs)\n x = self.hidden1(x)\n if training:\n x = dropout_layer(x, dropout1)\n x = self.hidden2(x)\n if training:\n x = dropout_layer(x, dropout2)\n x = self.output_layer(x)\n return x\nnet = Net(num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dropout(dropout1),\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dropout(dropout2),\n tf.keras.layers.Dense(10),\n])\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 208, "pytorch": "trainer = torch.optim.SGD(net.parameters(), 
lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport torch\nfrom d2l import torch as d2l\nx = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)\ny = torch.sigmoid(x)\ny.backward(torch.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = torch.normal(0, 1, size=(4,4))\nfor i in range(100):\n M = torch.mm(M,torch.normal(0, 1, size=(4, 4)))", "tensorflow": "trainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nx = tf.Variable(tf.range(-8.0, 8.0, 0.1))\nwith tf.GradientTape() as t:\n y = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), [y.numpy(), t.gradient(y, x).numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = tf.random.normal((4, 4))\nfor i in range(100):\n M = tf.matmul(M, tf.random.normal((4, 4)))"} +{"id": 209, "pytorch": "%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nn_train = train_data.shape[0]\ntrain_features = torch.tensor(all_features[:n_train].values, dtype=torch.float32)\ntest_features = torch.tensor(all_features[n_train:].values, dtype=torch.float32)\ntrain_labels = torch.tensor(train_data.SalePrice.values.reshape(-1, 1), dtype=torch.float32)\nloss = nn.MSELoss()\nin_features = train_features.shape[1]\ndef get_net():\n net = nn.Sequential(nn.Linear(in_features,1))\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = torch.clamp(net(features), 1, float('inf'))\n rmse = torch.sqrt(loss(torch.log(clipped_preds), torch.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = torch.optim.Adam(net.parameters(), lr = learning_rate, weight_decay = weight_decay)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n optimizer.zero_grad()\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = torch.cat([X_train, X_part], 0)\n y_train = torch.cat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).detach().numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)", "tensorflow": "%matplotlib inline\nimport numpy as 
np\nimport pandas as pd\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn_train = train_data.shape[0]\ntrain_features = tf.constant(all_features[:n_train].values, dtype=tf.float32)\ntest_features = tf.constant(all_features[n_train:].values, dtype=tf.float32)\ntrain_labels = tf.constant(train_data.SalePrice.values.reshape(-1, 1), dtype=tf.float32)\nloss = tf.keras.losses.MeanSquaredError()\ndef get_net():\n net = tf.keras.models.Sequential()\n net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(weight_decay)))\n return net\ndef log_rmse(y_true, y_pred):\n clipped_preds = tf.clip_by_value(y_pred, 1, float('inf'))\n return tf.sqrt(tf.reduce_mean(loss(tf.math.log(y_true), tf.math.log(clipped_preds))))\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n net.compile(loss=loss, optimizer=optimizer)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n y_hat = net(X)\n l = loss(y, y_hat)\n params = net.trainable_variables\n grads = tape.gradient(l, params)\n optimizer.apply_gradients(zip(grads, params))\n train_ls.append(log_rmse(train_labels, net(train_features)))\n if test_labels is not None:\n test_ls.append(log_rmse(test_labels, net(test_features)))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = tf.concat([X_train, X_part], 0)\n y_train = tf.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)"} +{"id": 210, "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = torch.rand(2, 20)\nnet(X)\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nclass MySequential(nn.Module):\n def __init__(self, *args):\n super().__init__()\n for idx, module in enumerate(args):\n self._modules[str(idx)] = module\n def forward(self, X):\n for block in self._modules.values():\n X = block(X)\n return X\nnet = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nnet(X)\nclass FixedHiddenMLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.rand_weight = torch.rand((20, 20), requires_grad=False)\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(torch.mm(X, 
self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nclass NestMLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)", "tensorflow": "import tensorflow as tf\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((2, 20))\nnet(X)\nclass MLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)\n self.out = tf.keras.layers.Dense(units=10)\n def call(self, X):\n return self.out(self.hidden((X)))\nclass MySequential(tf.keras.Model):\n def __init__(self, *args):\n super().__init__()\n self.modules = []\n for block in args:\n self.modules.append(block)\n def call(self, X):\n for module in self.modules:\n X = module(X)\n return X\nnet = MySequential(\n tf.keras.layers.Dense(units=256, activation=tf.nn.relu),\n tf.keras.layers.Dense(10))\nnet(X)\nclass FixedHiddenMLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.flatten = tf.keras.layers.Flatten()\n self.rand_weight = tf.constant(tf.random.uniform((20, 20)))\n self.dense = tf.keras.layers.Dense(20, activation=tf.nn.relu)\n def call(self, inputs):\n X = self.flatten(inputs)\n X = tf.nn.relu(tf.matmul(X, self.rand_weight) + 1)\n X = self.dense(X)\n while tf.reduce_sum(tf.math.abs(X)) > 1:\n X /= 2\n return tf.reduce_sum(X)\nclass NestMLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.net = tf.keras.Sequential()\n self.net.add(tf.keras.layers.Dense(64, activation=tf.nn.relu))\n self.net.add(tf.keras.layers.Dense(32, activation=tf.nn.relu))\n self.dense = tf.keras.layers.Dense(16, activation=tf.nn.relu)\n def call(self, inputs):\n return self.dense(self.net(inputs))\nchimera = tf.keras.Sequential()\nchimera.add(NestMLP())\nchimera.add(tf.keras.layers.Dense(20))\nchimera.add(FixedHiddenMLP())\nchimera(X)"} +{"id": 211, "pytorch": "import torch\nfrom torch import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = torch.rand(size=(2, 4))\nnet(X)\nnet.state_dict()['2.bias'].data\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_module(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, mean=0, std=0.01)\n nn.init.zeros_(m.bias)\nnet.apply(init_normal)\nnet[0].weight.data[0], net[0].bias.data[0]\ndef init_constant(m):\n if type(m) == nn.Linear:\n nn.init.constant_(m.weight, 1)\n nn.init.zeros_(m.bias)\nnet.apply(init_constant)\nnet[0].weight.data[0], net[0].bias.data[0]\ndef init_xavier(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n nn.init.constant_(m.weight, 42)\nnet[0].apply(init_xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n nn.init.uniform_(m.weight, -10, 10)\n m.weight.data *= m.weight.data.abs() >= 5\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.data[:] += 1\nnet[0].weight.data[0, 0] = 42\nnet[0].weight.data[0]\nlayer = CenteredLayer()\nlayer(torch.FloatTensor([1, 2, 3, 4, 5]))\nnet = 
nn.Sequential(nn.Linear(8, 128), CenteredLayer())", "tensorflow": "import tensorflow as tf\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu),\n tf.keras.layers.Dense(1),\n])\nX = tf.random.uniform((2, 4))\nnet(X)\nnet.get_weights()[1]\ndef block1(name):\n return tf.keras.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu)], name=name)\ndef block2():\n net = tf.keras.Sequential()\n for i in range(4):\n net.add(block1(name=f'block-{i}'))\n return net\nrgnet = tf.keras.Sequential()\nrgnet.add(block2())\nrgnet.add(tf.keras.layers.Dense(1))\nrgnet(X)\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu,\n kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.01), bias_initializer=tf.zeros_initializer()),\n tf.keras.layers.Dense(1)])\nnet(X)\nnet.weights[0], net.weights[1]\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.Constant(1), bias_initializer=tf.zeros_initializer()),\n tf.keras.layers.Dense(1),\n])\nnet(X)\nnet.weights[0], net.weights[1]\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.GlorotUniform()),\n tf.keras.layers.Dense(1, kernel_initializer=tf.keras.initializers.Constant(1)),\n])\nnet(X)\nclass MyInit(tf.keras.initializers.Initializer):\n def __call__(self, shape, dtype=None):\n data=tf.random.uniform(shape, -10, 10, dtype=dtype)\n factor=(tf.abs(data) >= 5)\n factor=tf.cast(factor, tf.float32)\n return data * factor\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=MyInit()),\n tf.keras.layers.Dense(1)])\nnet(X)\nnet.layers[1].weights[0][:].assign(net.layers[1].weights[0] + 1)\nnet.layers[1].weights[0][0, 0].assign(42)\nnet.layers[1].weights[0]\nlayer = CenteredLayer()\nlayer(tf.constant([1, 2, 3, 4, 5]))\nnet = tf.keras.Sequential([tf.keras.layers.Dense(128), CenteredLayer()])"} +{"id": 212, "pytorch": "import torch\nimport torch.nn.functional as F\nfrom torch import nn\nclass CenteredLayer(nn.Module):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(torch.rand(4, 8))\nY.mean()\nclass MyLinear(nn.Module):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = nn.Parameter(torch.randn(in_units, units))\n self.bias = nn.Parameter(torch.randn(units,))\n def forward(self, X):\n linear = torch.matmul(X, self.weight.data) + self.bias.data\n return F.relu(linear)\nlinear = MyLinear(5, 3)\nlinear.weight\nlinear(torch.rand(2, 5))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(torch.rand(2, 64))", "tensorflow": "import tensorflow as tf\nclass CenteredLayer(tf.keras.Model):\n def __init__(self):\n super().__init__()\n def call(self, inputs):\n return inputs - tf.reduce_mean(inputs)\nY = net(tf.random.uniform((4, 8)))\ntf.reduce_mean(Y)\nclass MyDense(tf.keras.Model):\n def __init__(self, units):\n super().__init__()\n self.units = units\n def build(self, X_shape):\n self.weight = self.add_weight(name='weight',\n shape=[X_shape[-1], self.units],\n initializer=tf.random_normal_initializer())\n self.bias = self.add_weight(\n name='bias', shape=[self.units],\n initializer=tf.zeros_initializer())\n def call(self, X):\n linear = tf.matmul(X, 
self.weight) + self.bias\n return tf.nn.relu(linear)\ndense = MyDense(3)\ndense(tf.random.uniform((2, 5)))\ndense.get_weights()\ndense(tf.random.uniform((2, 5)))\nnet = tf.keras.models.Sequential([MyDense(8), MyDense(1)])\nnet(tf.random.uniform((2, 64)))"} +{"id": 213, "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nx = torch.arange(4)\ntorch.save(x, 'x-file')\nx2 = torch.load('x-file')\ny = torch.zeros(4)\ntorch.save([x, y],'x-files')\nx2, y2 = torch.load('x-files')\nmydict = {'x': x, 'y': y}\ntorch.save(mydict, 'mydict')\nmydict2 = torch.load('mydict')\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = torch.randn(size=(2, 20))\nY = net(X)\ntorch.save(net.state_dict(), 'mlp.params')\nclone = MLP()\nclone.load_state_dict(torch.load('mlp.params'))\nclone.eval()", "tensorflow": "import numpy as np\nimport tensorflow as tf\nx = tf.range(4)\nnp.save('x-file.npy', x)\nx2 = np.load('x-file.npy', allow_pickle=True)\ny = tf.zeros(4)\nnp.save('xy-files.npy', [x, y])\nx2, y2 = np.load('xy-files.npy', allow_pickle=True)\nmydict = {'x': x, 'y': y}\nnp.save('mydict.npy', mydict)\nmydict2 = np.load('mydict.npy', allow_pickle=True)\nclass MLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.flatten = tf.keras.layers.Flatten()\n self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)\n self.out = tf.keras.layers.Dense(units=10)\n def call(self, inputs):\n x = self.flatten(inputs)\n x = self.hidden(x)\n return self.out(x)\nnet = MLP()\nX = tf.random.uniform((2, 20))\nY = net(X)\nnet.save_weights('mlp.params')\nclone = MLP()\nclone.load_weights('mlp.params')"} +{"id": 214, "pytorch": "import torch\nfrom torch import nn\ntorch.device('cpu'), torch.device('cuda'), torch.device('cuda:1')\ntorch.cuda.device_count()\ndef try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return torch.device(f'cuda:{i}')\n return torch.device('cpu')\ndef try_all_gpus():\n devices = [torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count())]\n return devices if devices else [torch.device('cpu')]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = torch.tensor([1, 2, 3])\nx.device\nX = torch.ones(2, 3, device=try_gpu())\nY = torch.rand(2, 3, device=try_gpu(1))\nZ = X.cuda(1)\nZ.cuda(1) is Z\nnet = nn.Sequential(nn.Linear(3, 1))\nnet = net.to(device=try_gpu())\nnet[0].weight.data.device", "tensorflow": "import tensorflow as tf\ntf.device('/CPU:0'), tf.device('/GPU:0'), tf.device('/GPU:1')\nlen(tf.config.experimental.list_physical_devices('GPU'))\ndef try_gpu(i=0):\n if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1:\n return tf.device(f'/GPU:{i}')\n return tf.device('/CPU:0')\ndef try_all_gpus():\n num_gpus = len(tf.config.experimental.list_physical_devices('GPU'))\n devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)]\n return devices if devices else [tf.device('/CPU:0')]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = tf.constant([1, 2, 3])\nx.device\nwith try_gpu():\n X = tf.ones((2, 3))\nwith try_gpu(1):\n Y = tf.random.uniform((2, 3))\nwith try_gpu(1):\n Z = X\nwith try_gpu(1):\n Z2 = Z\nZ2 is Z\nstrategy = tf.distribute.MirroredStrategy()\nwith strategy.scope():\n net = tf.keras.models.Sequential([\n tf.keras.layers.Dense(1)])\nnet.layers[0].weights[0].device, net.layers[0].weights[1].device"} +{"id": 215, "pytorch": "import torch\nfrom torch 
import nn\nfrom d2l import torch as d2l\ndef corr2d(X, K):\n h, w = K.shape\n Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = torch.tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Module):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = nn.Parameter(torch.rand(kernel_size))\n self.bias = nn.Parameter(torch.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = torch.ones((6, 8))\nX[:, 2:6] = 0\nK = torch.tensor([[1.0, -1.0]])\ncorr2d(X.t(), K)\nconv2d = nn.Conv2d(1,1, kernel_size=(1, 2), bias=False)\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.zero_grad()\n l.sum().backward()\n conv2d.weight.data[:] -= lr * conv2d.weight.grad\nconv2d.weight.data.reshape((1, 2))", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef corr2d(X, K):\n h, w = K.shape\n Y = tf.Variable(tf.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j].assign(tf.reduce_sum(\n X[i: i + h, j: j + w] * K))\n return Y\nX = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = tf.constant([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(tf.keras.layers.Layer):\n def __init__(self):\n super().__init__()\n def build(self, kernel_size):\n initializer = tf.random_normal_initializer()\n self.weight = self.add_weight(name='w', shape=kernel_size, initializer=initializer)\n self.bias = self.add_weight(name='b', shape=(1, ), initializer=initializer)\n def call(self, inputs):\n return corr2d(inputs, self.weight) + self.bias\nX = tf.Variable(tf.ones((6, 8)))\nX[:, 2:6].assign(tf.zeros(X[:, 2:6].shape))\nK = tf.constant([[1.0, -1.0]])\ncorr2d(tf.transpose(X), K)\nconv2d = tf.keras.layers.Conv2D(1, (1, 2), use_bias=False)\nX = tf.reshape(X, (1, 6, 8, 1))\nY = tf.reshape(Y, (1, 6, 7, 1))\nlr = 3e-2\nY_hat = conv2d(X)\nfor i in range(10):\n with tf.GradientTape(watch_accessed_variables=False) as g:\n g.watch(conv2d.weights[0])\n Y_hat = conv2d(X)\n l = (abs(Y_hat - Y)) ** 2\n update = tf.multiply(lr, g.gradient(l, conv2d.weights[0]))\n weights = conv2d.get_weights()\n weights[0] = conv2d.weights[0] - update\n conv2d.set_weights(weights)\ntf.reshape(conv2d.get_weights()[0], (1, 2))"} +{"id": 216, "pytorch": "import torch\nfrom torch import nn\n\ndef comp_conv2d(conv2d, X):\n X = X.reshape((1, 1) + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1)\nX = torch.rand(size=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape", "tensorflow": "import tensorflow as tf\n\ndef comp_conv2d(conv2d, X):\n X = tf.reshape(X, (1, ) + X.shape + (1, ))\n Y = conv2d(X)\n return tf.reshape(Y, Y.shape[1:3])\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same')\nX = tf.random.uniform(shape=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=(5, 3), padding='same')\ncomp_conv2d(conv2d, X).shape\nconv2d = 
tf.keras.layers.Conv2D(1, kernel_size=3, padding='same', strides=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=(3,5), padding='valid', strides=(3, 4))\ncomp_conv2d(conv2d, X).shape"} +{"id": 217, "pytorch": "import torch\nfrom d2l import torch as d2l\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = torch.tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = torch.tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return torch.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = torch.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = torch.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = torch.normal(0, 1, (3, 3, 3))\nK = torch.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(torch.abs(Y1 - Y2).sum()) < 1e-6", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef corr2d_multi_in(X, K):\n return tf.reduce_sum([d2l.corr2d(x, k) for x, k in zip(X, K)], axis=0)\nX = tf.constant([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = tf.constant([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return tf.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = tf.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = tf.reshape(X, (c_i, h * w))\n K = tf.reshape(K, (c_o, c_i))\n Y = tf.matmul(K, X)\n return tf.reshape(Y, (c_o, h, w))\nX = tf.random.normal((3, 3, 3), 0, 1)\nK = tf.random.normal((2, 3, 1, 1), 0, 1)\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(tf.reduce_sum(tf.abs(Y1 - Y2))) < 1e-6"} +{"id": 218, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = torch.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = torch.arange(16, dtype=torch.float32).reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2d(3)\npool2d(X)\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2d((2, 3), stride=(2, 3), padding=(0, 1))\npool2d(X)\nX = torch.cat((X, X + 1), 1)\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)", "tensorflow": "import tensorflow as tf\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = tf.Variable(tf.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w +1)))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j].assign(tf.reduce_max(X[i: i + p_h, j: j + p_w]))\n elif mode =='avg':\n Y[i, j].assign(tf.reduce_mean(X[i: i + p_h, j: j + p_w]))\n return Y\nX = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = tf.reshape(tf.range(16, dtype=tf.float32), (1, 4, 4, 1))\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3])\npool2d(X)\npaddings = tf.constant([[0, 0], [1,0], [1,0], 
[0,0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid',\n strides=2)\npool2d(X_padded)\npaddings = tf.constant([[0, 0], [0, 0], [1, 1], [0, 0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[2, 3], padding='valid',\n strides=(2, 3))\npool2d(X_padded)\nX = tf.concat([X, X + 1], 3)\npaddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid',\n strides=2)\npool2d(X_padded)"} +{"id": 219, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32)\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape: \t',X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2d:\n nn.init.xavier_uniform_(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = torch.optim.SGD(net.parameters(), lr=lr)\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.zero_grad()\n X, y = X.to(device), y.to(device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with torch.no_grad():\n metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, activation='sigmoid', padding='same'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5, activation='sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120, activation='sigmoid'),\n tf.keras.layers.Dense(84, activation='sigmoid'),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((1, 28, 28, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\nclass TrainCallback(tf.keras.callbacks.Callback):\n def __init__(self, net, train_iter, test_iter, num_epochs, device_name):\n self.timer = d2l.Timer()\n self.animator = d2l.Animator( xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n self.net = net\n self.train_iter = train_iter\n self.test_iter = test_iter\n self.num_epochs = num_epochs\n self.device_name = device_name\n def on_epoch_begin(self, epoch, logs=None):\n self.timer.start()\n def on_epoch_end(self, epoch, logs):\n 
self.timer.stop()\n test_acc = self.net.evaluate(self.test_iter, verbose=0, return_dict=True)['accuracy']\n metrics = (logs['loss'], logs['accuracy'], test_acc)\n self.animator.add(epoch + 1, metrics)\n if epoch == self.num_epochs - 1:\n batch_size = next(iter(self.train_iter))[0].shape[0]\n num_examples = batch_size * tf.data.experimental.cardinality(self.train_iter).numpy()\ndef train_ch6(net_fn, train_iter, test_iter, num_epochs, lr, device):\n device_name = device._device_name\n strategy = tf.distribute.OneDeviceStrategy(device_name)\n with strategy.scope():\n optimizer = tf.keras.optimizers.SGD(learning_rate=lr)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n net = net_fn()\n net.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])\n callback = TrainCallback(net, train_iter, test_iter, num_epochs, device_name)\n net.fit(train_iter, epochs=num_epochs, verbose=0, callbacks=[callback])\n return net"} +{"id": 220, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(\n nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = torch.randn(1, 1, 224, 224)\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=96, kernel_size=11, strides=4, activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Conv2D(filters=256, kernel_size=5, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape:\t', X.shape)"} +{"id": 221, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2d(kernel_size=2,stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(\n *conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 
7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5),\n nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = torch.randn(size=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)\nratio = 4\nsmall_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]\nnet = vgg(small_conv_arch)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef vgg_block(num_convs, num_channels):\n blk = tf.keras.models.Sequential()\n for _ in range(num_convs):\n blk.add(tf.keras.layers.Conv2D(num_channels,kernel_size=3, padding='same',activation='relu'))\n blk.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))\n return blk\ndef vgg(conv_arch):\n net = tf.keras.models.Sequential()\n for (num_convs, num_channels) in conv_arch:\n net.add(vgg_block(num_convs, num_channels))\n net.add(tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10)]))\n return net\nnet = vgg(conv_arch)\nX = tf.random.uniform((1, 224, 224, 1))\nfor blk in net.layers:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t', X.shape)\nratio = 4\nsmall_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]\nnet = lambda: vgg(small_conv_arch)"} +{"id": 222, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU(),\n nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2d(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2d(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2d(3, stride=2),\n nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten())\nX = torch.rand(size=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef nin_block(num_channels, kernel_size, strides, padding):\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(num_channels, kernel_size, strides=strides, padding=padding, activation='relu'),\n tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu'),\n tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu')])\ndef net():\n return tf.keras.models.Sequential([\n nin_block(96, kernel_size=11, strides=4, padding='valid'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n nin_block(256, kernel_size=5, strides=1, padding='same'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n nin_block(384, kernel_size=3, strides=1, padding='same'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Dropout(0.5),\n nin_block(10, kernel_size=3, strides=1, padding='same'),\n tf.keras.layers.GlobalAveragePooling2D(),\n tf.keras.layers.Reshape((1, 1, 10)),\n tf.keras.layers.Flatten(),\n ])\nX = tf.random.uniform((1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', 
X.shape)"} +{"id": 223, "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nclass Inception(nn.Module):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)\n self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return torch.cat((p1, p2, p3, p4), dim=1)\nb1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2d(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2d((1,1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = torch.rand(size=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass Inception(tf.keras.Model):\n def __init__(self, c1, c2, c3, c4):\n super().__init__()\n self.p1_1 = tf.keras.layers.Conv2D(c1, 1, activation='relu')\n self.p2_1 = tf.keras.layers.Conv2D(c2[0], 1, activation='relu')\n self.p2_2 = tf.keras.layers.Conv2D(c2[1], 3, padding='same', activation='relu')\n self.p3_1 = tf.keras.layers.Conv2D(c3[0], 1, activation='relu')\n self.p3_2 = tf.keras.layers.Conv2D(c3[1], 5, padding='same', activation='relu')\n self.p4_1 = tf.keras.layers.MaxPool2D(3, 1, padding='same')\n self.p4_2 = tf.keras.layers.Conv2D(c4, 1, activation='relu')\n def call(self, x):\n p1 = self.p1_1(x)\n p2 = self.p2_2(self.p2_1(x))\n p3 = self.p3_2(self.p3_1(x))\n p4 = self.p4_2(self.p4_1(x))\n return tf.keras.layers.Concatenate()([p1, p2, p3, p4])\ndef b1():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, 7, strides=2, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b2():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, 1, activation='relu'),\n tf.keras.layers.Conv2D(192, 3, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b3():\n return tf.keras.models.Sequential([\n Inception(64, (96, 128), (16, 32), 32),\n Inception(128, (128, 192), (32, 96), 64),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, 
padding='same')])\ndef b4():\n return tf.keras.Sequential([\n Inception(192, (96, 208), (16, 48), 64),\n Inception(160, (112, 224), (24, 64), 64),\n Inception(128, (128, 256), (24, 64), 64),\n Inception(112, (144, 288), (32, 64), 64),\n Inception(256, (160, 320), (32, 128), 128),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b5():\n return tf.keras.Sequential([\n Inception(256, (160, 320), (32, 128), 128),\n Inception(384, (192, 384), (48, 128), 128),\n tf.keras.layers.GlobalAvgPool2D(),\n tf.keras.layers.Flatten()\n ])\ndef net():\n return tf.keras.Sequential([b1(), b2(), b3(), b4(), b5(),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform(shape=(1, 96, 96, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 224, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n if not torch.is_grad_enabled():\n X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps)\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = X.mean(dim=0)\n var = ((X - mean) ** 2).mean(dim=0)\n else:\n mean = X.mean(dim=(0, 2, 3), keepdim=True)\n var = ((X - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / torch.sqrt(var + eps)\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean.data, moving_var.data\nclass BatchNorm(nn.Module):\n def __init__(self, num_features, num_dims):\n super().__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = nn.Parameter(torch.ones(shape))\n self.beta = nn.Parameter(torch.zeros(shape))\n self.moving_mean = torch.zeros(shape)\n self.moving_var = torch.ones(shape)\n def forward(self, X):\n if self.moving_mean.device != X.device:\n self.moving_mean = self.moving_mean.to(X.device)\n self.moving_var = self.moving_var.to(X.device)\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9)\n return Y\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),\n nn.Linear(16*4*4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nlr, num_epochs, batch_size = 1.0, 10, 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())\nnet[1].gamma.reshape((-1,)), net[1].beta.reshape((-1,))\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5), nn.BatchNorm2d(6), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1d(120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1d(84), nn.Sigmoid(),\n nn.Linear(84, 10))", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps):\n inv = tf.cast(tf.math.rsqrt(moving_var + eps), X.dtype)\n inv *= gamma\n Y = X * inv + (beta - moving_mean * inv)\n return Y\nclass 
BatchNorm(tf.keras.layers.Layer):\n def __init__(self, **kwargs):\n super(BatchNorm, self).__init__(**kwargs)\n def build(self, input_shape):\n weight_shape = [input_shape[-1], ]\n self.gamma = self.add_weight(name='gamma', shape=weight_shape, initializer=tf.initializers.ones, trainable=True)\n self.beta = self.add_weight(name='beta', shape=weight_shape, initializer=tf.initializers.zeros, trainable=True)\n self.moving_mean = self.add_weight(name='moving_mean', shape=weight_shape, initializer=tf.initializers.zeros, trainable=False)\n self.moving_variance = self.add_weight(name='moving_variance', shape=weight_shape, initializer=tf.initializers.ones, trainable=False)\n super(BatchNorm, self).build(input_shape)\n def assign_moving_average(self, variable, value):\n momentum = 0.9\n delta = variable * momentum + value * (1 - momentum)\n return variable.assign(delta)\n @tf.function\n def call(self, inputs, training):\n if training:\n axes = list(range(len(inputs.shape) - 1))\n batch_mean = tf.reduce_mean(inputs, axes, keepdims=True)\n batch_variance = tf.reduce_mean(tf.math.squared_difference(inputs, tf.stop_gradient(batch_mean)), axes, keepdims=True)\n batch_mean = tf.squeeze(batch_mean, axes)\n batch_variance = tf.squeeze(batch_variance, axes)\n mean_update = self.assign_moving_average(self.moving_mean, batch_mean)\n variance_update = self.assign_moving_average(self.moving_variance, batch_variance)\n self.add_update(mean_update)\n self.add_update(variance_update)\n mean, variance = batch_mean, batch_variance\n else:\n mean, variance = self.moving_mean, self.moving_variance\n output = batch_norm(inputs, moving_mean=mean, moving_var=variance, beta=self.beta, gamma=self.gamma, eps=1e-5)\n return output\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(84),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(10)]\n )\nlr, num_epochs, batch_size = 1.0, 10, 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())\ntf.reshape(net.layers[1].gamma, (-1,)), tf.reshape(net.layers[1].beta, (-1,))\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(84),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(10),\n ])"} +{"id": 225, "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nclass Residual(nn.Module):\n def __init__(self, input_channels, 
num_channels, use_1x1conv=False, strides=1):\n super().__init__()\n self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2d(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2d(num_channels)\n self.bn2 = nn.BatchNorm2d(num_channels)\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3,3)\nX = torch.rand(4, 3, 6, 6)\nY = blk(X)\nY.shape\nblk = Residual(3,6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\ndef resnet_block(input_channels, num_channels, num_residuals, first_block=False):\n blk = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))\n else:\n blk.append(Residual(num_channels, num_channels))\n return blk\nb2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))\nb3 = nn.Sequential(*resnet_block(64, 128, 2))\nb4 = nn.Sequential(*resnet_block(128, 256, 2))\nb5 = nn.Sequential(*resnet_block(256, 512, 2))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2d((1,1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = torch.rand(size=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass Residual(tf.keras.Model):\n def __init__(self, num_channels, use_1x1conv=False, strides=1):\n super().__init__()\n self.conv1 = tf.keras.layers.Conv2D(\n num_channels, padding='same', kernel_size=3, strides=strides)\n self.conv2 = tf.keras.layers.Conv2D(\n num_channels, kernel_size=3, padding='same')\n self.conv3 = None\n if use_1x1conv:\n self.conv3 = tf.keras.layers.Conv2D(\n num_channels, kernel_size=1, strides=strides)\n self.bn1 = tf.keras.layers.BatchNormalization()\n self.bn2 = tf.keras.layers.BatchNormalization()\n def call(self, X):\n Y = tf.keras.activations.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3 is not None:\n X = self.conv3(X)\n Y += X\n return tf.keras.activations.relu(Y)\nblk = Residual(3)\nX = tf.random.uniform((4, 6, 6, 3))\nY = blk(X)\nY.shape\nblk = Residual(6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\nclass ResnetBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels, num_residuals, first_block=False, **kwargs):\n super(ResnetBlock, self).__init__(**kwargs)\n self.residual_layers = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n self.residual_layers.append(Residual(num_channels, use_1x1conv=True, strides=2))\n else:\n self.residual_layers.append(Residual(num_channels))\n def call(self, X):\n for layer in self.residual_layers.layers:\n X = layer(X)\n return X\nb2 = ResnetBlock(64, 2, first_block=True)\nb3 = ResnetBlock(128, 2)\nb4 = ResnetBlock(256, 2)\nb5 = ResnetBlock(512, 2)\ndef net():\n return tf.keras.Sequential([\n 
tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same'),\n ResnetBlock(64, 2, first_block=True),\n ResnetBlock(128, 2),\n ResnetBlock(256, 2),\n ResnetBlock(512, 2),\n tf.keras.layers.GlobalAvgPool2D(),\n tf.keras.layers.Dense(units=10)])\nX = tf.random.uniform(shape=(1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 226, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2d(input_channels), nn.ReLU(),\n nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Module):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = torch.cat((X, Y), dim=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = torch.randn(4, 3, 8, 8)\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2d(input_channels), nn.ReLU(),\n nn.Conv2d(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2d(kernel_size=2, stride=2))\nblk = transition_block(23, 10)\nblk(Y).shape\nb1 = nn.Sequential(\n nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nblks = []\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n blks.append(DenseBlock(num_convs, num_channels, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n blks.append(transition_block(num_channels, num_channels // 2))\n num_channels = num_channels // 2\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2d(num_channels), nn.ReLU(),\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass ConvBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels):\n super(ConvBlock, self).__init__()\n self.bn = tf.keras.layers.BatchNormalization()\n self.relu = tf.keras.layers.ReLU()\n self.conv = tf.keras.layers.Conv2D(filters=num_channels, kernel_size=(3, 3), padding='same')\n self.listLayers = [self.bn, self.relu, self.conv]\n def call(self, x):\n y = x\n for layer in self.listLayers.layers:\n y = layer(y)\n y = tf.keras.layers.concatenate([x,y], axis=-1)\n return y\nclass DenseBlock(tf.keras.layers.Layer):\n def __init__(self, num_convs, num_channels):\n super(DenseBlock, self).__init__()\n self.listLayers = []\n for _ in range(num_convs):\n self.listLayers.append(ConvBlock(num_channels))\n def call(self, x):\n for layer in self.listLayers.layers:\n x = layer(x)\n return x\nblk = DenseBlock(2, 10)\nX = tf.random.uniform((4, 8, 8, 3))\nY = blk(X)\nY.shape\nclass TransitionBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels, **kwargs):\n super(TransitionBlock, self).__init__(**kwargs)\n self.batch_norm = tf.keras.layers.BatchNormalization()\n self.relu = tf.keras.layers.ReLU()\n self.conv = tf.keras.layers.Conv2D(num_channels, kernel_size=1)\n 
self.avg_pool = tf.keras.layers.AvgPool2D(pool_size=2, strides=2)\n def call(self, x):\n x = self.batch_norm(x)\n x = self.relu(x)\n x = self.conv(x)\n return self.avg_pool(x)\nblk = TransitionBlock(10)\nblk(Y).shape\ndef block_1():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.ReLU(),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef block_2():\n net = block_1()\n num_channels, growth_rate = 64, 32\n num_convs_in_dense_blocks = [4, 4, 4, 4]\n for i, num_convs in enumerate(num_convs_in_dense_blocks):\n net.add(DenseBlock(num_convs, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n num_channels //= 2\n net.add(TransitionBlock(num_channels))\n return net\ndef net():\n net = block_2()\n net.add(tf.keras.layers.BatchNormalization())\n net.add(tf.keras.layers.ReLU())\n net.add(tf.keras.layers.GlobalAvgPool2D())\n net.add(tf.keras.layers.Flatten())\n net.add(tf.keras.layers.Dense(10))\n return net"} +{"id": 227, "pytorch": "%matplotlib inline\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nT = 1000\ntime = torch.arange(1, T + 1, dtype=torch.float32)\nx = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = torch.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = torch.optim.Adam(net.parameters(), lr)\n for epoch in range(epochs):\n for X, y in train_iter:\n trainer.zero_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = torch.zeros(T)\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = torch.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape(-1)\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))", "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nT = 1000\ntime = tf.range(1, T + 1, dtype=tf.float32)\nx = tf.sin(0.01 * time) + 
tf.random.normal([T], 0, 0.2)\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = tf.Variable(tf.zeros((T - tau, tau)))\nfor i in range(tau):\n features[:, i].assign(x[i: T - tau + i])\nlabels = tf.reshape(x[tau:], (-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef get_net():\n net = tf.keras.Sequential([tf.keras.layers.Dense(10, activation='relu'),\n tf.keras.layers.Dense(1)])\n return net\nloss = tf.keras.losses.MeanSquaredError()\ndef train(net, train_iter, loss, epochs, lr):\n trainer = tf.keras.optimizers.Adam()\n for epoch in range(epochs):\n for X, y in train_iter:\n with tf.GradientTape() as g:\n out = net(X)\n l = loss(y, out)\n params = net.trainable_variables\n grads = g.gradient(l, params)\n trainer.apply_gradients(zip(grads, params))\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.numpy(), onestep_preds.numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = tf.Variable(tf.zeros(T))\nmultistep_preds[:n_train + tau].assign(x[:n_train + tau])\nfor i in range(n_train + tau, T):\n multistep_preds[i].assign(tf.reshape(net(tf.reshape(multistep_preds[i - tau: i], (1, -1))), ()))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.numpy(), onestep_preds.numpy(),\n multistep_preds[n_train + tau:].numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = tf.Variable(tf.zeros((T - tau - max_steps + 1, tau + max_steps)))\nfor i in range(tau):\n features[:, i].assign(x[i: i + T - tau - max_steps + 1].numpy())\nfor i in range(tau, tau + max_steps):\n features[:, i].assign(tf.reshape(net((features[:, i - tau: i])), -1))\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"} +{"id": 228, "pytorch": "import collections\nimport re\nfrom d2l import torch as d2l\ndef tokenize(lines, token='word'):\n if token == 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('Error: Unknown word element type:' + token)\ntokens = tokenize(lines)\nfor i in range(11):\n print(tokens[i])\ndef load_corpus_time_machine(max_tokens=-1):\n lines = read_time_machine()\n tokens = tokenize(lines, 'char')\n vocab = Vocab(tokens)\n corpus = [vocab[token] for line in tokens for token in line]\n if max_tokens > 0:\n corpus = corpus[:max_tokens]\n return corpus, vocab\ncorpus, vocab = load_corpus_time_machine()\nlen(corpus), len(vocab)", "tensorflow": "import collections\nimport re\nfrom d2l import tensorflow as d2l\ndef tokenize(lines, token='word'):\n if token == 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('Error: Unknown word element type:' + token)\ntokens = tokenize(lines)\nfor i in range(11):\n print(tokens[i])\ndef load_corpus_time_machine(max_tokens=-1):\n lines = read_time_machine()\n tokens = tokenize(lines, 'char')\n vocab = Vocab(tokens)\n corpus = [vocab[token] for line in tokens for token in line]\n if max_tokens > 0:\n corpus = corpus[:max_tokens]\n return corpus, vocab\ncorpus, vocab = load_corpus_time_machine()\nlen(corpus), len(vocab)"} 
+{"id": 229, "pytorch": "import random\nimport torch\nfrom d2l import torch as d2l\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield torch.tensor(X), torch.tensor(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = torch.tensor(corpus[offset: offset + num_tokens])\n Ys = torch.tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y", "tensorflow": "import random\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield tf.constant(X), tf.constant(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = tf.constant(corpus[offset: offset + num_tokens])\n Ys = tf.constant(corpus[offset + 1: offset + 1 + num_tokens])\n Xs = tf.reshape(Xs, (batch_size, -1))\n Ys = tf.reshape(Ys, (batch_size, -1))\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_batches * num_steps, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"} +{"id": 230, "pytorch": "import torch\nfrom d2l import torch as d2l\nX, W_xh = torch.normal(0, 1, (3, 1)), torch.normal(0, 1, (1, 4))\nH, W_hh = torch.normal(0, 1, (3, 4)), torch.normal(0, 1, (4, 4))\ntorch.matmul(X, W_xh) + torch.matmul(H, W_hh)\ntorch.matmul(torch.cat((X, H), 1), torch.cat((W_xh, W_hh), 0))", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nX, W_xh = tf.random.normal((3, 1), 0, 1), tf.random.normal((1, 4), 0, 1)\nH, W_hh = tf.random.normal((3, 4), 0, 1), tf.random.normal((4, 4), 0, 1)\ntf.matmul(X, W_xh) + tf.matmul(H, W_hh)\ntf.matmul(tf.concat((X, H), 1), tf.concat((W_xh, W_hh), 0))"} +{"id": 231, "pytorch": "%matplotlib inline\nimport math\nimport torch\nfrom torch import 
nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nF.one_hot(torch.tensor([0, 2]), len(vocab))\nX = torch.arange(10).reshape((2, 5))\nF.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device) * 0.01\n W_xh = normal((num_inputs, num_hiddens))\n W_hh = normal((num_hiddens, num_hiddens))\n b_h = torch.zeros(num_hiddens, device=device)\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_rnn_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = torch.tanh(torch.mm(X, W_xh) + torch.mm(H, W_hh) + b_h)\n Y = torch.mm(H, W_hq) + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens, device)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = F.one_hot(X.T, self.vocab_size).type(torch.float32)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size, device):\n return self.init_state(batch_size, self.num_hiddens, device)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0], d2l.try_gpu())\nY, new_state = net(X.to(d2l.try_gpu()), state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1, device=device)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.argmax(dim=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\npredict_ch8('time traveller ', 10, net, vocab, d2l.try_gpu())\ndef grad_clipping(net, theta):\n if isinstance(net, nn.Module):\n params = [p for p in net.parameters() if p.requires_grad]\n else:\n params = net.params\n norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))\n if norm > theta:\n for param in params:\n param.grad[:] *= theta / norm\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], device=device)\n else:\n if isinstance(net, nn.Module) and not isinstance(state, tuple):\n state.detach_()\n else:\n for s in state:\n s.detach_()\n y = Y.T.reshape(-1)\n X, y = X.to(device), y.to(device)\n y_hat, state = net(X, state)\n l = loss(y_hat, y.long()).mean()\n if isinstance(updater, torch.optim.Optimizer):\n updater.zero_grad()\n l.backward()\n grad_clipping(net, 1)\n updater.step()\n else:\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n 
metric.add(l * y.numel(), y.numel())\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, nn.Module):\n updater = torch.optim.SGD(net.parameters(), lr)\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\nnum_epochs, lr = 500, 1\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu())\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)", "tensorflow": "%matplotlib inline\nimport math\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ntrain_random_iter, vocab_random_iter = d2l.load_data_time_machine(batch_size, num_steps, use_random_iter=True)\ntf.one_hot(tf.constant([0, 2]), len(vocab))\nX = tf.reshape(tf.range(10), (2, 5))\ntf.one_hot(tf.transpose(X), 28).shape\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32)\n W_xh = tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32)\n W_hh = tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32)\n b_h = tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32)\n W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32)\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n return params\ndef init_rnn_state(batch_size, num_hiddens):\n return (tf.zeros((batch_size, num_hiddens)), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n X = tf.reshape(X,[-1,W_xh.shape[0]])\n H = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(H, W_hh) + b_h)\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, init_state, forward_fn, get_params):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.init_state, self.forward_fn = init_state, forward_fn\n self.trainable_variables = get_params(vocab_size, num_hiddens)\n def __call__(self, X, state):\n X = tf.one_hot(tf.transpose(X), self.vocab_size)\n X = tf.cast(X, tf.float32)\n return self.forward_fn(X, state, self.trainable_variables)\n def begin_state(self, batch_size, *args, **kwargs):\n return self.init_state(batch_size, self.num_hiddens)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nnum_hiddens = 512\nwith strategy.scope():\n net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params)\nstate = net.begin_state(X.shape[0])\nY, new_state = net(X, state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab):\n state = net.begin_state(batch_size=1, dtype=tf.float32)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: 
tf.reshape(tf.constant([outputs[-1]]),\n (1, 1)).numpy()\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.numpy().argmax(axis=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\npredict_ch8('time traveller ', 10, net, vocab)\ndef grad_clipping(grads, theta):\n theta = tf.constant(theta, dtype=tf.float32)\n new_grad = []\n for grad in grads:\n if isinstance(grad, tf.IndexedSlices):\n new_grad.append(tf.convert_to_tensor(grad))\n else:\n new_grad.append(grad)\n norm = tf.math.sqrt(sum((tf.reduce_sum(grad ** 2)).numpy()\n for grad in new_grad))\n norm = tf.cast(norm, tf.float32)\n if tf.greater(norm, theta):\n for i, grad in enumerate(new_grad):\n new_grad[i] = grad * theta / norm\n else:\n new_grad = new_grad\n return new_grad\ndef train_epoch_ch8(net, train_iter, loss, updater, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], dtype=tf.float32)\n with tf.GradientTape(persistent=True) as g:\n y_hat, state = net(X, state)\n y = tf.reshape(tf.transpose(Y), (-1))\n l = loss(y, y_hat)\n params = net.trainable_variables\n grads = g.gradient(l, params)\n grads = grad_clipping(grads, 1)\n updater.apply_gradients(zip(grads, params))\n metric.add(l * d2l.size(y), d2l.size(y))\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, strategy, use_random_iter=False):\n with strategy.scope():\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n updater = tf.keras.optimizers.SGD(lr)\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\n device = d2l.try_gpu()._device_name\nnum_epochs, lr = 500, 1\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, strategy)\nwith strategy.scope():\n net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params)\ntrain_ch8(net, train_iter, vocab_random_iter, lr, num_epochs, strategy, use_random_iter=True)"} +{"id": 232, "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = nn.RNN(len(vocab), num_hiddens)\nstate = torch.zeros((1, batch_size, num_hiddens))\nstate.shape\nX = torch.rand(size=(num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, state_new.shape\nclass RNNModel(nn.Module):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.num_hiddens = self.rnn.hidden_size\n if not self.rnn.bidirectional:\n self.num_directions = 1\n self.linear = nn.Linear(self.num_hiddens, self.vocab_size)\n else:\n self.num_directions = 2\n self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)\n def forward(self, inputs, state):\n X = F.one_hot(inputs.T.long(), self.vocab_size)\n X = X.to(torch.float32)\n Y, state = self.rnn(X, state)\n output = self.linear(Y.reshape((-1, 
Y.shape[-1])))\n return output, state\n def begin_state(self, device, batch_size=1):\n if not isinstance(self.rnn, nn.LSTM):\n return torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device)\n else:\n return (torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device),\n torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device))\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, vocab_size=len(vocab))\nnet = net.to(device)\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_cell = tf.keras.layers.SimpleRNNCell(num_hiddens, kernel_initializer='glorot_uniform')\nrnn_layer = tf.keras.layers.RNN(rnn_cell, time_major=True, return_sequences=True, return_state=True)\nstate = rnn_cell.get_initial_state(batch_size=batch_size, dtype=tf.float32)\nstate.shape\nX = tf.random.uniform((num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, len(state_new), state_new[0].shape\nclass RNNModel(tf.keras.layers.Layer):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.dense = tf.keras.layers.Dense(vocab_size)\n def call(self, inputs, state):\n X = tf.one_hot(tf.transpose(inputs), self.vocab_size)\n Y, *state = self.rnn(X, state)\n output = self.dense(tf.reshape(Y, (-1, Y.shape[-1])))\n return output, state\n def begin_state(self, *args, **kwargs):\n return self.rnn.cell.get_initial_state(*args, **kwargs)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n net = RNNModel(rnn_layer, vocab_size=len(vocab))\nd2l.predict_ch8('time traveller', 10, net, vocab)\nnum_epochs, lr = 500, 1\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, strategy)"} +{"id": 233, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_gru_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = torch.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)\n R = torch.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)\n H_tilda = torch.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = H @ W_hq + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), 
(H,)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\ngru_layer = nn.GRU(num_inputs, num_hiddens)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nmodel = model.to(device)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32)\n def three():\n return (tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32), tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32)\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n return params\ndef init_gru_state(batch_size, num_hiddens):\n return (tf.zeros((batch_size, num_hiddens)), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n X = tf.reshape(X,[-1,W_xh.shape[0]])\n Z = tf.sigmoid(tf.matmul(X, W_xz) + tf.matmul(H, W_hz) + b_z)\n R = tf.sigmoid(tf.matmul(X, W_xr) + tf.matmul(H, W_hr) + b_r)\n H_tilda = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(R * H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,)\nvocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nnum_epochs, lr = 500, 1\nwith strategy.scope():\n model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_gru_state, gru, get_params)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)\ngru_cell = tf.keras.layers.GRUCell(num_hiddens, kernel_initializer='glorot_uniform')\ngru_layer = tf.keras.layers.RNN(gru_cell, time_major=True, return_sequences=True, return_state=True)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModel(gru_layer, vocab_size=len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)"} +{"id": 234, "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n 
param.requires_grad_(True)\n return params\ndef init_lstm_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), torch.zeros((batch_size, num_hiddens), device=device))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = torch.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)\n F = torch.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)\n O = torch.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)\n C_tilda = torch.tanh((X @ W_xc) + (H @ W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * torch.tanh(C)\n Y = (H @ W_hq) + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\nlstm_layer = nn.LSTM(num_inputs, num_hiddens)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nmodel = model.to(device)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.Variable(tf.random.normal(shape=shape, stddev=0.01, mean=0, dtype=tf.float32))\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n return params\ndef init_lstm_state(batch_size, num_hiddens):\n return (tf.zeros(shape=(batch_size, num_hiddens)), tf.zeros(shape=(batch_size, num_hiddens)))\ndef lstm(inputs, state, params):\n W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q = params\n (H, C) = state\n outputs = []\n for X in inputs:\n X=tf.reshape(X,[-1,W_xi.shape[0]])\n I = tf.sigmoid(tf.matmul(X, W_xi) + tf.matmul(H, W_hi) + b_i)\n F = tf.sigmoid(tf.matmul(X, W_xf) + tf.matmul(H, W_hf) + b_f)\n O = tf.sigmoid(tf.matmul(X, W_xo) + tf.matmul(H, W_ho) + b_o)\n C_tilda = tf.tanh(tf.matmul(X, W_xc) + tf.matmul(H, W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * tf.tanh(C)\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,C)\nvocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name\nnum_epochs, lr = 500, 1\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_lstm_state, lstm, get_lstm_params)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)\nlstm_cell = tf.keras.layers.LSTMCell(num_hiddens, kernel_initializer='glorot_uniform')\nlstm_layer = tf.keras.layers.RNN(lstm_cell, time_major=True, return_sequences=True, return_state=True)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModel(lstm_layer, 
vocab_size=len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)"} +{"id": 235, "pytorch": "import os\nimport torch\nfrom d2l import torch as d2l\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = torch.tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = (array != vocab['<pad>']).type(torch.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.type(torch.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.type(torch.int32))\n print('Valid length of Y:', Y_valid_len)\n break", "tensorflow": "import os\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = tf.constant([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = tf.reduce_sum(\n tf.cast(array != vocab['<pad>'], tf.int32), 1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', tf.cast(X, tf.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', tf.cast(Y, tf.int32))\n print('Valid length of Y:', Y_valid_len)\n break"} +{"id": 236, "tensorflow": "x = tf.range(12)\ntf.size(x)\nX = tf.reshape(x, (3, 4))\ntf.zeros((2, 3, 4))\ntf.ones((2, 3, 4))\ntf.random.normal(shape=[3, 4])\ntf.constant([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = tf.constant([1.0, 2, 4, 8])\ny = tf.constant([2.0, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\ntf.exp(x)\nX = tf.reshape(tf.range(12, dtype=tf.float32), (3, 4))\nY = tf.constant([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\ntf.concat([X, Y], axis=0), tf.concat([X, Y], axis=1)\ntf.reduce_sum(X)\na = tf.reshape(tf.range(3), (3, 1))\nb = tf.reshape(tf.range(2), (1, 2))\nX_var = tf.Variable(X)\nX_var[1, 2].assign(9)\nX_var = tf.Variable(X)\nX_var[0:2, :].assign(tf.ones(X_var[0:2,:].shape, dtype = tf.float32) * 12)\nZ = tf.Variable(tf.zeros_like(Y))\nZ.assign(X + Y)\n@tf.function\ndef computation(X, Y):\n Z = tf.zeros_like(Y)\n A = X + Y\n B = A + Y\n C = B + Y\n return C + Y\ncomputation(X, Y)\nA = X.numpy()\nB = tf.constant(A)\na = tf.constant([3.5]).numpy()\nprint(a, a.item(), float(a), int(a))", "mxnet": "x = np.arange(12)\nx.size\nX = x.reshape(3, 4)\nnp.zeros((2, 3, 4))\nnp.ones((2, 3, 4))\nnp.random.normal(0, 1, size=(3, 4))\nnp.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = np.array([1, 2, 4, 8])\ny = np.array([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\nnp.exp(x)\nX = np.arange(12).reshape(3, 4)\nY = np.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nnp.concatenate([X, Y], axis=0), np.concatenate([X, Y], axis=1)\nX.sum()\na = np.arange(3).reshape(3, 1)\nb = np.arange(2).reshape(1, 2)\nX[1, 2] = 9\nX[0:2, :] = 12\nZ = np.zeros_like(Y)\nZ[:] = X + Y\nbefore = id(X)\nX += Y\nid(X) == before\nA = X.asnumpy()\nB = np.array(A)\na = np.array([3.5])\nprint(a, a.item(), float(a), int(a))"} +{"id": 237, "tensorflow": "import tensorflow as tf\nX, y = tf.constant(inputs.values), tf.constant(outputs.values)", "mxnet": "from mxnet import np\nX, y = np.array(inputs.values), np.array(outputs.values)"} +{"id": 238, "tensorflow": "import tensorflow as tf\nx = tf.constant(3.0)\ny = tf.constant(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = tf.range(4)\nA = 
tf.reshape(tf.range(20), (5, 4))\ntf.transpose(A)\nB = tf.constant([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == tf.transpose(B)\nX = tf.reshape(tf.range(24), (2, 3, 4))\nA = tf.reshape(tf.range(20, dtype=tf.float32), (5, 4))\nB = A\nprint(A, A + B)\na = 2\nX = tf.reshape(tf.range(24), (2, 3, 4))\nprint(a + X, (a * X).shape)\nx = tf.range(4, dtype=tf.float32)\nprint(x, tf.reduce_sum(x))\na = tf.reduce_sum(A)\nA_sum_axis0 = tf.reduce_sum(A, axis=0)\nA_sum_axis1 = tf.reduce_sum(A, axis=1)\ntf.reduce_sum(A, axis=[0, 1])\ntf.reduce_mean(A)\ntf.reduce_sum(A) / tf.size(A).numpy()\ntf.reduce_mean(A, axis=0)\ntf.reduce_sum(A, axis=0) / A.shape[0]\nsum_A = tf.reduce_sum(A, axis=1, keepdims=True)\ntf.cumsum(A, axis=0)\ny = tf.ones(4, dtype=tf.float32)\nprint(tf.tensordot(x, y, axes=1))\ntf.reduce_sum(x * y)\nA.shape, x.shape, tf.linalg.matvec(A, x)\nB = tf.ones((4, 3), tf.float32)\ntf.matmul(A, B)\nu = tf.constant([3.0, -4.0])\ntf.norm(u)\ntf.reduce_sum(tf.abs(u))\ntf.norm(tf.ones((4, 9)))", "mxnet": "from mxnet import np, npx\nnpx.set_np()\nx = np.array(3.0)\ny = np.array(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = np.arange(4)\nA = np.arange(20).reshape(5, 4)\nA.T\nB = np.array([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == B.T\nX = np.arange(24).reshape(2, 3, 4)\nA = np.arange(20).reshape(5, 4)\nB = A.copy()\nprint(A, A + B)\na = 2\nX = np.arange(24).reshape(2, 3, 4)\nprint(a + X, (a * X).shape)\nx = np.arange(4)\nprint(x, x.sum())\na = A.sum()\nA_sum_axis0 = A.sum(axis=0)\nA_sum_axis1 = A.sum(axis=1)\nA.sum(axis=[0, 1])\nA.mean()\nA.sum() / A.size\nA.mean(axis=0)\nA.sum(axis=0) / A.shape[0]\nsum_A = A.sum(axis=1, keepdims=True)\nA.cumsum(axis=0)\ny = np.ones(4)\nprint(np.dot(x, y))\nnp.sum(x * y)\nA.shape, x.shape, np.dot(A, x)\nB = np.ones(shape=(4, 3))\nnp.dot(A, B)\nu = np.array([3, -4])\nnp.linalg.norm(u)\nnp.abs(u).sum()\nnp.linalg.norm(np.ones((4, 9)))"} +{"id": 239, "tensorflow": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import tensorflow as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x", "mxnet": "%matplotlib inline\nfrom matplotlib_inline import backend_inline\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef f(x):\n return 3 * x ** 2 - 4 * x"} +{"id": 240, "tensorflow": "import tensorflow as tf\nx = tf.range(4, dtype=tf.float32)\nx = tf.Variable(x)\nwith tf.GradientTape() as t:\n y = 2 * tf.tensordot(x, x, axes=1)\nx_grad = t.gradient(y, x)\nx_grad\nx_grad == 4 * x\nwith tf.GradientTape() as t:\n y = tf.reduce_sum(x)\nt.gradient(y, x)\nwith tf.GradientTape() as t:\n y = x * x\nt.gradient(y, x)\nwith tf.GradientTape(persistent=True) as t:\n y = x * x\n u = tf.stop_gradient(y)\n z = u * x\nx_grad = t.gradient(z, x)\nx_grad == u\nt.gradient(y, x) == 2 * x\ndef f(a):\n b = a * 2\n while tf.norm(b) < 1000:\n b = b * 2\n if tf.reduce_sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = tf.Variable(tf.random.normal(shape=()))\nwith tf.GradientTape() as t:\n d = f(a)\nd_grad = t.gradient(d, a)\nd_grad\nd_grad == d / a", "mxnet": "from mxnet import autograd, np, npx\nnpx.set_np()\nx = np.arange(4.0)\nx.attach_grad()\nx.grad\nwith autograd.record():\n y = 2 * np.dot(x, x)\ny.backward()\nx.grad\nx.grad == 4 * x\nwith autograd.record():\n y = x.sum()\ny.backward()\nx.grad\nwith autograd.record():\n y = x * x\ny.backward()\nx.grad\nwith autograd.record():\n y = x * x\n u = y.detach()\n z = u * x\nz.backward()\nx.grad == u\ny.backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while np.linalg.norm(b) < 1000:\n b = b * 2\n if 
b.sum() > 0:\n c = b\n else:\n c = 100 * b\n return c\na = np.random.normal()\na.attach_grad()\nwith autograd.record():\n d = f(a)\nd.backward()\na.grad == d / a"} +{"id": 241, "tensorflow": "%matplotlib inline\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom d2l import tensorflow as d2l\nfair_probs = tf.ones(6) / 6\ntfp.distributions.Multinomial(1, fair_probs).sample()\ntfp.distributions.Multinomial(10, fair_probs).sample()\ncounts = tfp.distributions.Multinomial(1000, fair_probs).sample()", "mxnet": "%matplotlib inline\nimport random\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nfair_probs = [1.0 / 6] * 6\nnp.random.multinomial(1, fair_probs)\nnp.random.multinomial(10, fair_probs)\ncounts = np.random.multinomial(1000, fair_probs).astype(np.float32)"} +{"id": 242, "tensorflow": "counts = tfp.distributions.Multinomial(10, fair_probs).sample(500)\ncum_counts = tf.cumsum(counts, axis=0)\nestimates = cum_counts / tf.reduce_sum(cum_counts, axis=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].numpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nimport tensorflow as tf\na = dir(tf.random)\nhelp(tf.ones)\ntf.ones(4)", "mxnet": "counts = np.random.multinomial(10, fair_probs, size=500)\ncum_counts = counts.astype(np.float32).cumsum(axis=0)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].asnumpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nfrom mxnet import np\na = dir(np.random)\nhelp(np.ones)\nnp.ones(4)"} +{"id": 243, "tensorflow": "%matplotlib inline\nimport math\nimport time\nimport numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn = 10000\na = tf.ones(n)\nb = tf.ones(n)\nc = tf.Variable(tf.zeros(n))\ntimer = Timer()\nfor i in range(n):\n c[i].assign(a[i] + b[i])\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])", "mxnet": "%matplotlib inline\nimport math\nimport time\nfrom mxnet import np\nfrom d2l import mxnet as d2l\nn = 10000\na = np.ones(n)\nb = np.ones(n)\nc = np.zeros(n)\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x.asnumpy(), [normal(x, mu, sigma).asnumpy() for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"} +{"id": 244, "tensorflow": "%matplotlib inline\nimport random\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef synthetic_data(w, b, num_examples):\n X = tf.zeros((num_examples, w.shape[0]))\n X += tf.random.normal(shape=X.shape)\n y = tf.matmul(X, tf.reshape(w, (-1, 1))) + b\n y += tf.random.normal(shape=y.shape, stddev=0.01)\n y = tf.reshape(y, (-1, 1))\n return X, y\ntrue_w = tf.constant([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].numpy(), 
labels.numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n j = tf.constant(indices[i: min(i + batch_size, num_examples)])\n yield tf.gather(features, j), tf.gather(labels, j)\nw = tf.Variable(tf.random.normal(shape=(2, 1), mean=0, stddev=0.01), trainable=True)\nb = tf.Variable(tf.zeros(1), trainable=True)\ndef linreg(X, w, b):\n return tf.matmul(X, w) + b\ndef squared_loss(y_hat, y):\n return (y_hat - tf.reshape(y, y_hat.shape)) ** 2 / 2\ndef sgd(params, grads, lr, batch_size):\n for param, grad in zip(params, grads):\n param.assign_sub(lr*grad/batch_size)\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n with tf.GradientTape() as g:\n l = loss(net(X, w, b), y)\n dw, db = g.gradient(l, [w, b])\n sgd([w, b], [dw, db], lr, batch_size)\n train_l = loss(net(features, w, b), labels)", "mxnet": "%matplotlib inline\nimport random\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef synthetic_data(w, b, num_examples):\n X = np.random.normal(0, 1, (num_examples, len(w)))\n y = np.dot(X, w) + b\n y += np.random.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = np.array([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].asnumpy(), labels.asnumpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = np.array(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nw = np.random.normal(0, 0.01, (2, 1))\nb = np.zeros(1)\nw.attach_grad()\nb.attach_grad()\ndef linreg(X, w, b):\n return np.dot(X, w) + b\ndef squared_loss(y_hat, y):\n return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2\ndef sgd(params, lr, batch_size):\n for param in params:\n param[:] = param - lr * param.grad / batch_size\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n with autograd.record():\n l = loss(net(X, w, b), y)\n l.backward()\n sgd([w, b], lr, batch_size)\n train_l = loss(net(features, w, b), labels)"} +{"id": 245, "tensorflow": "import numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntrue_w = tf.constant([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = tf.data.Dataset.from_tensor_slices(data_arrays)\n if is_train:\n dataset = dataset.shuffle(buffer_size=1000)\n dataset = dataset.batch(batch_size)\n return dataset\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nnet = tf.keras.Sequential()\nnet.add(tf.keras.layers.Dense(1))\ninitializer = tf.initializers.RandomNormal(stddev=0.01)\nnet = tf.keras.Sequential()\nnet.add(tf.keras.layers.Dense(1, kernel_initializer=initializer))\nloss = tf.keras.losses.MeanSquaredError()\ntrainer = tf.keras.optimizers.SGD(learning_rate=0.03)\nw = net.get_weights()[0]\nb = net.get_weights()[1]", "mxnet": "from mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ntrue_w = np.array([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = 
d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = gluon.data.ArrayDataset(*data_arrays)\n return gluon.data.DataLoader(dataset, batch_size, shuffle=is_train)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom mxnet.gluon import nn\nnet = nn.Sequential()\nnet.add(nn.Dense(1))\nfrom mxnet import init\nnet.initialize(init.Normal(sigma=0.01))\nloss = gluon.loss.L2Loss()\nfrom mxnet import gluon\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03})\nw = net[0].weight.data()\nb = net[0].bias.data()"} +{"id": 246, "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nd2l.use_svg_display()\nmnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()\nlen(mnist_train[0]), len(mnist_test[0])\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(img.numpy())\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX = tf.constant(mnist_train[0][:18])\ny = tf.constant(mnist_train[1][:18])\nshow_images(X, 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ntrain_iter = tf.data.Dataset.from_tensor_slices(mnist_train).batch(batch_size).shuffle(len(mnist_train[0]))\ndef load_data_fashion_mnist(batch_size, resize=None):\n mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()\n process = lambda X, y: (tf.expand_dims(X, axis=3) / 255, tf.cast(y, dtype='int32'))\n resize_fn = lambda X, y: (tf.image.resize_with_pad(X, resize, resize) if resize else X, y)\n return (tf.data.Dataset.from_tensor_slices(process(*mnist_train)).batch(batch_size).shuffle(len(mnist_train[0])).map(resize_fn),\n tf.data.Dataset.from_tensor_slices(process(*mnist_test)).batch(batch_size).map(resize_fn))", "mxnet": "%matplotlib inline\nimport sys\nfrom mxnet import gluon\nfrom d2l import mxnet as d2l\nd2l.use_svg_display()\nmnist_train = gluon.data.vision.FashionMNIST(train=True)\nmnist_test = gluon.data.vision.FashionMNIST(train=False)\nlen(mnist_train), len(mnist_test)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(img.asnumpy())\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = mnist_train[:18]\nshow_images(X.squeeze(axis=-1), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ndef get_dataloader_workers():\n return 0 if sys.platform.startswith('win') else 4\ntransformer = gluon.data.vision.transforms.ToTensor()\ntrain_iter = gluon.data.DataLoader(mnist_train.transform_first(transformer), batch_size, shuffle=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n dataset = gluon.data.vision\n trans = [dataset.transforms.ToTensor()]\n if resize:\n trans.insert(0, dataset.transforms.Resize(resize))\n trans = dataset.transforms.Compose(trans)\n mnist_train = dataset.FashionMNIST(train=True).transform_first(trans)\n mnist_test = dataset.FashionMNIST(train=False).transform_first(trans)\n return (gluon.data.DataLoader(mnist_train, 
batch_size, shuffle=True, num_workers=get_dataloader_workers()),\n gluon.data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))"} +{"id": 247, "tensorflow": "import tensorflow as tf\nfrom IPython import display\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = tf.Variable(tf.random.normal(shape=(num_inputs, num_outputs), mean=0, stddev=0.01))\nb = tf.Variable(tf.zeros(num_outputs))\nX = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\ntf.reduce_sum(X, 0, keepdims=True), tf.reduce_sum(X, 1, keepdims=True)\ndef softmax(X):\n X_exp = tf.exp(X)\n partition = tf.reduce_sum(X_exp, 1, keepdims=True)\n return X_exp / partition\nX = tf.random.normal((2, 5), 0, 1)\nX_prob = softmax(X)\nX_prob, tf.reduce_sum(X_prob, 1)\ndef net(X):\n return softmax(tf.matmul(tf.reshape(X, (-1, W.shape[0])), W) + b)\ny_hat = tf.constant([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny = tf.constant([0, 2])\ntf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1]))\ndef cross_entropy(y_hat, y):\n return -tf.math.log(tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1])))\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = tf.argmax(y_hat, axis=1)\n cmp = tf.cast(y_hat, y.dtype) == y\n return float(tf.reduce_sum(tf.cast(cmp, y.dtype)))\ndef evaluate_accuracy(net, data_iter):\n metric = Accumulator(2)\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), d2l.size(y))\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n metric = Accumulator(3)\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n y_hat = net(X)\n if isinstance(loss, tf.keras.losses.Loss):\n l = loss(y, y_hat)\n else:\n l = loss(y_hat, y)\n if isinstance(updater, tf.keras.optimizers.Optimizer):\n params = net.trainable_variables\n grads = tape.gradient(l, params)\n updater.apply_gradients(zip(grads, params))\n else:\n updater(X.shape[0], tape.gradient(l, updater.params))\n l_sum = l * float(tf.size(y)) if isinstance(loss, tf.keras.losses.Loss) else tf.reduce_sum(l)\n metric.add(l_sum, accuracy(y_hat, y), tf.size(y))\n return metric[0] / metric[2], metric[1] / metric[2]\nclass Updater():\n def __init__(self, params, lr):\n self.params = params\n self.lr = lr\n def __call__(self, batch_size, grads):\n d2l.sgd(self.params, grads, self.lr, batch_size)\nupdater = Updater([W, b], lr=0.1)\ndef predict_ch3(net, test_iter, n=6):\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(tf.argmax(net(X), axis=1))\n titles = [true +'\\n' + pred for true, pred in zip(trues, preds)]\n d2l.show_images(tf.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n])\npredict_ch3(net, test_iter)", "mxnet": "from IPython import display\nfrom mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = np.random.normal(0, 0.01, (num_inputs, num_outputs))\nb = np.zeros(num_outputs)\nW.attach_grad()\nb.attach_grad()\nX = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdims=True), X.sum(1, keepdims=True)\ndef softmax(X):\n X_exp = np.exp(X)\n partition = X_exp.sum(1, keepdims=True)\n return X_exp / partition\nX = np.random.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return 
softmax(np.dot(X.reshape((-1, W.shape[0])), W) + b)\ny = np.array([0, 2])\ny_hat = np.array([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - np.log(y_hat[range(len(y_hat)), y])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n metric = Accumulator(2)\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), d2l.size(y))\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n metric = Accumulator(3)\n if isinstance(updater, gluon.Trainer):\n updater = updater.step\n for X, y in train_iter:\n with autograd.record():\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.size)\n return metric[0] / metric[2], metric[1] / metric[2]\nlr = 0.1\ndef updater(batch_size):\n return d2l.sgd([W, b], lr, batch_size)\ndef predict_ch3(net, test_iter, n=6):\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))\n titles = [true +'\\n' + pred for true, pred in zip(trues, preds)]\n d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])\npredict_ch3(net, test_iter)"} +{"id": 248, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = tf.keras.models.Sequential()\nnet.add(tf.keras.layers.Flatten(input_shape=(28, 28)))\nweight_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.01)\nnet.add(tf.keras.layers.Dense(10, kernel_initializer=weight_initializer))\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrainer = tf.keras.optimizers.SGD(learning_rate=.1)", "mxnet": "from mxnet import gluon, init, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential()\nnet.add(nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})"} +{"id": 249, "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nx = tf.Variable(tf.range(-8.0, 8.0, 0.1), dtype=tf.float32)\ny = tf.nn.relu(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.relu(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of sigmoid',\n figsize=(5, 2.5))\ny = tf.nn.tanh(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.tanh(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))", "mxnet": "%matplotlib inline\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nx = np.arange(-8.0, 8.0, 0.1)\nx.attach_grad()\nwith autograd.record():\n y = npx.relu(x)\nd2l.plot(x, y, 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of relu', figsize=(5, 2.5))\nwith 
autograd.record():\n y = npx.sigmoid(x)\nd2l.plot(x, y, 'x', 'sigmoid(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))\nwith autograd.record():\n y = np.tanh(x)\nd2l.plot(x, y, 'x', 'tanh(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))"} +{"id": 250, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = tf.Variable(tf.random.normal(shape=(num_inputs, num_hiddens), mean=0, stddev=0.01))\nb1 = tf.Variable(tf.zeros(num_hiddens))\nW2 = tf.Variable(tf.random.normal(shape=(num_hiddens, num_outputs), mean=0, stddev=0.01))\nb2 = tf.Variable(tf.zeros(num_outputs))\nparams = [W1, b1, W2, b2]\ndef relu(X):\n return tf.math.maximum(X, 0)\ndef net(X):\n X = tf.reshape(X, (-1, num_inputs))\n H = relu(tf.matmul(X, W1) + b1)\n return tf.matmul(H, W2) + b2\ndef loss(y_hat, y):\n return tf.losses.sparse_categorical_crossentropy(y, y_hat, from_logits=True)\nnum_epochs, lr = 10, 0.1\nupdater = d2l.Updater([W1, W2, b1, b2], lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)", "mxnet": "from mxnet import gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens))\nb1 = np.zeros(num_hiddens)\nW2 = np.random.normal(scale=0.01, size=(num_hiddens, num_outputs))\nb2 = np.zeros(num_outputs)\nparams = [W1, b1, W2, b2]\nfor param in params:\n param.attach_grad()\ndef relu(X):\n return np.maximum(X, 0)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(np.dot(X, W1) + b1)\n return np.dot(H, W2) + b2\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\nnum_epochs, lr = 10, 0.1\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))"} +{"id": 251, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nnet = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(10)])\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "mxnet": "from mxnet import gluon, init, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation='relu'), nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 252, "tensorflow": "import math\nimport numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntrue_w, features, poly_features, labels = [tf.constant(x, dtype=tf.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n l = loss(net(X), 
y)\n metric.add(tf.reduce_sum(l), d2l.size(l))\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = tf.losses.MeanSquaredError()\n input_shape = train_features.shape[-1]\n net = tf.keras.Sequential()\n net.add(tf.keras.layers.Dense(1, use_bias=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False)\n trainer = tf.keras.optimizers.SGD(learning_rate=.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))", "mxnet": "import math\nfrom mxnet import gluon, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(l.sum(), d2l.size(l))\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = gluon.loss.L2Loss()\n net = nn.Sequential()\n net.add(nn.Dense(1, use_bias=False))\n net.initialize()\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False)\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01})\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))"} +{"id": 253, "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = tf.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = tf.Variable(tf.random.normal(mean=1, shape=(num_inputs, 1)))\n b = tf.Variable(tf.zeros(shape=(1, )))\n return [w, b]\ndef l2_penalty(w):\n return tf.reduce_sum(tf.pow(w, 2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n l = loss(net(X), y) + lambd * l2_penalty(w)\n grads = tape.gradient(l, [w, b])\n d2l.sgd([w, b], grads, lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = tf.keras.models.Sequential()\n net.add(tf.keras.layers.Dense(1, 
kernel_regularizer=tf.keras.regularizers.l2(wd)))\n net.build(input_shape=(1, num_inputs))\n w, b = net.trainable_variables\n loss = tf.keras.losses.MeanSquaredError()\n num_epochs, lr = 100, 0.003\n trainer = tf.keras.optimizers.SGD(learning_rate=lr)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n l = loss(net(X), y) + net.losses\n grads = tape.gradient(l, net.trainable_variables)\n trainer.apply_gradients(zip(grads, net.trainable_variables))\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))", "mxnet": "%matplotlib inline\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = np.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = np.random.normal(scale=1, size=(num_inputs, 1))\n b = np.zeros(1)\n w.attach_grad()\n b.attach_grad()\n return [w, b]\ndef l2_penalty(w):\n return (w**2).sum() / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = nn.Sequential()\n net.add(nn.Dense(1))\n net.initialize(init.Normal(sigma=1))\n loss = gluon.loss.L2Loss()\n num_epochs, lr = 100, 0.003\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr, 'wd': wd})\n net.collect_params('.*bias').setattr('wd_mult', 0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"} +{"id": 254, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return tf.zeros_like(X)\n if dropout == 0:\n return X\n mask = tf.random.uniform(shape=tf.shape(X), minval=0, maxval=1) < 1 - dropout\n return tf.cast(mask, dtype=tf.float32) * X / (1.0 - dropout)\nX = tf.reshape(tf.range(16, dtype=tf.float32), (2, 8))\nnum_outputs, num_hiddens1, num_hiddens2 = 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(tf.keras.Model):\n def __init__(self, num_outputs, num_hiddens1, num_hiddens2):\n super().__init__()\n self.input_layer = tf.keras.layers.Flatten()\n self.hidden1 = tf.keras.layers.Dense(num_hiddens1, activation='relu')\n self.hidden2 = tf.keras.layers.Dense(num_hiddens2, activation='relu')\n 
self.output_layer = tf.keras.layers.Dense(num_outputs)\n def call(self, inputs, training=None):\n x = self.input_layer(inputs)\n x = self.hidden1(x)\n if training:\n x = dropout_layer(x, dropout1)\n x = self.hidden2(x)\n if training:\n x = dropout_layer(x, dropout2)\n x = self.output_layer(x)\n return x\nnet = Net(num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dropout(dropout1),\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dropout(dropout2),\n tf.keras.layers.Dense(10),\n])\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "mxnet": "from mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return np.zeros_like(X)\n if dropout == 0:\n return X\n mask = np.random.uniform(0, 1, X.shape) > dropout\n return mask.astype(np.float32) * X / (1.0 - dropout)\nX = np.arange(16).reshape(2, 8)\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\nW1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens1))\nb1 = np.zeros(num_hiddens1)\nW2 = np.random.normal(scale=0.01, size=(num_hiddens1, num_hiddens2))\nb2 = np.zeros(num_hiddens2)\nW3 = np.random.normal(scale=0.01, size=(num_hiddens2, num_outputs))\nb3 = np.zeros(num_outputs)\nparams = [W1, b1, W2, b2, W3, b3]\nfor param in params:\n param.attach_grad()\ndropout1, dropout2 = 0.2, 0.5\ndef net(X):\n X = X.reshape(-1, num_inputs)\n H1 = npx.relu(np.dot(X, W1) + b1)\n if autograd.is_training():\n H1 = dropout_layer(H1, dropout1)\n H2 = npx.relu(np.dot(H1, W2) + b2)\n if autograd.is_training():\n H2 = dropout_layer(H2, dropout2)\n return np.dot(H2, W3) + b3\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation=\"relu\"),\n nn.Dropout(dropout1),\n nn.Dense(256, activation=\"relu\"),\n nn.Dropout(dropout2),\n nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 255, "tensorflow": "trainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nx = tf.Variable(tf.range(-8.0, 8.0, 0.1))\nwith tf.GradientTape() as t:\n y = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), [y.numpy(), t.gradient(y, x).numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = tf.random.normal((4, 4))\nfor i in range(100):\n M = tf.matmul(M, tf.random.normal((4, 4)))", "mxnet": "trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib 
inline\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nx = np.arange(-8.0, 8.0, 0.1)\nx.attach_grad()\nwith autograd.record():\n y = npx.sigmoid(x)\ny.backward()\nd2l.plot(x, [y, x.grad], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = np.random.normal(size=(4, 4))\nfor i in range(100):\n M = np.dot(M, np.random.normal(size=(4, 4)))"} +{"id": 256, "tensorflow": "%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn_train = train_data.shape[0]\ntrain_features = tf.constant(all_features[:n_train].values, dtype=tf.float32)\ntest_features = tf.constant(all_features[n_train:].values, dtype=tf.float32)\ntrain_labels = tf.constant(train_data.SalePrice.values.reshape(-1, 1), dtype=tf.float32)\nloss = tf.keras.losses.MeanSquaredError()\ndef get_net():\n net = tf.keras.models.Sequential()\n net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(weight_decay)))\n return net\ndef log_rmse(y_true, y_pred):\n clipped_preds = tf.clip_by_value(y_pred, 1, float('inf'))\n return tf.sqrt(tf.reduce_mean(loss(tf.math.log(y_true), tf.math.log(clipped_preds))))\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n net.compile(loss=loss, optimizer=optimizer)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n y_hat = net(X)\n l = loss(y, y_hat)\n params = net.trainable_variables\n grads = tape.gradient(l, params)\n optimizer.apply_gradients(zip(grads, params))\n train_ls.append(log_rmse(train_labels, net(train_features)))\n if test_labels is not None:\n test_ls.append(log_rmse(test_labels, net(test_features)))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = tf.concat([X_train, X_part], 0)\n y_train = tf.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)", "mxnet": "%matplotlib inline\nimport pandas as pd\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nn_train = train_data.shape[0]\ntrain_features = np.array(all_features[:n_train].values, dtype=np.float32)\ntest_features = np.array(all_features[n_train:].values, dtype=np.float32)\ntrain_labels = np.array(train_data.SalePrice.values.reshape(-1, 1), dtype=np.float32)\nloss = gluon.loss.L2Loss()\ndef get_net():\n net = nn.Sequential()\n net.add(nn.Dense(1))\n 
net.initialize()\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = np.clip(net(features), 1, float('inf'))\n return np.sqrt(2 * loss(np.log(clipped_preds), np.log(labels)).mean())\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': learning_rate, 'wd': weight_decay})\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = np.concatenate([X_train, X_part], 0)\n y_train = np.concatenate([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).asnumpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)"} +{"id": 257, "tensorflow": "import tensorflow as tf\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((2, 20))\nnet(X)\nclass MLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)\n self.out = tf.keras.layers.Dense(units=10)\n def call(self, X):\n return self.out(self.hidden((X)))\nnet = MLP()\nnet(X)\nclass MySequential(tf.keras.Model):\n def __init__(self, *args):\n super().__init__()\n self.modules = []\n for block in args:\n self.modules.append(block)\n def call(self, X):\n for module in self.modules:\n X = module(X)\n return X\nnet = MySequential(\n tf.keras.layers.Dense(units=256, activation=tf.nn.relu),\n tf.keras.layers.Dense(10))\nnet(X)\nclass FixedHiddenMLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.flatten = tf.keras.layers.Flatten()\n self.rand_weight = tf.constant(tf.random.uniform((20, 20)))\n self.dense = tf.keras.layers.Dense(20, activation=tf.nn.relu)\n def call(self, inputs):\n X = self.flatten(inputs)\n X = tf.nn.relu(tf.matmul(X, self.rand_weight) + 1)\n X = self.dense(X)\n while tf.reduce_sum(tf.math.abs(X)) > 1:\n X /= 2\n return tf.reduce_sum(X)\nnet = FixedHiddenMLP()\nnet(X)\nclass NestMLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.net = tf.keras.Sequential()\n self.net.add(tf.keras.layers.Dense(64, activation=tf.nn.relu))\n self.net.add(tf.keras.layers.Dense(32, activation=tf.nn.relu))\n self.dense = tf.keras.layers.Dense(16, 
activation=tf.nn.relu)\n def call(self, inputs):\n return self.dense(self.net(inputs))\nchimera = tf.keras.Sequential()\nchimera.add(NestMLP())\nchimera.add(tf.keras.layers.Dense(20))\nchimera.add(FixedHiddenMLP())\nchimera(X)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation='relu'))\nnet.add(nn.Dense(10))\nnet.initialize()\nX = np.random.uniform(size=(2, 20))\nnet(X)\nclass MLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.hidden = nn.Dense(256, activation='relu')\n self.out = nn.Dense(10)\n def forward(self, X):\n return self.out(self.hidden(X))\nnet = MLP()\nnet.initialize()\nnet(X)\nclass MySequential(nn.Block):\n def add(self, block):\n\n self._children[block.name] = block\n def forward(self, X):\n for block in self._children.values():\n X = block(X)\n return X\nnet = MySequential()\nnet.add(nn.Dense(256, activation='relu'))\nnet.add(nn.Dense(10))\nnet.initialize()\nnet(X)\nclass FixedHiddenMLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.rand_weight = self.params.get_constant('rand_weight', np.random.uniform(size=(20, 20)))\n self.dense = nn.Dense(20, activation='relu')\n def forward(self, X):\n X = self.dense(X)\n X = npx.relu(np.dot(X, self.rand_weight.data()) + 1)\n X = self.dense(X)\n while np.abs(X).sum() > 1:\n X /= 2\n return X.sum()\nnet = FixedHiddenMLP()\nnet.initialize()\nnet(X)\nclass NestMLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.net = nn.Sequential()\n self.net.add(nn.Dense(64, activation='relu'), nn.Dense(32, activation='relu'))\n self.dense = nn.Dense(16, activation='relu')\n def forward(self, X):\n return self.dense(self.net(X))\nchimera = nn.Sequential()\nchimera.add(NestMLP(), nn.Dense(20), FixedHiddenMLP())\nchimera.initialize()\nchimera(X)"} +{"id": 258, "tensorflow": "import tensorflow as tf\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu),\n tf.keras.layers.Dense(1),\n])\nX = tf.random.uniform((2, 4))\nnet(X)\nnet.get_weights()[1]\ndef block1(name):\n return tf.keras.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu)], name=name)\ndef block2():\n net = tf.keras.Sequential()\n for i in range(4):\n net.add(block1(name=f'block-{i}'))\n return net\nrgnet = tf.keras.Sequential()\nrgnet.add(block2())\nrgnet.add(tf.keras.layers.Dense(1))\nrgnet(X)\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu,\n kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.01), bias_initializer=tf.zeros_initializer()),\n tf.keras.layers.Dense(1)])\nnet(X)\nnet.weights[0], net.weights[1]\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.Constant(1), bias_initializer=tf.zeros_initializer()),\n tf.keras.layers.Dense(1),\n])\nnet(X)\nnet.weights[0], net.weights[1]\n net = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.GlorotUniform()),\n tf.keras.layers.Dense(1, kernel_initializer=tf.keras.initializers.Constant(1)),\n])\nnet(X)\nclass MyInit(tf.keras.initializers.Initializer):\n def __call__(self, shape, dtype=None):\n data=tf.random.uniform(shape, -10, 10, dtype=dtype)\n factor=(tf.abs(data) >= 5)\n factor=tf.cast(factor, 
tf.float32)\n return data * factor\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=MyInit()),\n tf.keras.layers.Dense(1)])\nnet(X)\nnet.layers[1].weights[0][:].assign(net.layers[1].weights[0] + 1)\nnet.layers[1].weights[0][0, 0].assign(42)\nnet.layers[1].weights[0]\nlayer = CenteredLayer()\nlayer(tf.constant([1, 2, 3, 4, 5]))\nnet = tf.keras.Sequential([tf.keras.layers.Dense(128), CenteredLayer()])", "mxnet": "from mxnet import init, np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(8, activation='relu'))\nnet.add(nn.Dense(1))\nnet.initialize()\nX = np.random.uniform(size=(2, 4))\nnet(X)\nnet.collect_params()['dense1_bias'].data()\ndef block1():\n net = nn.Sequential()\n net.add(nn.Dense(32, activation='relu'))\n net.add(nn.Dense(16, activation='relu'))\n return net\ndef block2():\n net = nn.Sequential()\n for _ in range(4):\n net.add(block1())\n return net\nrgnet = nn.Sequential()\nrgnet.add(block2())\nrgnet.add(nn.Dense(10))\nrgnet.initialize()\nrgnet(X)\nnet.initialize(init=init.Normal(sigma=0.01), force_reinit=True)\nnet[0].weight.data()[0]\nnet.initialize(init=init.Constant(1), force_reinit=True)\nnet[0].weight.data()[0]\nnet[0].weight.initialize(init=init.Xavier(), force_reinit=True)\nnet[1].initialize(init=init.Constant(42), force_reinit=True)\nclass MyInit(init.Initializer):\n def _init_weight(self, name, data):\n data[:] = np.random.uniform(-10, 10, data.shape)\n data *= np.abs(data) >= 5\nnet.initialize(MyInit(), force_reinit=True)\nnet[0].weight.data()[:2]\nnet[0].weight.data()[:] += 1\nnet[0].weight.data()[0, 0] = 42\nnet[0].weight.data()[0]\nlayer = CenteredLayer()\nlayer(np.array([1, 2, 3, 4, 5]))\nnet = nn.Sequential()\nnet.add(nn.Dense(128), CenteredLayer())\nnet.initialize()"} +{"id": 259, "tensorflow": "import tensorflow as tf\nclass CenteredLayer(tf.keras.Model):\n def __init__(self):\n super().__init__()\n def call(self, inputs):\n return inputs - tf.reduce_mean(inputs)\nY = net(tf.random.uniform((4, 8)))\ntf.reduce_mean(Y)\nclass MyDense(tf.keras.Model):\n def __init__(self, units):\n super().__init__()\n self.units = units\n def build(self, X_shape):\n self.weight = self.add_weight(name='weight',\n shape=[X_shape[-1], self.units],\n initializer=tf.random_normal_initializer())\n self.bias = self.add_weight(\n name='bias', shape=[self.units],\n initializer=tf.zeros_initializer())\n def call(self, X):\n linear = tf.matmul(X, self.weight) + self.bias\n return tf.nn.relu(linear)\ndense = MyDense(3)\ndense(tf.random.uniform((2, 5)))\ndense.get_weights()\ndense(tf.random.uniform((2, 5)))\nnet = tf.keras.models.Sequential([MyDense(8), MyDense(1)])\nnet(tf.random.uniform((2, 64)))", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nclass CenteredLayer(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n def forward(self, X):\n return X - X.mean()\nY = net(np.random.uniform(size=(4, 8)))\nY.mean()\nclass MyDense(nn.Block):\n def __init__(self, units, in_units, **kwargs):\n super().__init__(**kwargs)\n self.weight = self.params.get('weight', shape=(in_units, units))\n self.bias = self.params.get('bias', shape=(units,))\n def forward(self, x):\n linear = np.dot(x, self.weight.data(ctx=x.ctx)) + self.bias.data(\n ctx=x.ctx)\n return npx.relu(linear)\ndense = MyDense(units=3, in_units=5)\ndense.params\ndense.initialize()\ndense(np.random.uniform(size=(2, 5)))\nnet = nn.Sequential()\nnet.add(MyDense(8, 
in_units=64), MyDense(1, in_units=8))\nnet.initialize()\nnet(np.random.uniform(size=(2, 64)))"} +{"id": 260, "tensorflow": "import numpy as np\nimport tensorflow as tf\nx = tf.range(4)\nnp.save('x-file.npy', x)\nx2 = np.load('x-file.npy', allow_pickle=True)\ny = tf.zeros(4)\nnp.save('xy-files.npy', [x, y])\nx2, y2 = np.load('xy-files.npy', allow_pickle=True)\nmydict = {'x': x, 'y': y}\nnp.save('mydict.npy', mydict)\nmydict2 = np.load('mydict.npy', allow_pickle=True)\nclass MLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.flatten = tf.keras.layers.Flatten()\n self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)\n self.out = tf.keras.layers.Dense(units=10)\n def call(self, inputs):\n x = self.flatten(inputs)\n x = self.hidden(x)\n return self.out(x)\nnet = MLP()\nX = tf.random.uniform((2, 20))\nY = net(X)\nnet.save_weights('mlp.params')\nclone = MLP()\nclone.load_weights('mlp.params')", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nx = np.arange(4)\nnpx.save('x-file', x)\nx2 = npx.load('x-file')\ny = np.zeros(4)\nnpx.save('x-files', [x, y])\nx2, y2 = npx.load('x-files')\nmydict = {'x': x, 'y': y}\nnpx.save('mydict', mydict)\nmydict2 = npx.load('mydict')\nclass MLP(nn.Block):\n def __init__(self, **kwargs):\n super(MLP, self).__init__(**kwargs)\n self.hidden = nn.Dense(256, activation='relu')\n self.output = nn.Dense(10)\n def forward(self, x):\n return self.output(self.hidden(x))\nnet = MLP()\nnet.initialize()\nX = np.random.uniform(size=(2, 20))\nY = net(X)\nnet.save_parameters('mlp.params')\nclone = MLP()\nclone.load_parameters('mlp.params')"} +{"id": 261, "tensorflow": "import tensorflow as tf\ntf.device('/CPU:0'), tf.device('/GPU:0'), tf.device('/GPU:1')\nlen(tf.config.experimental.list_physical_devices('GPU'))\ndef try_gpu(i=0):\n if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1:\n return tf.device(f'/GPU:{i}')\n return tf.device('/CPU:0')\ndef try_all_gpus():\n num_gpus = len(tf.config.experimental.list_physical_devices('GPU'))\n devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)]\n return devices if devices else [tf.device('/CPU:0')]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = tf.constant([1, 2, 3])\nx.device\nwith try_gpu():\n X = tf.ones((2, 3))\nwith try_gpu(1):\n Y = tf.random.uniform((2, 3))\nwith try_gpu(1):\n Z = X\nwith try_gpu(1):\n Z2 = Z\nZ2 is Z\nstrategy = tf.distribute.MirroredStrategy()\nwith strategy.scope():\n net = tf.keras.models.Sequential([\n tf.keras.layers.Dense(1)])\nnet.layers[0].weights[0].device, net.layers[0].weights[1].device", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnpx.cpu(), npx.gpu(), npx.gpu(1)\nnpx.num_gpus()\ndef try_gpu(i=0):\n return npx.gpu(i) if npx.num_gpus() >= i + 1 else npx.cpu()\ndef try_all_gpus():\n devices = [npx.gpu(i) for i in range(npx.num_gpus())]\n return devices if devices else [npx.cpu()]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = np.array([1, 2, 3])\nx.ctx\nX = np.ones((2, 3), ctx=try_gpu())\nY = np.random.uniform(size=(2, 3), ctx=try_gpu(1))\nZ = X.copyto(try_gpu(1))\nZ.as_in_ctx(try_gpu(1)) is Z\nnet = nn.Sequential()\nnet.add(nn.Dense(1))\nnet.initialize(ctx=try_gpu())\nnet[0].weight.data().ctx"} +{"id": 262, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef corr2d(X, K):\n h, w = K.shape\n Y = tf.Variable(tf.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j].assign(tf.reduce_sum(\n X[i: i + 
h, j: j + w] * K))\n return Y\nX = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = tf.constant([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(tf.keras.layers.Layer):\n def __init__(self):\n super().__init__()\n def build(self, kernel_size):\n initializer = tf.random_normal_initializer()\n self.weight = self.add_weight(name='w', shape=kernel_size, initializer=initializer)\n self.bias = self.add_weight(name='b', shape=(1, ), initializer=initializer)\n def call(self, inputs):\n return corr2d(inputs, self.weight) + self.bias\nX = tf.Variable(tf.ones((6, 8)))\nX[:, 2:6].assign(tf.zeros(X[:, 2:6].shape))\nK = tf.constant([[1.0, -1.0]])\ncorr2d(tf.transpose(X), K)\nconv2d = tf.keras.layers.Conv2D(1, (1, 2), use_bias=False)\nX = tf.reshape(X, (1, 6, 8, 1))\nY = tf.reshape(Y, (1, 6, 7, 1))\nlr = 3e-2\nY_hat = conv2d(X)\nfor i in range(10):\n with tf.GradientTape(watch_accessed_variables=False) as g:\n g.watch(conv2d.weights[0])\n Y_hat = conv2d(X)\n l = (abs(Y_hat - Y)) ** 2\n update = tf.multiply(lr, g.gradient(l, conv2d.weights[0]))\n weights = conv2d.get_weights()\n weights[0] = conv2d.weights[0] - update\n conv2d.set_weights(weights)\ntf.reshape(conv2d.get_weights()[0], (1, 2))", "mxnet": "from mxnet import autograd, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef corr2d(X, K):\n h, w = K.shape\n Y = np.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = np.array([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Block):\n def __init__(self, kernel_size, **kwargs):\n super().__init__(**kwargs)\n self.weight = self.params.get('weight', shape=kernel_size)\n self.bias = self.params.get('bias', shape=(1,))\n def forward(self, x):\n return corr2d(x, self.weight.data()) + self.bias.data()\nX = np.ones((6, 8))\nX[:, 2:6] = 0\nK = np.array([[1.0, -1.0]])\ncorr2d(d2l.transpose(X), K)\nconv2d = nn.Conv2D(1, kernel_size=(1, 2), use_bias=False)\nconv2d.initialize()\n\nX = X.reshape(1, 1, 6, 8)\nY = Y.reshape(1, 1, 6, 7)\nlr = 3e-2\nfor i in range(10):\n with autograd.record():\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n l.backward()\n conv2d.weight.data()[:] -= lr * conv2d.weight.grad()\nconv2d.weight.data().reshape((1, 2))"} +{"id": 263, "tensorflow": "import tensorflow as tf\n\ndef comp_conv2d(conv2d, X):\n X = tf.reshape(X, (1, ) + X.shape + (1, ))\n Y = conv2d(X)\n return tf.reshape(Y, Y.shape[1:3])\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same')\nX = tf.random.uniform(shape=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=(5, 3), padding='same')\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same', strides=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=(3,5), padding='valid', strides=(3, 4))\ncomp_conv2d(conv2d, X).shape", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\ndef comp_conv2d(conv2d, X):\n conv2d.initialize()\n X = X.reshape((1, 1) + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(1, kernel_size=3, padding=1)\nX = np.random.uniform(size=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=3, padding=1, strides=2)\ncomp_conv2d(conv2d, 
X).shape\nconv2d = nn.Conv2D(1, kernel_size=(3, 5), padding=(0, 1), strides=(3, 4))\ncomp_conv2d(conv2d, X).shape"} +{"id": 264, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef corr2d_multi_in(X, K):\n return tf.reduce_sum([d2l.corr2d(x, k) for x, k in zip(X, K)], axis=0)\nX = tf.constant([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = tf.constant([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return tf.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = tf.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = tf.reshape(X, (c_i, h * w))\n K = tf.reshape(K, (c_o, c_i))\n Y = tf.matmul(K, X)\n return tf.reshape(Y, (c_o, h, w))\nX = tf.random.normal((3, 3, 3), 0, 1)\nK = tf.random.normal((2, 3, 1, 1), 0, 1)\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(tf.reduce_sum(tf.abs(Y1 - Y2))) < 1e-6", "mxnet": "from mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = np.array([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return np.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = np.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = np.dot(K, X)\n return Y.reshape((c_o, h, w))\nX = np.random.normal(0, 1, (3, 3, 3))\nK = np.random.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(np.abs(Y1 - Y2).sum()) < 1e-6"} +{"id": 265, "tensorflow": "import tensorflow as tf\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = tf.Variable(tf.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w +1)))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j].assign(tf.reduce_max(X[i: i + p_h, j: j + p_w]))\n elif mode =='avg':\n Y[i, j].assign(tf.reduce_mean(X[i: i + p_h, j: j + p_w]))\n return Y\nX = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = tf.reshape(tf.range(16, dtype=tf.float32), (1, 4, 4, 1))\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3])\npool2d(X)\npaddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid',\n strides=2)\npool2d(X_padded)\npaddings = tf.constant([[0, 0], [0, 0], [1, 1], [0, 0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[2, 3], padding='valid',\n strides=(2, 3))\npool2d(X_padded)\nX = tf.concat([X, X + 1], 3)\npaddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid',\n strides=2)\npool2d(X_padded)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = np.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] 
= X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = np.arange(16, dtype=np.float32).reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, strides=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), strides=(2, 3))\npool2d(X)\nX = np.concatenate((X, X + 1), 1)\npool2d = nn.MaxPool2D(3, padding=1, strides=2)\npool2d(X)"} +{"id": 266, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, activation='sigmoid', padding='same'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5, activation='sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120, activation='sigmoid'),\n tf.keras.layers.Dense(84, activation='sigmoid'),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((1, 28, 28, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\nclass TrainCallback(tf.keras.callbacks.Callback):\n def __init__(self, net, train_iter, test_iter, num_epochs, device_name):\n self.timer = d2l.Timer()\n self.animator = d2l.Animator( xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n self.net = net\n self.train_iter = train_iter\n self.test_iter = test_iter\n self.num_epochs = num_epochs\n self.device_name = device_name\n def on_epoch_begin(self, epoch, logs=None):\n self.timer.start()\n def on_epoch_end(self, epoch, logs):\n self.timer.stop()\n test_acc = self.net.evaluate(self.test_iter, verbose=0, return_dict=True)['accuracy']\n metrics = (logs['loss'], logs['accuracy'], test_acc)\n self.animator.add(epoch + 1, metrics)\n if epoch == self.num_epochs - 1:\n batch_size = next(iter(self.train_iter))[0].shape[0]\n num_examples = batch_size * tf.data.experimental.cardinality(self.train_iter).numpy()\ndef train_ch6(net_fn, train_iter, test_iter, num_epochs, lr, device):\n device_name = device._device_name\n strategy = tf.distribute.OneDeviceStrategy(device_name)\n with strategy.scope():\n optimizer = tf.keras.optimizers.SGD(learning_rate=lr)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n net = net_fn()\n net.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])\n callback = TrainCallback(net, train_iter, test_iter, num_epochs, device_name)\n net.fit(train_iter, epochs=num_epochs, verbose=0, callbacks=[callback])\n return net", "mxnet": "from mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Conv2D(channels=6, kernel_size=5, padding=2, activation='sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120, activation='sigmoid'),\n nn.Dense(84, activation='sigmoid'),\n nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 28, 28))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n net.initialize(force_reinit=True, ctx=device, init=init.Xavier())\n loss = gluon.loss.SoftmaxCrossEntropyLoss()\n trainer = gluon.Trainer(net.collect_params(), 
'sgd', {'learning_rate': lr})\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n X, y = X.as_in_ctx(device), y.as_in_ctx(device)\n with autograd.record():\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n trainer.step(X.shape[0])\n metric.add(l.sum(), d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))"} +{"id": 267, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=96, kernel_size=11, strides=4, activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Conv2D(filters=256, kernel_size=5, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape:\t', X.shape)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(\n nn.Conv2D(96, kernel_size=11, strides=4, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Conv2D(256, kernel_size=5, padding=2, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),\n nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),\n nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)"} +{"id": 268, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef vgg_block(num_convs, num_channels):\n blk = tf.keras.models.Sequential()\n for _ in range(num_convs):\n blk.add(tf.keras.layers.Conv2D(num_channels,kernel_size=3, padding='same',activation='relu'))\n blk.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))\n return blk\ndef vgg(conv_arch):\n net = tf.keras.models.Sequential()\n for (num_convs, num_channels) in conv_arch:\n net.add(vgg_block(num_convs, num_channels))\n net.add(tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(4096, activation='relu'),\n 
tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10)]))\n return net\nnet = vgg(conv_arch)\nX = tf.random.uniform((1, 224, 224, 1))\nfor blk in net.layers:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t', X.shape)\nratio = 4\nsmall_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]\nnet = lambda: vgg(small_conv_arch)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef vgg_block(num_convs, num_channels):\n blk = nn.Sequential()\n for _ in range(num_convs):\n blk.add(nn.Conv2D(num_channels, kernel_size=3, padding=1, activation='relu'))\n blk.add(nn.MaxPool2D(pool_size=2, strides=2))\n return blk\ndef vgg(conv_arch):\n net = nn.Sequential()\n for (num_convs, num_channels) in conv_arch:\n net.add(vgg_block(num_convs, num_channels))\n net.add(nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(10))\n return net\nnet = vgg(conv_arch)\nnet.initialize()\nX = np.random.uniform(size=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.name, 'output shape:\t', X.shape)\nratio = 4\nsmall_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]\nnet = vgg(small_conv_arch)"} +{"id": 269, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef nin_block(num_channels, kernel_size, strides, padding):\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(num_channels, kernel_size, strides=strides, padding=padding, activation='relu'),\n tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu'),\n tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu')])\ndef net():\n return tf.keras.models.Sequential([\n nin_block(96, kernel_size=11, strides=4, padding='valid'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n nin_block(256, kernel_size=5, strides=1, padding='same'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n nin_block(384, kernel_size=3, strides=1, padding='same'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Dropout(0.5),\n nin_block(10, kernel_size=3, strides=1, padding='same'),\n tf.keras.layers.GlobalAveragePooling2D(),\n tf.keras.layers.Reshape((1, 1, 10)),\n tf.keras.layers.Flatten(),\n ])\nX = tf.random.uniform((1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef nin_block(num_channels, kernel_size, strides, padding):\n blk = nn.Sequential()\n blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding, activation='relu'),\n nn.Conv2D(num_channels, kernel_size=1, activation='relu'),\n nn.Conv2D(num_channels, kernel_size=1, activation='relu'))\n return blk\nnet = nn.Sequential()\nnet.add(nin_block(96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(pool_size=3, strides=2),\n nin_block(256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(pool_size=3, strides=2),\n nin_block(384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Dropout(0.5),\n nin_block(10, kernel_size=3, strides=1, padding=1),\n nn.GlobalAvgPool2D(),\n nn.Flatten())\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)"} +{"id": 270, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass Inception(tf.keras.Model):\n def 
__init__(self, c1, c2, c3, c4):\n super().__init__()\n self.p1_1 = tf.keras.layers.Conv2D(c1, 1, activation='relu')\n self.p2_1 = tf.keras.layers.Conv2D(c2[0], 1, activation='relu')\n self.p2_2 = tf.keras.layers.Conv2D(c2[1], 3, padding='same', activation='relu')\n self.p3_1 = tf.keras.layers.Conv2D(c3[0], 1, activation='relu')\n self.p3_2 = tf.keras.layers.Conv2D(c3[1], 5, padding='same', activation='relu')\n self.p4_1 = tf.keras.layers.MaxPool2D(3, 1, padding='same')\n self.p4_2 = tf.keras.layers.Conv2D(c4, 1, activation='relu')\n def call(self, x):\n p1 = self.p1_1(x)\n p2 = self.p2_2(self.p2_1(x))\n p3 = self.p3_2(self.p3_1(x))\n p4 = self.p4_2(self.p4_1(x))\n return tf.keras.layers.Concatenate()([p1, p2, p3, p4])\ndef b1():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, 7, strides=2, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b2():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, 1, activation='relu'),\n tf.keras.layers.Conv2D(192, 3, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b3():\n return tf.keras.models.Sequential([\n Inception(64, (96, 128), (16, 32), 32),\n Inception(128, (128, 192), (32, 96), 64),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b4():\n return tf.keras.Sequential([\n Inception(192, (96, 208), (16, 48), 64),\n Inception(160, (112, 224), (24, 64), 64),\n Inception(128, (128, 256), (24, 64), 64),\n Inception(112, (144, 288), (32, 64), 64),\n Inception(256, (160, 320), (32, 128), 128),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b5():\n return tf.keras.Sequential([\n Inception(256, (160, 320), (32, 128), 128),\n Inception(384, (192, 384), (48, 128), 128),\n tf.keras.layers.GlobalAvgPool2D(),\n tf.keras.layers.Flatten()\n ])\ndef net():\n return tf.keras.Sequential([b1(), b2(), b3(), b4(), b5(),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform(shape=(1, 96, 96, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nclass Inception(nn.Block):\n def __init__(self, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(c1, kernel_size=1, activation='relu')\n self.p2_1 = nn.Conv2D(c2[0], kernel_size=1, activation='relu')\n self.p2_2 = nn.Conv2D(c2[1], kernel_size=3, padding=1, activation='relu')\n self.p3_1 = nn.Conv2D(c3[0], kernel_size=1, activation='relu')\n self.p3_2 = nn.Conv2D(c3[1], kernel_size=5, padding=2, activation='relu')\n self.p4_1 = nn.MaxPool2D(pool_size=3, strides=1, padding=1)\n self.p4_2 = nn.Conv2D(c4, kernel_size=1, activation='relu')\n def forward(self, x):\n p1 = self.p1_1(x)\n p2 = self.p2_2(self.p2_1(x))\n p3 = self.p3_2(self.p3_1(x))\n p4 = self.p4_2(self.p4_1(x))\n return np.concatenate((p1, p2, p3, p4), axis=1)\nb1 = nn.Sequential()\nb1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb2 = nn.Sequential()\nb2.add(nn.Conv2D(64, kernel_size=1, activation='relu'),\n nn.Conv2D(192, kernel_size=3, padding=1, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb3 = nn.Sequential()\nb3.add(Inception(64, (96, 128), (16, 32), 32),\n Inception(128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb4 = 
nn.Sequential()\nb4.add(Inception(192, (96, 208), (16, 48), 64),\n Inception(160, (112, 224), (24, 64), 64),\n Inception(128, (128, 256), (24, 64), 64),\n Inception(112, (144, 288), (32, 64), 64),\n Inception(256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb5 = nn.Sequential()\nb5.add(Inception(256, (160, 320), (32, 128), 128),\n Inception(384, (192, 384), (48, 128), 128),\n nn.GlobalAvgPool2D())\nnet = nn.Sequential()\nnet.add(b1, b2, b3, b4, b5, nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 96, 96))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 271, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps):\n inv = tf.cast(tf.math.rsqrt(moving_var + eps), X.dtype)\n inv *= gamma\n Y = X * inv + (beta - moving_mean * inv)\n return Y\nclass BatchNorm(tf.keras.layers.Layer):\n def __init__(self, **kwargs):\n super(BatchNorm, self).__init__(**kwargs)\n def build(self, input_shape):\n weight_shape = [input_shape[-1], ]\n self.gamma = self.add_weight(name='gamma', shape=weight_shape, initializer=tf.initializers.ones, trainable=True)\n self.beta = self.add_weight(name='beta', shape=weight_shape, initializer=tf.initializers.zeros, trainable=True)\n self.moving_mean = self.add_weight(name='moving_mean', shape=weight_shape, initializer=tf.initializers.zeros, trainable=False)\n self.moving_variance = self.add_weight(name='moving_variance', shape=weight_shape, initializer=tf.initializers.ones, trainable=False)\n super(BatchNorm, self).build(input_shape)\n def assign_moving_average(self, variable, value):\n momentum = 0.9\n delta = variable * momentum + value * (1 - momentum)\n return variable.assign(delta)\n @tf.function\n def call(self, inputs, training):\n if training:\n axes = list(range(len(inputs.shape) - 1))\n batch_mean = tf.reduce_mean(inputs, axes, keepdims=True)\n batch_variance = tf.reduce_mean(tf.math.squared_difference(inputs, tf.stop_gradient(batch_mean)), axes, keepdims=True)\n batch_mean = tf.squeeze(batch_mean, axes)\n batch_variance = tf.squeeze(batch_variance, axes)\n mean_update = self.assign_moving_average(self.moving_mean, batch_mean)\n variance_update = self.assign_moving_average(self.moving_variance, batch_variance)\n self.add_update(mean_update)\n self.add_update(variance_update)\n mean, variance = batch_mean, batch_variance\n else:\n mean, variance = self.moving_mean, self.moving_variance\n output = batch_norm(inputs, moving_mean=mean, moving_var=variance, beta=self.beta, gamma=self.gamma, eps=1e-5)\n return output\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(84),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(10)]\n )\nlr, num_epochs, batch_size = 1.0, 10, 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())\ntf.reshape(net.layers[1].gamma, (-1,)), 
tf.reshape(net.layers[1].beta, (-1,))\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(84),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(10),\n ])", "mxnet": "from mxnet import autograd, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n if not autograd.is_training():\n X_hat = (X - moving_mean) / np.sqrt(moving_var + eps)\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = X.mean(axis=0)\n var = ((X - mean) ** 2).mean(axis=0)\n else:\n mean = X.mean(axis=(0, 2, 3), keepdims=True)\n var = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True)\n X_hat = (X - mean) / np.sqrt(var + eps)\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Block):\n def __init__(self, num_features, num_dims, **kwargs):\n super().__init__(**kwargs)\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.params.get('gamma', shape=shape, init=init.One())\n self.beta = self.params.get('beta', shape=shape, init=init.Zero())\n self.moving_mean = np.zeros(shape)\n self.moving_var = np.ones(shape)\n def forward(self, X):\n if self.moving_mean.ctx != X.ctx:\n self.moving_mean = self.moving_mean.copyto(X.ctx)\n self.moving_var = self.moving_var.copyto(X.ctx)\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma.data(), self.beta.data(), self.moving_mean,\n self.moving_var, eps=1e-12, momentum=0.9)\n return Y\nnet = nn.Sequential()\nnet.add(nn.Conv2D(6, kernel_size=5),\n BatchNorm(6, num_dims=4),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(16, kernel_size=5),\n BatchNorm(16, num_dims=4),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120),\n BatchNorm(120, num_dims=2),\n nn.Activation('sigmoid'),\n nn.Dense(84),\n BatchNorm(84, num_dims=2),\n nn.Activation('sigmoid'),\n nn.Dense(10))\nlr, num_epochs, batch_size = 1.0, 10, 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())\nnet[1].gamma.data().reshape(-1,), net[1].beta.data().reshape(-1,)\nnet = nn.Sequential()\nnet.add(nn.Conv2D(6, kernel_size=5),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(16, kernel_size=5),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.Dense(84),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.Dense(10))"} +{"id": 272, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass Residual(tf.keras.Model):\n def __init__(self, num_channels, 
use_1x1conv=False, strides=1):\n super().__init__()\n self.conv1 = tf.keras.layers.Conv2D(\n num_channels, padding='same', kernel_size=3, strides=strides)\n self.conv2 = tf.keras.layers.Conv2D(\n num_channels, kernel_size=3, padding='same')\n self.conv3 = None\n if use_1x1conv:\n self.conv3 = tf.keras.layers.Conv2D(\n num_channels, kernel_size=1, strides=strides)\n self.bn1 = tf.keras.layers.BatchNormalization()\n self.bn2 = tf.keras.layers.BatchNormalization()\n def call(self, X):\n Y = tf.keras.activations.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3 is not None:\n X = self.conv3(X)\n Y += X\n return tf.keras.activations.relu(Y)\nblk = Residual(3)\nX = tf.random.uniform((4, 6, 6, 3))\nY = blk(X)\nY.shape\nblk = Residual(6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\nclass ResnetBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels, num_residuals, first_block=False, **kwargs):\n super(ResnetBlock, self).__init__(**kwargs)\n self.residual_layers = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n self.residual_layers.append(Residual(num_channels, use_1x1conv=True, strides=2))\n else:\n self.residual_layers.append(Residual(num_channels))\n def call(self, X):\n for layer in self.residual_layers.layers:\n X = layer(X)\n return X\nb2 = ResnetBlock(64, 2, first_block=True)\nb3 = ResnetBlock(128, 2)\nb4 = ResnetBlock(256, 2)\nb5 = ResnetBlock(512, 2)\ndef net():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same'),\n ResnetBlock(64, 2, first_block=True),\n ResnetBlock(128, 2),\n ResnetBlock(256, 2),\n ResnetBlock(512, 2),\n tf.keras.layers.GlobalAvgPool2D(),\n tf.keras.layers.Dense(units=10)])\nX = tf.random.uniform(shape=(1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nclass Residual(nn.Block):\n def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):\n super().__init__(**kwargs)\n self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1, strides=strides)\n self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(num_channels, kernel_size=1, strides=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm()\n self.bn2 = nn.BatchNorm()\n def forward(self, X):\n Y = npx.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n return npx.relu(Y + X)\nblk = Residual(3)\nblk.initialize()\nX = np.random.uniform(size=(4, 3, 6, 6))\nblk(X).shape\nblk = Residual(6, use_1x1conv=True, strides=2)\nblk.initialize()\nblk(X).shape\nnet = nn.Sequential()\nnet.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\ndef resnet_block(num_channels, num_residuals, first_block=False):\n blk = nn.Sequential()\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.add(Residual(num_channels, use_1x1conv=True, 
strides=2))\n else:\n blk.add(Residual(num_channels))\n return blk\nnet.add(resnet_block(64, 2, first_block=True),\n resnet_block(128, 2),\n resnet_block(256, 2),\n resnet_block(512, 2))\nnet.add(nn.GlobalAvgPool2D(), nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 273, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass ConvBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels):\n super(ConvBlock, self).__init__()\n self.bn = tf.keras.layers.BatchNormalization()\n self.relu = tf.keras.layers.ReLU()\n self.conv = tf.keras.layers.Conv2D(filters=num_channels, kernel_size=(3, 3), padding='same')\n self.listLayers = [self.bn, self.relu, self.conv]\n def call(self, x):\n y = x\n for layer in self.listLayers.layers:\n y = layer(y)\n y = tf.keras.layers.concatenate([x,y], axis=-1)\n return y\nclass DenseBlock(tf.keras.layers.Layer):\n def __init__(self, num_convs, num_channels):\n super(DenseBlock, self).__init__()\n self.listLayers = []\n for _ in range(num_convs):\n self.listLayers.append(ConvBlock(num_channels))\n def call(self, x):\n for layer in self.listLayers.layers:\n x = layer(x)\n return x\nblk = DenseBlock(2, 10)\nX = tf.random.uniform((4, 8, 8, 3))\nY = blk(X)\nY.shape\nclass TransitionBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels, **kwargs):\n super(TransitionBlock, self).__init__(**kwargs)\n self.batch_norm = tf.keras.layers.BatchNormalization()\n self.relu = tf.keras.layers.ReLU()\n self.conv = tf.keras.layers.Conv2D(num_channels, kernel_size=1)\n self.avg_pool = tf.keras.layers.AvgPool2D(pool_size=2, strides=2)\n def call(self, x):\n x = self.batch_norm(x)\n x = self.relu(x)\n x = self.conv(x)\n return self.avg_pool(x)\nblk = TransitionBlock(10)\nblk(Y).shape\ndef block_1():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.ReLU(),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef block_2():\n net = block_1()\n num_channels, growth_rate = 64, 32\n num_convs_in_dense_blocks = [4, 4, 4, 4]\n for i, num_convs in enumerate(num_convs_in_dense_blocks):\n net.add(DenseBlock(num_convs, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n num_channels //= 2\n net.add(TransitionBlock(num_channels))\n return net\ndef net():\n net = block_2()\n net.add(tf.keras.layers.BatchNormalization())\n net.add(tf.keras.layers.ReLU())\n net.add(tf.keras.layers.GlobalAvgPool2D())\n net.add(tf.keras.layers.Flatten())\n net.add(tf.keras.layers.Dense(10))\n return net", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef conv_block(num_channels):\n blk = nn.Sequential()\n blk.add(nn.BatchNorm(),\n nn.Activation('relu'),\n nn.Conv2D(num_channels, kernel_size=3, padding=1))\n return blk\nclass DenseBlock(nn.Block):\n def __init__(self, num_convs, num_channels, **kwargs):\n super().__init__(**kwargs)\n self.net = nn.Sequential()\n for _ in range(num_convs):\n self.net.add(conv_block(num_channels))\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = np.concatenate((X, Y), axis=1)\n return X\nblk = DenseBlock(2, 10)\nblk.initialize()\nX = np.random.uniform(size=(4, 3, 8, 8))\nY = blk(X)\nY.shape\ndef transition_block(num_channels):\n blk = nn.Sequential()\n 
blk.add(nn.BatchNorm(), nn.Activation('relu'),\n nn.Conv2D(num_channels, kernel_size=1),\n nn.AvgPool2D(pool_size=2, strides=2))\n return blk\nblk = transition_block(10)\nblk.initialize()\nblk(Y).shape\nnet = nn.Sequential()\nnet.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n net.add(DenseBlock(num_convs, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n num_channels //= 2\n net.add(transition_block(num_channels))\nnet.add(nn.BatchNorm(),\n nn.Activation('relu'),\n nn.GlobalAvgPool2D(),\n nn.Dense(10))"} +{"id": 274, "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nT = 1000\ntime = tf.range(1, T + 1, dtype=tf.float32)\nx = tf.sin(0.01 * time) + tf.random.normal([T], 0, 0.2)\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = tf.Variable(tf.zeros((T - tau, tau)))\nfor i in range(tau):\n features[:, i].assign(x[i: T - tau + i])\nlabels = tf.reshape(x[tau:], (-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef get_net():\n net = tf.keras.Sequential([tf.keras.layers.Dense(10, activation='relu'),\n tf.keras.layers.Dense(1)])\n return net\nloss = tf.keras.losses.MeanSquaredError()\ndef train(net, train_iter, loss, epochs, lr):\n trainer = tf.keras.optimizers.Adam()\n for epoch in range(epochs):\n for X, y in train_iter:\n with tf.GradientTape() as g:\n out = net(X)\n l = loss(y, out)\n params = net.trainable_variables\n grads = g.gradient(l, params)\n trainer.apply_gradients(zip(grads, params))\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.numpy(), onestep_preds.numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = tf.Variable(tf.zeros(T))\nmultistep_preds[:n_train + tau].assign(x[:n_train + tau])\nfor i in range(n_train + tau, T):\n multistep_preds[i].assign(tf.reshape(net(tf.reshape(multistep_preds[i - tau: i], (1, -1))), ()))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.numpy(), onestep_preds.numpy(),\n multistep_preds[n_train + tau:].numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = tf.Variable(tf.zeros((T - tau - max_steps + 1, tau + max_steps)))\nfor i in range(tau):\n features[:, i].assign(x[i: i + T - tau - max_steps + 1].numpy())\nfor i in range(tau, tau + max_steps):\n features[:, i].assign(tf.reshape(net((features[:, i - tau: i])), -1))\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))", "mxnet": "%matplotlib inline\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nT = 1000\ntime = np.arange(1, T + 1, dtype=np.float32)\nx = np.sin(0.01 * time) + np.random.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = np.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + 
i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef get_net():\n net = nn.Sequential()\n net.add(nn.Dense(10, activation='relu'),\n nn.Dense(1))\n net.initialize(init.Xavier())\n return net\nloss = gluon.loss.L2Loss()\ndef train(net, train_iter, loss, epochs, lr):\n trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr})\n for epoch in range(epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.asnumpy(), onestep_preds.asnumpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = np.zeros(T)\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.asnumpy(), onestep_preds.asnumpy(),\n multistep_preds[n_train + tau:].asnumpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = np.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape(-1)\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].asnumpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"} +{"id": 275, "tensorflow": "import collections\nimport re\nfrom d2l import tensorflow as d2l\ndef tokenize(lines, token='word'):\n if token == 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('Error: Unknown word element type:' + token)\ntokens = tokenize(lines)\nfor i in range(11):\n print(tokens[i])\ndef load_corpus_time_machine(max_tokens=-1):\n lines = read_time_machine()\n tokens = tokenize(lines, 'char')\n vocab = Vocab(tokens)\n corpus = [vocab[token] for line in tokens for token in line]\n if max_tokens > 0:\n corpus = corpus[:max_tokens]\n return corpus, vocab\ncorpus, vocab = load_corpus_time_machine()\nlen(corpus), len(vocab)", "mxnet": "import collections\nimport re\nfrom d2l import mxnet as d2l\ndef tokenize(lines, token='word'):\n if token == 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('Error: Unknown word element type:' + token)\ntokens = tokenize(lines)\nfor i in range(11):\n print(tokens[i])\ndef load_corpus_time_machine(max_tokens=-1):\n lines = read_time_machine()\n tokens = tokenize(lines, 'char')\n vocab = Vocab(tokens)\n corpus = [vocab[token] for line in tokens for token in line]\n if max_tokens > 0:\n corpus = corpus[:max_tokens]\n return corpus, vocab\ncorpus, vocab = load_corpus_time_machine()\nlen(corpus), len(vocab)"} +{"id": 276, "tensorflow": "import random\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = 
corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield tf.constant(X), tf.constant(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = tf.constant(corpus[offset: offset + num_tokens])\n Ys = tf.constant(corpus[offset + 1: offset + 1 + num_tokens])\n Xs = tf.reshape(Xs, (batch_size, -1))\n Ys = tf.reshape(Ys, (batch_size, -1))\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_batches * num_steps, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y", "mxnet": "import random\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield np.array(X), np.array(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = np.array(corpus[offset: offset + num_tokens])\n Ys = np.array(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"} +{"id": 277, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nX, W_xh = tf.random.normal((3, 1), 0, 1), tf.random.normal((1, 4), 0, 1)\nH, W_hh = tf.random.normal((3, 4), 0, 1), tf.random.normal((4, 4), 0, 1)\ntf.matmul(X, W_xh) + tf.matmul(H, W_hh)\ntf.matmul(tf.concat((X, H), 1), tf.concat((W_xh, W_hh), 0))", "mxnet": "from mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nX, W_xh = np.random.normal(0, 1, (3, 1)), np.random.normal(0, 1, (1, 4))\nH, W_hh = np.random.normal(0, 1, (3, 4)), np.random.normal(0, 1, (4, 4))\nnp.dot(X, W_xh) + np.dot(H, W_hh)\nnp.dot(np.concatenate((X, H), 1), np.concatenate((W_xh, W_hh), 0))"} +{"id": 278, "tensorflow": "%matplotlib inline\nimport math\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ntrain_random_iter, vocab_random_iter = d2l.load_data_time_machine(batch_size, num_steps, use_random_iter=True)\ntf.one_hot(tf.constant([0, 2]), 
len(vocab))\nX = tf.reshape(tf.range(10), (2, 5))\ntf.one_hot(tf.transpose(X), 28).shape\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32)\n W_xh = tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32)\n W_hh = tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32)\n b_h = tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32)\n W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32)\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n return params\ndef init_rnn_state(batch_size, num_hiddens):\n return (tf.zeros((batch_size, num_hiddens)), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n X = tf.reshape(X,[-1,W_xh.shape[0]])\n H = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(H, W_hh) + b_h)\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, init_state, forward_fn, get_params):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.init_state, self.forward_fn = init_state, forward_fn\n self.trainable_variables = get_params(vocab_size, num_hiddens)\n def __call__(self, X, state):\n X = tf.one_hot(tf.transpose(X), self.vocab_size)\n X = tf.cast(X, tf.float32)\n return self.forward_fn(X, state, self.trainable_variables)\n def begin_state(self, batch_size, *args, **kwargs):\n return self.init_state(batch_size, self.num_hiddens)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nnum_hiddens = 512\nwith strategy.scope():\n net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params)\nstate = net.begin_state(X.shape[0])\nY, new_state = net(X, state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab):\n state = net.begin_state(batch_size=1, dtype=tf.float32)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: tf.reshape(tf.constant([outputs[-1]]),\n (1, 1)).numpy()\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.numpy().argmax(axis=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\npredict_ch8('time traveller ', 10, net, vocab)\ndef grad_clipping(grads, theta):\n theta = tf.constant(theta, dtype=tf.float32)\n new_grad = []\n for grad in grads:\n if isinstance(grad, tf.IndexedSlices):\n new_grad.append(tf.convert_to_tensor(grad))\n else:\n new_grad.append(grad)\n norm = tf.math.sqrt(sum((tf.reduce_sum(grad ** 2)).numpy()\n for grad in new_grad))\n norm = tf.cast(norm, tf.float32)\n if tf.greater(norm, theta):\n for i, grad in enumerate(new_grad):\n new_grad[i] = grad * theta / norm\n else:\n new_grad = new_grad\n return new_grad\ndef train_epoch_ch8(net, train_iter, loss, updater, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], dtype=tf.float32)\n with tf.GradientTape(persistent=True) as g:\n y_hat, state = net(X, state)\n y = tf.reshape(tf.transpose(Y), (-1))\n l = loss(y, y_hat)\n params = net.trainable_variables\n grads = g.gradient(l, params)\n grads = grad_clipping(grads, 1)\n 
updater.apply_gradients(zip(grads, params))\n metric.add(l * d2l.size(y), d2l.size(y))\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, strategy, use_random_iter=False):\n with strategy.scope():\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n updater = tf.keras.optimizers.SGD(lr)\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\n device = d2l.try_gpu()._device_name\nnum_epochs, lr = 500, 1\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, strategy)\nwith strategy.scope():\n net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params)\ntrain_ch8(net, train_iter, vocab_random_iter, lr, num_epochs, strategy, use_random_iter=True)", "mxnet": "%matplotlib inline\nimport math\nfrom mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnpx.one_hot(np.array([0, 2]), len(vocab))\nX = np.arange(10).reshape((2, 5))\nnpx.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n W_xh = normal((num_inputs, num_hiddens))\n W_hh = normal((num_hiddens, num_hiddens))\n b_h = np.zeros(num_hiddens, ctx=device)\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_rnn_state(batch_size, num_hiddens, device):\n return (np.zeros((batch_size, num_hiddens), ctx=device), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = np.tanh(np.dot(X, W_xh) + np.dot(H, W_hh) + b_h)\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens, device)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = npx.one_hot(X.T, self.vocab_size)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size, ctx):\n return self.init_state(batch_size, self.num_hiddens, ctx)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0], d2l.try_gpu())\nY, new_state = net(X.as_in_context(d2l.try_gpu()), state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1, ctx=device)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: np.array([outputs[-1]], ctx=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.argmax(axis=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\npredict_ch8('time 
traveller ', 10, net, vocab, d2l.try_gpu())\ndef grad_clipping(net, theta):\n if isinstance(net, gluon.Block):\n params = [p.data() for p in net.collect_params().values()]\n else:\n params = net.params\n norm = math.sqrt(sum((p.grad ** 2).sum() for p in params))\n if norm > theta:\n for param in params:\n param.grad[:] *= theta / norm\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], ctx=device)\n else:\n for s in state:\n s.detach()\n y = Y.T.reshape(-1)\n X, y = X.as_in_ctx(device), y.as_in_ctx(device)\n with autograd.record():\n y_hat, state = net(X, state)\n l = loss(y_hat, y).mean()\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * d2l.size(y), d2l.size(y))\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = gluon.loss.SoftmaxCrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, gluon.Block):\n net.initialize(ctx=device, force_reinit=True, init=init.Normal(0.01))\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n updater = lambda batch_size: trainer.step(batch_size)\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\nnum_epochs, lr = 500, 1\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu())\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)"} +{"id": 279, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_cell = tf.keras.layers.SimpleRNNCell(num_hiddens, kernel_initializer='glorot_uniform')\nrnn_layer = tf.keras.layers.RNN(rnn_cell, time_major=True, return_sequences=True, return_state=True)\nstate = rnn_cell.get_initial_state(batch_size=batch_size, dtype=tf.float32)\nstate.shape\nX = tf.random.uniform((num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, len(state_new), state_new[0].shape\nclass RNNModel(tf.keras.layers.Layer):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.dense = tf.keras.layers.Dense(vocab_size)\n def call(self, inputs, state):\n X = tf.one_hot(tf.transpose(inputs), self.vocab_size)\n Y, *state = self.rnn(X, state)\n output = self.dense(tf.reshape(Y, (-1, Y.shape[-1])))\n return output, state\n def begin_state(self, *args, **kwargs):\n return self.rnn.cell.get_initial_state(*args, **kwargs)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n net = RNNModel(rnn_layer, vocab_size=len(vocab))\nd2l.predict_ch8('time traveller', 10, net, vocab)\nnum_epochs, lr = 500, 1\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, strategy)", 
"mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn, rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = rnn.RNN(num_hiddens)\nrnn_layer.initialize()\nstate = rnn_layer.begin_state(batch_size=batch_size)\nlen(state), state[0].shape\nX = np.random.uniform(size=(num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, len(state_new), state_new[0].shape\nclass RNNModel(nn.Block):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.dense = nn.Dense(vocab_size)\n def forward(self, inputs, state):\n X = npx.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.dense(Y.reshape(-1, Y.shape[-1]))\n return output, state\n def begin_state(self, *args, **kwargs):\n return self.rnn.begin_state(*args, **kwargs)\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, len(vocab))\nnet.initialize(force_reinit=True, ctx=device)\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)"} +{"id": 280, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32)\n def three():\n return (tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32), tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32)\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n return params\ndef init_gru_state(batch_size, num_hiddens):\n return (tf.zeros((batch_size, num_hiddens)), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n X = tf.reshape(X,[-1,W_xh.shape[0]])\n Z = tf.sigmoid(tf.matmul(X, W_xz) + tf.matmul(H, W_hz) + b_z)\n R = tf.sigmoid(tf.matmul(X, W_xr) + tf.matmul(H, W_hr) + b_r)\n H_tilda = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(R * H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,)\nvocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nnum_epochs, lr = 500, 1\nwith strategy.scope():\n model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_gru_state, gru, get_params)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)\ngru_cell = tf.keras.layers.GRUCell(num_hiddens, kernel_initializer='glorot_uniform')\ngru_layer = tf.keras.layers.RNN(gru_cell, time_major=True, return_sequences=True, return_state=True)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModel(gru_layer, vocab_size=len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)", 
"mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_gru_state(batch_size, num_hiddens, device):\n return (np.zeros(shape=(batch_size, num_hiddens), ctx=device), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = npx.sigmoid(np.dot(X, W_xz) + np.dot(H, W_hz) + b_z)\n R = npx.sigmoid(np.dot(X, W_xr) + np.dot(H, W_hr) + b_r)\n H_tilda = np.tanh(np.dot(X, W_xh) + np.dot(R * H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H,)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\ngru_layer = rnn.GRU(num_hiddens)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"} +{"id": 281, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.Variable(tf.random.normal(shape=shape, stddev=0.01, mean=0, dtype=tf.float32))\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n return params\ndef init_lstm_state(batch_size, num_hiddens):\n return (tf.zeros(shape=(batch_size, num_hiddens)), tf.zeros(shape=(batch_size, num_hiddens)))\ndef lstm(inputs, state, params):\n W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q = params\n (H, C) = state\n outputs = []\n for X in inputs:\n X=tf.reshape(X,[-1,W_xi.shape[0]])\n I = tf.sigmoid(tf.matmul(X, W_xi) + tf.matmul(H, W_hi) + b_i)\n F = tf.sigmoid(tf.matmul(X, W_xf) + tf.matmul(H, W_hf) + b_f)\n O = tf.sigmoid(tf.matmul(X, W_xo) + tf.matmul(H, W_ho) + b_o)\n C_tilda = tf.tanh(tf.matmul(X, W_xc) + tf.matmul(H, W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * tf.tanh(C)\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,C)\nvocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name\nnum_epochs, lr = 500, 1\nstrategy = 
tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_lstm_state, lstm, get_lstm_params)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)\nlstm_cell = tf.keras.layers.LSTMCell(num_hiddens, kernel_initializer='glorot_uniform')\nlstm_layer = tf.keras.layers.RNN(lstm_cell, time_major=True, return_sequences=True, return_state=True)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModel(lstm_layer, vocab_size=len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_lstm_state(batch_size, num_hiddens, device):\n return (np.zeros((batch_size, num_hiddens), ctx=device), np.zeros((batch_size, num_hiddens), ctx=device))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = npx.sigmoid(np.dot(X, W_xi) + np.dot(H, W_hi) + b_i)\n F = npx.sigmoid(np.dot(X, W_xf) + np.dot(H, W_hf) + b_f)\n O = npx.sigmoid(np.dot(X, W_xo) + np.dot(H, W_ho) + b_o)\n C_tilda = np.tanh(np.dot(X, W_xc) + np.dot(H, W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * np.tanh(C)\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nlstm_layer = rnn.LSTM(num_hiddens)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"} +{"id": 282, "tensorflow": "import os\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = tf.constant([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = tf.reduce_sum(\n tf.cast(array != vocab['<pad>'], tf.int32), 1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', tf.cast(X, tf.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', tf.cast(Y, tf.int32))\n print('Valid length of Y:', Y_valid_len)\n break", "mxnet": "import os\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = 
[l + [vocab['<eos>']] for l in lines]\n array = np.array([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = (array != vocab['<pad>']).astype(np.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(np.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.astype(np.int32))\n print('Valid length of Y:', Y_valid_len)\n break"} +{"id": 283, "paddle": "x = paddle.arange(12)\nx.numel()\nX = paddle.reshape(x, (3, 4))\npaddle.zeros((2, 3, 4))\npaddle.ones((2, 3, 4))\npaddle.randn((3, 4),'float32')\npaddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = paddle.to_tensor([1.0, 2, 4, 8])\ny = paddle.to_tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x**y\npaddle.exp(x)\nX = paddle.arange(12, dtype='float32').reshape((3, 4))\nY = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\npaddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1)\nX.sum()\na = paddle.reshape(paddle.arange(3), (3, 1))\nb = paddle.reshape(paddle.arange(2), (1, 2))\nX[1, 2] = 9\nX[0:2, :] = 12\nZ = paddle.zeros_like(Y)\nZ = X + Y\nbefore = id(X)\nX += Y\nid(X) == before\nA = X.numpy()\nB = paddle.to_tensor(A)\ntype(A), type(B)\na = paddle.to_tensor([3.5])\na, a.item(), float(a), int(a)", "tensorflow": "x = tf.range(12)\ntf.size(x)\nX = tf.reshape(x, (3, 4))\ntf.zeros((2, 3, 4))\ntf.ones((2, 3, 4))\ntf.random.normal(shape=[3, 4])\ntf.constant([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = tf.constant([1.0, 2, 4, 8])\ny = tf.constant([2.0, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\ntf.exp(x)\nX = tf.reshape(tf.range(12, dtype=tf.float32), (3, 4))\nY = tf.constant([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\ntf.concat([X, Y], axis=0), tf.concat([X, Y], axis=1)\ntf.reduce_sum(X)\na = tf.reshape(tf.range(3), (3, 1))\nb = tf.reshape(tf.range(2), (1, 2))\nX_var = tf.Variable(X)\nX_var[1, 2].assign(9)\nX_var = tf.Variable(X)\nX_var[0:2, :].assign(tf.ones(X_var[0:2,:].shape, dtype = tf.float32) * 12)\nZ = tf.Variable(tf.zeros_like(Y))\nZ.assign(X + Y)\n@tf.function\ndef computation(X, Y):\n Z = tf.zeros_like(Y)\n A = X + Y\n B = A + Y\n C = B + Y\n return C + Y\ncomputation(X, Y)\nA = X.numpy()\nB = tf.constant(A)\na = tf.constant([3.5]).numpy()\nprint(a, a.item(), float(a), int(a))"} +{"id": 284, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nX, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values)", "tensorflow": "import tensorflow as tf\nX, y = tf.constant(inputs.values), tf.constant(outputs.values)"} +{"id": 285, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nx = paddle.to_tensor([3.0])\ny = paddle.to_tensor([2.0])\nx + y, x * y, x / y, x**y\nx = paddle.arange(4)\nA = paddle.reshape(paddle.arange(20), (5, 4))\npaddle.transpose(A, perm=[1, 0])\nB = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == paddle.transpose(B, perm=[1, 0])\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\nA = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4))\nB = A.clone()\nA, A + B\na = 2\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\na + X, (a * X).shape\nx = paddle.arange(4, dtype=paddle.float32)\nprint(x, x.sum())\nA.shape, A.sum()\nA_sum_axis0 = A.sum(axis=0)\nA_sum_axis1 = A.sum(axis=1)\nA.sum(axis=[0, 1])\nA.mean(), A.sum() / A.numel()\nA.mean(axis=0), A.sum(axis=0) / A.shape[0]\nsum_A = paddle.sum(A, axis=1, keepdim=True)\nA.cumsum(axis=0)\ny = 
paddle.ones(shape=[4], dtype='float32')\nx, y, paddle.dot(x, y)\npaddle.sum(x * y)\nA.shape, x.shape, paddle.mv(A, x)\nB = paddle.ones(shape=[4, 3], dtype='float32')\npaddle.mm(A, B)\nu = paddle.to_tensor([3.0, -4.0])\npaddle.norm(u)\npaddle.abs(u).sum()\npaddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))", "tensorflow": "import tensorflow as tf\nx = tf.constant(3.0)\ny = tf.constant(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = tf.range(4)\nA = tf.reshape(tf.range(20), (5, 4))\ntf.transpose(A)\nB = tf.constant([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == tf.transpose(B)\nX = tf.reshape(tf.range(24), (2, 3, 4))\nA = tf.reshape(tf.range(20, dtype=tf.float32), (5, 4))\nB = A\nprint(A, A + B)\na = 2\nX = tf.reshape(tf.range(24), (2, 3, 4))\nprint(a + X, (a * X).shape)\nx = tf.range(4, dtype=tf.float32)\nprint(x, tf.reduce_sum(x))\na = tf.reduce_sum(A)\nA_sum_axis0 = tf.reduce_sum(A, axis=0)\nA_sum_axis1 = tf.reduce_sum(A, axis=1)\ntf.reduce_sum(A, axis=[0, 1])\ntf.reduce_mean(A)\ntf.reduce_sum(A) / tf.size(A).numpy()\ntf.reduce_mean(A, axis=0)\ntf.reduce_sum(A, axis=0) / A.shape[0]\nsum_A = tf.reduce_sum(A, axis=1, keepdims=True)\ntf.cumsum(A, axis=0)\ny = tf.ones(4, dtype=tf.float32)\nprint(tf.tensordot(x, y, axes=1))\ntf.reduce_sum(x * y)\nA.shape, x.shape, tf.linalg.matvec(A, x)\nB = tf.ones((4, 3), tf.float32)\ntf.matmul(A, B)\nu = tf.constant([3.0, -4.0])\ntf.norm(u)\ntf.reduce_sum(tf.abs(u))\ntf.norm(tf.ones((4, 9)))"} +{"id": 286, "paddle": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import paddle as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1", "tensorflow": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import tensorflow as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1"} +{"id": 287, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nx = paddle.arange(4, dtype='float32')\nx = paddle.to_tensor(x, stop_gradient=False)\ny = 2 * paddle.dot(x, x)\ny.backward()\nx.grad\nx.grad == 4 * x\nx.clear_gradient()\ny = paddle.sum(x)\ny.backward()\nx.grad\nx.clear_gradient()\ny = x * x\npaddle.sum(y).backward()\nx.grad\nx.clear_gradient()\ny = x * x\nu = y.detach()\nz = u * x\npaddle.sum(z).backward()\nx.grad == u\nx.clear_gradient()\npaddle.sum(y).backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while paddle.norm(b) < 1000:\n b = b * 2\n if paddle.sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False)\nd = f(a)\nd.backward()\na.grad == d / a", "tensorflow": "import tensorflow as tf\nx = tf.range(4, dtype=tf.float32)\nx = tf.Variable(x)\nwith tf.GradientTape() as t:\n y = 2 * tf.tensordot(x, x, axes=1)\nx_grad = t.gradient(y, x)\nx_grad\nx_grad == 4 * x\nwith tf.GradientTape() as t:\n y = tf.reduce_sum(x)\nt.gradient(y, x)\nwith tf.GradientTape() as t:\n y = x * x\nt.gradient(y, x)\nwith tf.GradientTape(persistent=True) as t:\n y = x * x\n u = tf.stop_gradient(y)\n z = u * x\nx_grad = t.gradient(z, x)\nx_grad == u\nt.gradient(y, x) == 2 * x\ndef f(a):\n b = a * 2\n while tf.norm(b) < 1000:\n b = b * 2\n if tf.reduce_sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = 
tf.Variable(tf.random.normal(shape=()))\nwith tf.GradientTape() as t:\n d = f(a)\nd_grad = t.gradient(d, a)\nd_grad\nd_grad == d / a"} +{"id": 288, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport numpy as np\nimport paddle\nfair_probs = [1.0 / 6] * 6\npaddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample()\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000", "tensorflow": "%matplotlib inline\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom d2l import tensorflow as d2l\nfair_probs = tf.ones(6) / 6\ntfp.distributions.Multinomial(1, fair_probs).sample()\ntfp.distributions.Multinomial(10, fair_probs).sample()\ncounts = tfp.distributions.Multinomial(1000, fair_probs).sample()"} +{"id": 289, "paddle": "counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1))\ncum_counts = counts.cumsum(axis=0)\ncum_counts = cum_counts.squeeze(axis=1)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdim=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i],\n label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend()\nimport warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nhelp(paddle.ones)\npaddle.ones([4], dtype='float32')", "tensorflow": "counts = tfp.distributions.Multinomial(10, fair_probs).sample(500)\ncum_counts = tf.cumsum(counts, axis=0)\nestimates = cum_counts / tf.reduce_sum(cum_counts, axis=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].numpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nimport tensorflow as tf\na = dir(tf.random)\nhelp(tf.ones)\ntf.ones(4)"} +{"id": 290, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport time\nimport numpy as np\nimport paddle\nn = 10000\na = paddle.ones([n])\nb = paddle.ones([n])\nc = paddle.zeros([n])\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x',\n ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])", "tensorflow": "%matplotlib inline\nimport math\nimport time\nimport numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn = 10000\na = tf.ones(n)\nb = tf.ones(n)\nc = tf.Variable(tf.zeros(n))\ntimer = Timer()\nfor i in range(n):\n c[i].assign(a[i] + b[i])\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"} +{"id": 291, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ndef synthetic_data(w, b, num_examples):\n X = paddle.normal(0, 1, 
(num_examples, len(w)))\n y = paddle.matmul(X, w) + b\n y += paddle.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n break\nw = paddle.normal(0, 0.01, shape=(2,1))\nb = paddle.zeros(shape=[1])\nw.stop_gradient = False\nb.stop_gradient = False\ndef linreg(X, w, b):\n return paddle.matmul(X, w) + b\ndef squared_loss(y_hat, y):\n return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2\ndef sgd(params, lr, batch_size):\n with paddle.no_grad():\n for i, param in enumerate(params):\n param -= lr * params[i].grad / batch_size\n params[i].set_value(param)\n params[i].clear_gradient()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with paddle.no_grad():\n train_l = loss(net(features, w, b), labels)", "tensorflow": "%matplotlib inline\nimport random\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef synthetic_data(w, b, num_examples):\n X = tf.zeros((num_examples, w.shape[0]))\n X += tf.random.normal(shape=X.shape)\n y = tf.matmul(X, tf.reshape(w, (-1, 1))) + b\n y += tf.random.normal(shape=y.shape, stddev=0.01)\n y = tf.reshape(y, (-1, 1))\n return X, y\ntrue_w = tf.constant([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].numpy(), labels.numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n j = tf.constant(indices[i: min(i + batch_size, num_examples)])\n yield tf.gather(features, j), tf.gather(labels, j)\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n print(X, '\\n', y)\n break\nw = tf.Variable(tf.random.normal(shape=(2, 1), mean=0, stddev=0.01), trainable=True)\nb = tf.Variable(tf.zeros(1), trainable=True)\ndef linreg(X, w, b):\n return tf.matmul(X, w) + b\ndef squared_loss(y_hat, y):\n return (y_hat - tf.reshape(y, y_hat.shape)) ** 2 / 2\ndef sgd(params, grads, lr, batch_size):\n for param, grad in zip(params, grads):\n param.assign_sub(lr*grad/batch_size)\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n with tf.GradientTape() as g:\n l = loss(net(X, w, b), y)\n dw, db = g.gradient(l, [w, b])\n sgd([w, b], [dw, db], lr, batch_size)\n train_l = loss(net(features, w, b), labels)"} +{"id": 292, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport paddle\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = paddle.io.TensorDataset(data_arrays)\n return paddle.io.DataLoader(dataset, 
batch_size=batch_size, shuffle=is_train, return_list=True)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01))\nbias_attr = paddle.ParamAttr(initializer=None)\nnet = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr))\nloss = nn.MSELoss()\ntrainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters())\nw = net[0].weight\nb = net[0].bias", "tensorflow": "import numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntrue_w = tf.constant([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = tf.data.Dataset.from_tensor_slices(data_arrays)\n if is_train:\n dataset = dataset.shuffle(buffer_size=1000)\n dataset = dataset.batch(batch_size)\n return dataset\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nnet = tf.keras.Sequential()\nnet.add(tf.keras.layers.Dense(1))\ninitializer = tf.initializers.RandomNormal(stddev=0.01)\nnet = tf.keras.Sequential()\nnet.add(tf.keras.layers.Dense(1, kernel_initializer=initializer))\nloss = tf.keras.losses.MeanSquaredError()\ntrainer = tf.keras.optimizers.SGD(learning_rate=0.03)\nw = net.get_weights()[0]\nb = net.get_weights()[1]"} +{"id": 293, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport sys\nimport paddle\nfrom paddle.vision import transforms\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\nmnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\nlen(mnist_train), len(mnist_test)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if paddle.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ndef get_dataloader_workers():\n return 4\ntrain_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\n mnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\n return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()),\n paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, num_workers=get_dataloader_workers()))", "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nd2l.use_svg_display()\nmnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()\nlen(mnist_train[0]), len(mnist_test[0])\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n 
figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(img.numpy())\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX = tf.constant(mnist_train[0][:18])\ny = tf.constant(mnist_train[1][:18])\nshow_images(X, 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ntrain_iter = tf.data.Dataset.from_tensor_slices(mnist_train).batch(batch_size).shuffle(len(mnist_train[0]))\ndef load_data_fashion_mnist(batch_size, resize=None):\n mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()\n process = lambda X, y: (tf.expand_dims(X, axis=3) / 255, tf.cast(y, dtype='int32'))\n resize_fn = lambda X, y: (tf.image.resize_with_pad(X, resize, resize) if resize else X, y)\n return (tf.data.Dataset.from_tensor_slices(process(*mnist_train)).batch(batch_size).shuffle(len(mnist_train[0])).map(resize_fn),\n tf.data.Dataset.from_tensor_slices(process(*mnist_test)).batch(batch_size).map(resize_fn))"} +{"id": 294, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom IPython import display\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs))\nb = paddle.zeros(shape=(num_outputs,))\nW.stop_gradient=False\nb.stop_gradient=False\nX = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = paddle.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = paddle.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = paddle.to_tensor([0, 2])\ny_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n if len(y_hat.shape) < len(y.shape):\n cmp = y_hat.astype(y.dtype) == y.squeeze()\n else:\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, paddle.nn.Layer):\n net.eval()\n metric = Accumulator(2)\n with paddle.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, paddle.nn.Layer):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]\nlr = 0.1\ndef updater(batch_size):\n return d2l.sgd([W, b], lr, batch_size)\ndef predict_ch3(net, test_iter, n=6):\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))\n titles = [true +'\\n' + pred for true, pred in zip(trues, preds)]\n 
d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])\npredict_ch3(net, test_iter)", "tensorflow": "import tensorflow as tf\nfrom IPython import display\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = tf.Variable(tf.random.normal(shape=(num_inputs, num_outputs), mean=0, stddev=0.01))\nb = tf.Variable(tf.zeros(num_outputs))\nX = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\ntf.reduce_sum(X, 0, keepdims=True), tf.reduce_sum(X, 1, keepdims=True)\ndef softmax(X):\n X_exp = tf.exp(X)\n partition = tf.reduce_sum(X_exp, 1, keepdims=True)\n return X_exp / partition\nX = tf.random.normal((2, 5), 0, 1)\nX_prob = softmax(X)\nX_prob, tf.reduce_sum(X_prob, 1)\ndef net(X):\n return softmax(tf.matmul(tf.reshape(X, (-1, W.shape[0])), W) + b)\ny_hat = tf.constant([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny = tf.constant([0, 2])\ntf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1]))\ndef cross_entropy(y_hat, y):\n return -tf.math.log(tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1])))\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = tf.argmax(y_hat, axis=1)\n cmp = tf.cast(y_hat, y.dtype) == y\n return float(tf.reduce_sum(tf.cast(cmp, y.dtype)))\ndef evaluate_accuracy(net, data_iter):\n metric = Accumulator(2)\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), d2l.size(y))\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n metric = Accumulator(3)\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n y_hat = net(X)\n if isinstance(loss, tf.keras.losses.Loss):\n l = loss(y, y_hat)\n else:\n l = loss(y_hat, y)\n if isinstance(updater, tf.keras.optimizers.Optimizer):\n params = net.trainable_variables\n grads = tape.gradient(l, params)\n updater.apply_gradients(zip(grads, params))\n else:\n updater(X.shape[0], tape.gradient(l, updater.params))\n l_sum = l * float(tf.size(y)) if isinstance(loss, tf.keras.losses.Loss) else tf.reduce_sum(l)\n metric.add(l_sum, accuracy(y_hat, y), tf.size(y))\n return metric[0] / metric[2], metric[1] / metric[2]\nclass Updater():\n def __init__(self, params, lr):\n self.params = params\n self.lr = lr\n def __call__(self, batch_size, grads):\n d2l.sgd(self.params, grads, self.lr, batch_size)\nupdater = Updater([W, b], lr=0.1)\ndef predict_ch3(net, test_iter, n=6):\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(tf.argmax(net(X), axis=1))\n titles = [true +'\\n' + pred for true, pred in zip(trues, preds)]\n d2l.show_images(tf.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n])\npredict_ch3(net, test_iter)"} +{"id": 295, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.Normal(m.weight, std=0.01)\nnet.apply(init_weights);\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = 
tf.keras.models.Sequential()\nnet.add(tf.keras.layers.Flatten(input_shape=(28, 28)))\nweight_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.01)\nnet.add(tf.keras.layers.Dense(10, kernel_initializer=weight_initializer))\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrainer = tf.keras.optimizers.SGD(learning_rate=.1)"} +{"id": 296, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(-8.0, 8.0, 0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.relu(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = paddle.nn.functional.sigmoid(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = paddle.tanh(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))", "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nx = tf.Variable(tf.range(-8.0, 8.0, 0.1), dtype=tf.float32)\ny = tf.nn.relu(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.relu(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of sigmoid',\n figsize=(5, 2.5))\ny = tf.nn.tanh(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.tanh(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))"} +{"id": 297, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = paddle.randn([num_inputs, num_hiddens]) * 0.01\nW1.stop_gradient = False\nb1 = paddle.zeros([num_hiddens])\nb1.stop_gradient = False\nW2 = paddle.randn([num_hiddens, num_outputs]) * 0.01\nW2.stop_gradient = False\nb2 = paddle.zeros([num_outputs])\nb2.stop_gradient = False\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = paddle.zeros_like(X)\n return paddle.maximum(X, a)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(X@W1 + b1)\n return (H@W2 + b2)\nloss = nn.CrossEntropyLoss(reduction='none')\nnum_epochs, lr = 10, 0.1\nupdater = paddle.optimizer.SGD(learning_rate=lr, parameters=params)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = tf.Variable(tf.random.normal(shape=(num_inputs, num_hiddens), mean=0, stddev=0.01))\nb1 = tf.Variable(tf.zeros(num_hiddens))\nW2 = 
tf.Variable(tf.random.normal(shape=(num_hiddens, num_outputs), mean=0, stddev=0.01))\nb2 = tf.Variable(tf.zeros(num_outputs))\nparams = [W1, b1, W2, b2]\ndef relu(X):\n return tf.math.maximum(X, 0)\ndef net(X):\n X = tf.reshape(X, (-1, num_inputs))\n H = relu(tf.matmul(X, W1) + b1)\n return tf.matmul(H, W2) + b2\ndef loss(y_hat, y):\n return tf.losses.sparse_categorical_crossentropy(y, y_hat, from_logits=True)\nnum_epochs, lr = 10, 0.1\nupdater = d2l.Updater([W1, W2, b1, b2], lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)"} +{"id": 298, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Linear(256, 10))\nfor layer in net:\n if type(layer) == nn.Linear:\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=0.01))\n layer.weight_attr = weight_attr\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nnet = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(10)])\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 299, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport numpy as np\nimport paddle\nfrom paddle import nn\ntrue_w, features, poly_features, labels = [paddle.to_tensor(x, dtype=\n paddle.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n out = net(X)\n y = y.reshape(out.shape)\n l = loss(out, y)\n metric.add(l.sum(), l.numel())\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels,\n num_epochs=400):\n loss = nn.MSELoss()\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array(((train_features, train_labels.reshape([-1,1]))), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, is_train=False)\n trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2],\n labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :],\n labels[:n_train], labels[n_train:], num_epochs=1500)", "tensorflow": "import 
math\nimport numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntrue_w, features, poly_features, labels = [tf.constant(x, dtype=tf.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(tf.reduce_sum(l), d2l.size(l))\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = tf.losses.MeanSquaredError()\n input_shape = train_features.shape[-1]\n net = tf.keras.Sequential()\n net.add(tf.keras.layers.Dense(1, use_bias=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False)\n trainer = tf.keras.optimizers.SGD(learning_rate=.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:], num_epochs=1500)"} +{"id": 300, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = paddle.normal(0, 1, shape=(num_inputs, 1))\n w.stop_gradient = False\n b = paddle.zeros(shape=[1])\n b.stop_gradient = False\n return [w, b]\ndef l2_penalty(w):\n return paddle.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter():\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n bias_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n net = nn.Sequential(nn.Linear(num_inputs, 1, weight_attr=weight_attr, bias_attr=bias_attr))\n loss = nn.MSELoss()\n num_epochs, lr = 100, 0.003\n trainer = paddle.optimizer.SGD(parameters=net[0].parameters(), learning_rate=lr, weight_decay=wd*1.0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n 
trainer.step()\n trainer.clear_grad()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))", "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = tf.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = tf.Variable(tf.random.normal(mean=1, shape=(num_inputs, 1)))\n b = tf.Variable(tf.zeros(shape=(1, )))\n return [w, b]\ndef l2_penalty(w):\n return tf.reduce_sum(tf.pow(w, 2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n l = loss(net(X), y) + lambd * l2_penalty(w)\n grads = tape.gradient(l, [w, b])\n d2l.sgd([w, b], grads, lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = tf.keras.models.Sequential()\n net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(wd)))\n net.build(input_shape=(1, num_inputs))\n w, b = net.trainable_variables\n loss = tf.keras.losses.MeanSquaredError()\n num_epochs, lr = 100, 0.003\n trainer = tf.keras.optimizers.SGD(learning_rate=lr)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n l = loss(net(X), y) + net.losses\n grads = tape.gradient(l, net.trainable_variables)\n trainer.apply_gradients(zip(grads, net.trainable_variables))\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"} +{"id": 301, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport random\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return paddle.zeros_like(X)\n if dropout == 0:\n return X\n mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32')\n return mask * X / (1.0 - dropout)\nX= paddle.arange(16, dtype = paddle.float32).reshape((2, 8))\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Layer):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,\n is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = 
dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01))\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10, weight_attr=weight_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return tf.zeros_like(X)\n if dropout == 0:\n return X\n mask = tf.random.uniform(shape=tf.shape(X), minval=0, maxval=1) < 1 - dropout\n return tf.cast(mask, dtype=tf.float32) * X / (1.0 - dropout)\nX = tf.reshape(tf.range(16, dtype=tf.float32), (2, 8))\nnum_outputs, num_hiddens1, num_hiddens2 = 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(tf.keras.Model):\n def __init__(self, num_outputs, num_hiddens1, num_hiddens2):\n super().__init__()\n self.input_layer = tf.keras.layers.Flatten()\n self.hidden1 = tf.keras.layers.Dense(num_hiddens1, activation='relu')\n self.hidden2 = tf.keras.layers.Dense(num_hiddens2, activation='relu')\n self.output_layer = tf.keras.layers.Dense(num_outputs)\n def call(self, inputs, training=None):\n x = self.input_layer(inputs)\n x = self.hidden1(x)\n if training:\n x = dropout_layer(x, dropout1)\n x = self.hidden2(x)\n if training:\n x = dropout_layer(x, dropout2)\n x = self.output_layer(x)\n return x\nnet = Net(num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dropout(dropout1),\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dropout(dropout2),\n tf.keras.layers.Dense(10),\n])\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 302, "paddle": "trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.sigmoid(x)\ny.backward(paddle.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()],\n legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = paddle.normal(0, 1, shape=(4,4))\nfor i in range(100):\n M = paddle.mm(M, paddle.normal(0, 1, shape=(4, 4)))", "tensorflow": "trainer = 
tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nx = tf.Variable(tf.range(-8.0, 8.0, 0.1))\nwith tf.GradientTape() as t:\n y = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), [y.numpy(), t.gradient(y, x).numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = tf.random.normal((4, 4))\nfor i in range(100):\n M = tf.matmul(M, tf.random.normal((4, 4)))"} +{"id": 303, "paddle": "%matplotlib inline\nimport warnings\nimport numpy as np\nimport pandas as pd\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\nn_train = train_data.shape[0]\ntrain_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32)\ntest_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32)\ntrain_labels = paddle.to_tensor(\n train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32)\nloss = nn.MSELoss()\nin_features = train_features.shape[1]\ndef get_net():\n net = nn.Sequential(nn.Linear(in_features,1))\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = paddle.clip(net(features), 1, float('inf'))\n rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, parameters=net.parameters(), weight_decay=weight_decay*1.0)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n optimizer.clear_grad()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = paddle.concat([X_train, X_part], 0)\n y_train = paddle.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).detach().numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)", "tensorflow": "%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn_train = train_data.shape[0]\ntrain_features = tf.constant(all_features[:n_train].values, dtype=tf.float32)\ntest_features = tf.constant(all_features[n_train:].values, dtype=tf.float32)\ntrain_labels = 
tf.constant(train_data.SalePrice.values.reshape(-1, 1), dtype=tf.float32)\nloss = tf.keras.losses.MeanSquaredError()\ndef get_net():\n net = tf.keras.models.Sequential()\n net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(weight_decay)))\n return net\ndef log_rmse(y_true, y_pred):\n clipped_preds = tf.clip_by_value(y_pred, 1, float('inf'))\n return tf.sqrt(tf.reduce_mean(loss(tf.math.log(y_true), tf.math.log(clipped_preds))))\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n net.compile(loss=loss, optimizer=optimizer)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n y_hat = net(X)\n l = loss(y, y_hat)\n params = net.trainable_variables\n grads = tape.gradient(l, params)\n optimizer.apply_gradients(zip(grads, params))\n train_ls.append(log_rmse(train_labels, net(train_features)))\n if test_labels is not None:\n test_ls.append(log_rmse(test_labels, net(test_features)))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = tf.concat([X_train, X_part], 0)\n y_train = tf.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)"} +{"id": 304, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = paddle.rand([2, 20])\nnet(X)\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nclass MySequential(nn.Layer):\n def __init__(self, *layers):\n super(MySequential, self).__init__()\n if len(layers) > 0 and isinstance(layers[0], tuple):\n for name, layer in layers:\n self.add_sublayer(name, layer)\n else:\n for idx, layer in enumerate(layers):\n self.add_sublayer(str(idx), layer)\n def forward(self, X):\n for layer in self._sub_layers.values():\n X = layer(X)\n return X\nnet = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nnet(X)\nclass FixedHiddenMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.rand_weight = paddle.rand([20, 20])\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return 
X.sum()\nclass NestMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),\n nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)", "tensorflow": "import tensorflow as tf\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((2, 20))\nnet(X)\nclass MLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)\n self.out = tf.keras.layers.Dense(units=10)\n def call(self, X):\n return self.out(self.hidden((X)))\nclass MySequential(tf.keras.Model):\n def __init__(self, *args):\n super().__init__()\n self.modules = []\n for block in args:\n self.modules.append(block)\n def call(self, X):\n for module in self.modules:\n X = module(X)\n return X\nnet = MySequential(\n tf.keras.layers.Dense(units=256, activation=tf.nn.relu),\n tf.keras.layers.Dense(10))\nnet(X)\nclass FixedHiddenMLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.flatten = tf.keras.layers.Flatten()\n self.rand_weight = tf.constant(tf.random.uniform((20, 20)))\n self.dense = tf.keras.layers.Dense(20, activation=tf.nn.relu)\n def call(self, inputs):\n X = self.flatten(inputs)\n X = tf.nn.relu(tf.matmul(X, self.rand_weight) + 1)\n X = self.dense(X)\n while tf.reduce_sum(tf.math.abs(X)) > 1:\n X /= 2\n return tf.reduce_sum(X)\nclass NestMLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.net = tf.keras.Sequential()\n self.net.add(tf.keras.layers.Dense(64, activation=tf.nn.relu))\n self.net.add(tf.keras.layers.Dense(32, activation=tf.nn.relu))\n self.dense = tf.keras.layers.Dense(16, activation=tf.nn.relu)\n def call(self, inputs):\n return self.dense(self.net(inputs))\nchimera = tf.keras.Sequential()\nchimera.add(NestMLP())\nchimera.add(tf.keras.layers.Dense(20))\nchimera.add(FixedHiddenMLP())\nchimera(X)"} +{"id": 305, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = paddle.rand([2, 4])\nnet(X)\nnet.state_dict()['2.bias']\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_sublayer(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Normal(mean=0.0, std=0.01)\n paddle.zeros(m.bias)\nnet.apply(init_normal)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef init_constant(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(value = 1)\n paddle.zeros(m.bias)\nnet.apply(init_constant)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef xavier(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.XavierUniform(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(42)\nnet[0].apply(xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n print(\"Init\", *[(name, param.shape) for name, param in m.named_parameters()][0])\n paddle.nn.initializer.XavierUniform(m.weight, -10, 10)\n h = paddle.abs(m.weight) >= 5\n h = paddle.to_tensor(h)\n m = paddle.to_tensor(m.weight)\n m *= h\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.set_value(net[0].weight.numpy() + 
1)\nval = net[0].weight.numpy()\nval[0, 0] = 42\nnet[0].weight.set_value(val)\nnet[0].weight[0]\nlayer = CenteredLayer()\nlayer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32'))\nnet = nn.Sequential(nn.Linear(8, 128), CenteredLayer())", "tensorflow": "import tensorflow as tf\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu),\n tf.keras.layers.Dense(1),\n])\nX = tf.random.uniform((2, 4))\nnet(X)\nnet.get_weights()[1]\ndef block1(name):\n return tf.keras.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu)], name=name)\ndef block2():\n net = tf.keras.Sequential()\n for i in range(4):\n net.add(block1(name=f'block-{i}'))\n return net\nrgnet = tf.keras.Sequential()\nrgnet.add(block2())\nrgnet.add(tf.keras.layers.Dense(1))\nrgnet(X)\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu,\n kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.01), bias_initializer=tf.zeros_initializer()),\n tf.keras.layers.Dense(1)])\nnet(X)\nnet.weights[0], net.weights[1]\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.Constant(1), bias_initializer=tf.zeros_initializer()),\n tf.keras.layers.Dense(1),\n])\nnet(X)\nnet.weights[0], net.weights[1]\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.GlorotUniform()),\n tf.keras.layers.Dense(1, kernel_initializer=tf.keras.initializers.Constant(1)),\n])\nnet(X)\nclass MyInit(tf.keras.initializers.Initializer):\n def __call__(self, shape, dtype=None):\n data=tf.random.uniform(shape, -10, 10, dtype=dtype)\n factor=(tf.abs(data) >= 5)\n factor=tf.cast(factor, tf.float32)\n return data * factor\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=MyInit()),\n tf.keras.layers.Dense(1)])\nnet(X)\nnet.layers[1].weights[0][:].assign(net.layers[1].weights[0] + 1)\nnet.layers[1].weights[0][0, 0].assign(42)\nnet.layers[1].weights[0]\nlayer = CenteredLayer()\nlayer(tf.constant([1, 2, 3, 4, 5]))\nnet = tf.keras.Sequential([tf.keras.layers.Dense(128), CenteredLayer()])"} +{"id": 306, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nclass CenteredLayer(nn.Layer):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(paddle.rand([4, 8]))\nY.mean()\nclass MyLinear(nn.Layer):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = paddle.create_parameter(shape=(in_units, units), dtype='float32')\n self.bias = paddle.create_parameter(shape=(units,), dtype='float32')\n def forward(self, X):\n linear = paddle.matmul(X, self.weight) + self.bias\n return F.relu(linear)\nlinear = MyLinear(5, 3)\nlinear.weight\nlinear(paddle.randn([2, 5]))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(paddle.rand([2, 64]))", "tensorflow": "import tensorflow as tf\nclass CenteredLayer(tf.keras.Model):\n def __init__(self):\n super().__init__()\n def call(self, inputs):\n return inputs - tf.reduce_mean(inputs)\nY = net(tf.random.uniform((4, 8)))\ntf.reduce_mean(Y)\nclass MyDense(tf.keras.Model):\n def __init__(self, units):\n super().__init__()\n self.units = units\n def build(self, 
X_shape):\n self.weight = self.add_weight(name='weight',\n shape=[X_shape[-1], self.units],\n initializer=tf.random_normal_initializer())\n self.bias = self.add_weight(\n name='bias', shape=[self.units],\n initializer=tf.zeros_initializer())\n def call(self, X):\n linear = tf.matmul(X, self.weight) + self.bias\n return tf.nn.relu(linear)\ndense = MyDense(3)\ndense(tf.random.uniform((2, 5)))\ndense.get_weights()\ndense(tf.random.uniform((2, 5)))\nnet = tf.keras.models.Sequential([MyDense(8), MyDense(1)])\nnet(tf.random.uniform((2, 64)))"} +{"id": 307, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nx = paddle.arange(4)\npaddle.save(x, 'x-file')\nx2 = paddle.load('x-file')\ny = paddle.zeros([4])\npaddle.save([x,y], 'x-file')\nx2, y2 = paddle.load('x-file')\nmydict = {'x': x, 'y': y}\npaddle.save(mydict, 'mydict')\nmydict2 = paddle.load('mydict')\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = paddle.randn(shape=[2, 20])\nY = net(X)\npaddle.save(net.state_dict(), 'mlp.pdparams')\nclone = MLP()\nclone.set_state_dict(paddle.load('mlp.pdparams'))\nclone.eval()", "tensorflow": "import numpy as np\nimport tensorflow as tf\nx = tf.range(4)\nnp.save('x-file.npy', x)\nx2 = np.load('x-file.npy', allow_pickle=True)\ny = tf.zeros(4)\nnp.save('xy-files.npy', [x, y])\nx2, y2 = np.load('xy-files.npy', allow_pickle=True)\nmydict = {'x': x, 'y': y}\nnp.save('mydict.npy', mydict)\nmydict2 = np.load('mydict.npy', allow_pickle=True)\nclass MLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.flatten = tf.keras.layers.Flatten()\n self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)\n self.out = tf.keras.layers.Dense(units=10)\n def call(self, inputs):\n x = self.flatten(inputs)\n x = self.hidden(x)\n return self.out(x)\nnet = MLP()\nX = tf.random.uniform((2, 20))\nY = net(X)\nnet.save_weights('mlp.params')\nclone = MLP()\nclone.load_weights('mlp.params')"} +{"id": 308, "paddle": "import paddle\nfrom paddle import nn\npaddle.device.set_device(\"cpu\"), paddle.CUDAPlace(0), paddle.CUDAPlace(1)\npaddle.device.cuda.device_count()\n if paddle.device.cuda.device_count() >= i + 1:\n return paddle.CUDAPlace(i)\n return paddle.CPUPlace()\ndef try_all_gpus():\n devices = [paddle.CUDAPlace(i) for i in range(paddle.device.cuda.device_count())]\n return devices if devices else paddle.CPUPlace()\ntry_gpu(),try_gpu(10),try_all_gpus()\nx = paddle.to_tensor([1, 2, 3])\nx.place\nX = paddle.to_tensor(paddle.ones(shape=[2, 3]), place=try_gpu())\nY = paddle.to_tensor(paddle.rand([2, 3]), place=try_gpu(1))\nZ = X.cuda(1)\nZ.cuda(1) is Z\nnet = nn.Sequential(nn.Linear(3, 1))\nnet=net.to(try_gpu())\nnet[0].weight.place", "tensorflow": "import tensorflow as tf\ntf.device('/CPU:0'), tf.device('/GPU:0'), tf.device('/GPU:1')\nlen(tf.config.experimental.list_physical_devices('GPU'))\ndef try_gpu(i=0):\n if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1:\n return tf.device(f'/GPU:{i}')\n return tf.device('/CPU:0')\ndef try_all_gpus():\n num_gpus = len(tf.config.experimental.list_physical_devices('GPU'))\n devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)]\n return devices if devices else [tf.device('/CPU:0')]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = tf.constant([1, 2, 3])\nx.device\nwith try_gpu():\n X = 
tf.ones((2, 3))\nwith try_gpu(1):\n Y = tf.random.uniform((2, 3))\nwith try_gpu(1):\n Z = X\nwith try_gpu(1):\n Z2 = Z\nZ2 is Z\nstrategy = tf.distribute.MirroredStrategy()\nwith strategy.scope():\n net = tf.keras.models.Sequential([\n tf.keras.layers.Dense(1)])\nnet.layers[0].weights[0].device, net.layers[0].weights[1].device"} +{"id": 309, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef corr2d(X, K):\n h, w = K.shape\n Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Layer):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = paddle.ParamAttr(paddle.rand(kernel_size))\n self.bias = paddle.ParamAttr(paddle.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = paddle.ones((6, 8))\nX[:, 2:6] = 0\nK = paddle.to_tensor([[1.0, -1.0]])\ncorr2d(X.t(), K)\nconv2d = nn.Conv2D(1, 1, kernel_size=(1, 2))\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.clear_gradients()\n l.sum().backward()\n with paddle.no_grad():\n conv2d.weight[:] -= lr * conv2d.weight.grad\nconv2d.weight.reshape((1, 2))", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef corr2d(X, K):\n h, w = K.shape\n Y = tf.Variable(tf.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j].assign(tf.reduce_sum(\n X[i: i + h, j: j + w] * K))\n return Y\nX = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = tf.constant([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(tf.keras.layers.Layer):\n def __init__(self):\n super().__init__()\n def build(self, kernel_size):\n initializer = tf.random_normal_initializer()\n self.weight = self.add_weight(name='w', shape=kernel_size, initializer=initializer)\n self.bias = self.add_weight(name='b', shape=(1, ), initializer=initializer)\n def call(self, inputs):\n return corr2d(inputs, self.weight) + self.bias\nX = tf.Variable(tf.ones((6, 8)))\nX[:, 2:6].assign(tf.zeros(X[:, 2:6].shape))\nK = tf.constant([[1.0, -1.0]])\ncorr2d(tf.transpose(X), K)\nconv2d = tf.keras.layers.Conv2D(1, (1, 2), use_bias=False)\nX = tf.reshape(X, (1, 6, 8, 1))\nY = tf.reshape(Y, (1, 6, 7, 1))\nlr = 3e-2\nY_hat = conv2d(X)\nfor i in range(10):\n with tf.GradientTape(watch_accessed_variables=False) as g:\n g.watch(conv2d.weights[0])\n Y_hat = conv2d(X)\n l = (abs(Y_hat - Y)) ** 2\n update = tf.multiply(lr, g.gradient(l, conv2d.weights[0]))\n weights = conv2d.get_weights()\n weights[0] = conv2d.weights[0] - update\n conv2d.set_weights(weights)\ntf.reshape(conv2d.get_weights()[0], (1, 2))"} +{"id": 310, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\n\ndef comp_conv2d(conv2d, X):\n X = paddle.reshape(X, [1, 1] + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1)\nX = paddle.rand((8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=3, padding=1, 
stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape", "tensorflow": "import tensorflow as tf\n\ndef comp_conv2d(conv2d, X):\n X = tf.reshape(X, (1, ) + X.shape + (1, ))\n Y = conv2d(X)\n return tf.reshape(Y, Y.shape[1:3])\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same')\nX = tf.random.uniform(shape=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=(5, 3), padding='same')\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same', strides=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=(3,5), padding='valid', strides=(3, 4))\ncomp_conv2d(conv2d, X).shape"} +{"id": 311, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = paddle.to_tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = paddle.to_tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return paddle.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = paddle.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = paddle.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = paddle.normal(0, 1, (3, 3, 3))\nK = paddle.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(paddle.abs(Y1 - Y2).sum()) < 1e-6", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef corr2d_multi_in(X, K):\n return tf.reduce_sum([d2l.corr2d(x, k) for x, k in zip(X, K)], axis=0)\nX = tf.constant([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = tf.constant([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return tf.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = tf.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = tf.reshape(X, (c_i, h * w))\n K = tf.reshape(K, (c_o, c_i))\n Y = tf.matmul(K, X)\n return tf.reshape(Y, (c_o, h, w))\nX = tf.random.normal((3, 3, 3), 0, 1)\nK = tf.random.normal((2, 3, 1, 1), 0, 1)\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(tf.reduce_sum(tf.abs(Y1 - Y2))) < 1e-6"} +{"id": 312, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = paddle.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = paddle.arange(16, dtype=\"float32\").reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3, stride=3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), stride=(2, 3))\npool2d(X)\nX = paddle.concat((X, X + 1), 1)\npool2d = paddle.nn.MaxPool2D(3, 
padding=1, stride=2)\npool2d(X)", "tensorflow": "import tensorflow as tf\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = tf.Variable(tf.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w +1)))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j].assign(tf.reduce_max(X[i: i + p_h, j: j + p_w]))\n elif mode =='avg':\n Y[i, j].assign(tf.reduce_mean(X[i: i + p_h, j: j + p_w]))\n return Y\nX = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = tf.reshape(tf.range(16, dtype=tf.float32), (1, 4, 4, 1))\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3])\npool2d(X)\npaddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid',\n strides=2)\npool2d(X_padded)\npaddings = tf.constant([[0, 0], [0, 0], [1, 1], [0, 0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[2, 3], padding='valid',\n strides=(2, 3))\npool2d(X_padded)\nX = tf.concat([X, X + 1], 3)\npaddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid',\n strides=2)\npool2d(X_padded)"} +{"id": 313, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn, optimizer\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = paddle.rand((1, 1, 28, 28), 'float32')\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2D:\n nn.initializer.XavierUniform(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.clear_grad()\n X, y = paddle.to_tensor(X, place=device), paddle.to_tensor(y, place=device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with paddle.no_grad():\n metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, activation='sigmoid', padding='same'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5, activation='sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, 
strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120, activation='sigmoid'),\n tf.keras.layers.Dense(84, activation='sigmoid'),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((1, 28, 28, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\nclass TrainCallback(tf.keras.callbacks.Callback):\n def __init__(self, net, train_iter, test_iter, num_epochs, device_name):\n self.timer = d2l.Timer()\n self.animator = d2l.Animator( xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n self.net = net\n self.train_iter = train_iter\n self.test_iter = test_iter\n self.num_epochs = num_epochs\n self.device_name = device_name\n def on_epoch_begin(self, epoch, logs=None):\n self.timer.start()\n def on_epoch_end(self, epoch, logs):\n self.timer.stop()\n test_acc = self.net.evaluate(self.test_iter, verbose=0, return_dict=True)['accuracy']\n metrics = (logs['loss'], logs['accuracy'], test_acc)\n self.animator.add(epoch + 1, metrics)\n if epoch == self.num_epochs - 1:\n batch_size = next(iter(self.train_iter))[0].shape[0]\n num_examples = batch_size * tf.data.experimental.cardinality(self.train_iter).numpy()\ndef train_ch6(net_fn, train_iter, test_iter, num_epochs, lr, device):\n device_name = device._device_name\n strategy = tf.distribute.OneDeviceStrategy(device_name)\n with strategy.scope():\n optimizer = tf.keras.optimizers.SGD(learning_rate=lr)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n net = net_fn()\n net.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])\n callback = TrainCallback(net, train_iter, test_iter, num_epochs, device_name)\n net.fit(train_iter, epochs=num_epochs, verbose=0, callbacks=[callback])\n return net"} +{"id": 314, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nnet = nn.Sequential(\n nn.Conv2D(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2), nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=96, kernel_size=11, strides=4, activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Conv2D(filters=256, kernel_size=5, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(4096, 
activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape:\t', X.shape)"} +{"id": 315, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2D(kernel_size=2, stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(*conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)\nratio = 4\nsmall_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]\nnet = vgg(small_conv_arch)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef vgg_block(num_convs, num_channels):\n blk = tf.keras.models.Sequential()\n for _ in range(num_convs):\n blk.add(tf.keras.layers.Conv2D(num_channels,kernel_size=3, padding='same',activation='relu'))\n blk.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))\n return blk\ndef vgg(conv_arch):\n net = tf.keras.models.Sequential()\n for (num_convs, num_channels) in conv_arch:\n net.add(vgg_block(num_convs, num_channels))\n net.add(tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10)]))\n return net\nnet = vgg(conv_arch)\nX = tf.random.uniform((1, 224, 224, 1))\nfor blk in net.layers:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t', X.shape)\nratio = 4\nsmall_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]\nnet = lambda: vgg(small_conv_arch)"} +{"id": 316, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2D(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(3, stride=2), nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef nin_block(num_channels, kernel_size, strides, 
padding):\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(num_channels, kernel_size, strides=strides, padding=padding, activation='relu'),\n tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu'),\n tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu')])\ndef net():\n return tf.keras.models.Sequential([\n nin_block(96, kernel_size=11, strides=4, padding='valid'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n nin_block(256, kernel_size=5, strides=1, padding='same'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n nin_block(384, kernel_size=3, strides=1, padding='same'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Dropout(0.5),\n nin_block(10, kernel_size=3, strides=1, padding='same'),\n tf.keras.layers.GlobalAveragePooling2D(),\n tf.keras.layers.Reshape((1, 1, 10)),\n tf.keras.layers.Flatten(),\n ])\nX = tf.random.uniform((1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 317, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nclass Inception(nn.Layer):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(in_channels, c1, kernel_size=1)\n self.p2_1 = nn.Conv2D(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2D(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2D(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2D(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2D(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2D(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return paddle.concat(x=[p1, p2, p3, p4], axis=1)\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2,padding=1))\nb2 = nn.Sequential(nn.Conv2D(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = paddle.rand(shape=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass Inception(tf.keras.Model):\n def __init__(self, c1, c2, c3, c4):\n super().__init__()\n self.p1_1 = tf.keras.layers.Conv2D(c1, 1, activation='relu')\n self.p2_1 = tf.keras.layers.Conv2D(c2[0], 1, activation='relu')\n self.p2_2 = tf.keras.layers.Conv2D(c2[1], 3, padding='same', 
activation='relu')\n self.p3_1 = tf.keras.layers.Conv2D(c3[0], 1, activation='relu')\n self.p3_2 = tf.keras.layers.Conv2D(c3[1], 5, padding='same', activation='relu')\n self.p4_1 = tf.keras.layers.MaxPool2D(3, 1, padding='same')\n self.p4_2 = tf.keras.layers.Conv2D(c4, 1, activation='relu')\n def call(self, x):\n p1 = self.p1_1(x)\n p2 = self.p2_2(self.p2_1(x))\n p3 = self.p3_2(self.p3_1(x))\n p4 = self.p4_2(self.p4_1(x))\n return tf.keras.layers.Concatenate()([p1, p2, p3, p4])\ndef b1():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, 7, strides=2, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b2():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, 1, activation='relu'),\n tf.keras.layers.Conv2D(192, 3, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b3():\n return tf.keras.models.Sequential([\n Inception(64, (96, 128), (16, 32), 32),\n Inception(128, (128, 192), (32, 96), 64),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b4():\n return tf.keras.Sequential([\n Inception(192, (96, 208), (16, 48), 64),\n Inception(160, (112, 224), (24, 64), 64),\n Inception(128, (128, 256), (24, 64), 64),\n Inception(112, (144, 288), (32, 64), 64),\n Inception(256, (160, 320), (32, 128), 128),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b5():\n return tf.keras.Sequential([\n Inception(256, (160, 320), (32, 128), 128),\n Inception(384, (192, 384), (48, 128), 128),\n tf.keras.layers.GlobalAvgPool2D(),\n tf.keras.layers.Flatten()\n ])\ndef net():\n return tf.keras.Sequential([b1(), b2(), b3(), b4(), b5(),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform(shape=(1, 96, 96, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 318, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True):\n if not is_training:\n X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = paddle.mean(X)\n var = paddle.mean(((X - mean) ** 2))\n else:\n mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True)\n var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / (var + eps) ** 0.5\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Layer):\n def __init__(self, num_features, num_dims=4):\n super(BatchNorm, self).__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32')))\n self.beta = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32')))\n self.moving_mean = paddle.zeros(shape=shape, dtype='float32')\n self.moving_var = paddle.zeros(shape=shape, dtype='float32')\n def forward(self, X):\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n 
self.moving_var, eps=1e-5, momentum=0.9, is_training=self.training)\n return Y\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(), nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nlr, num_epochs, batch_size = 1.0, 10, 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())\nparam = net.parameters()\nprint('gamma:', param[2].numpy().reshape(-1))\nprint('beta:', param[3].numpy().reshape(-1))\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), nn.BatchNorm2D(6, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(),\n nn.Linear(84, 10))", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps):\n inv = tf.cast(tf.math.rsqrt(moving_var + eps), X.dtype)\n inv *= gamma\n Y = X * inv + (beta - moving_mean * inv)\n return Y\nclass BatchNorm(tf.keras.layers.Layer):\n def __init__(self, **kwargs):\n super(BatchNorm, self).__init__(**kwargs)\n def build(self, input_shape):\n weight_shape = [input_shape[-1], ]\n self.gamma = self.add_weight(name='gamma', shape=weight_shape, initializer=tf.initializers.ones, trainable=True)\n self.beta = self.add_weight(name='beta', shape=weight_shape, initializer=tf.initializers.zeros, trainable=True)\n self.moving_mean = self.add_weight(name='moving_mean', shape=weight_shape, initializer=tf.initializers.zeros, trainable=False)\n self.moving_variance = self.add_weight(name='moving_variance', shape=weight_shape, initializer=tf.initializers.ones, trainable=False)\n super(BatchNorm, self).build(input_shape)\n def assign_moving_average(self, variable, value):\n momentum = 0.9\n delta = variable * momentum + value * (1 - momentum)\n return variable.assign(delta)\n @tf.function\n def call(self, inputs, training):\n if training:\n axes = list(range(len(inputs.shape) - 1))\n batch_mean = tf.reduce_mean(inputs, axes, keepdims=True)\n batch_variance = tf.reduce_mean(tf.math.squared_difference(inputs, tf.stop_gradient(batch_mean)), axes, keepdims=True)\n batch_mean = tf.squeeze(batch_mean, axes)\n batch_variance = tf.squeeze(batch_variance, axes)\n mean_update = self.assign_moving_average(self.moving_mean, batch_mean)\n variance_update = self.assign_moving_average(self.moving_variance, batch_variance)\n self.add_update(mean_update)\n self.add_update(variance_update)\n mean, variance = batch_mean, batch_variance\n else:\n mean, variance = self.moving_mean, self.moving_variance\n output = batch_norm(inputs, moving_mean=mean, moving_var=variance, beta=self.beta, gamma=self.gamma, eps=1e-5)\n return output\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5),\n BatchNorm(),\n 
tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(84),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(10)]\n )\nlr, num_epochs, batch_size = 1.0, 10, 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())\ntf.reshape(net.layers[1].gamma, (-1,)), tf.reshape(net.layers[1].beta, (-1,))\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(84),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(10),\n ])"} +{"id": 319, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nfrom paddle.nn import functional as F\nclass Residual(nn.Layer):\n def __init__(self, input_channels, num_channels, use_1x1conv=False,\n strides=1):\n super(Residual, self).__init__()\n self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2D(num_channels)\n self.bn2 = nn.BatchNorm2D(num_channels)\n self.relu = nn.ReLU()\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3, 3)\nX = paddle.rand([4, 3, 6, 6])\nY = blk(X)\nY.shape\nblk = Residual(3, 6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\ndef resnet_block(input_channels, num_channels, num_residuals, first_block=False):\n blk = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))\n else:\n blk.append(Residual(num_channels, num_channels))\n return blk\nb2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))\nb3 = nn.Sequential(*resnet_block(64, 128, 2))\nb4 = nn.Sequential(*resnet_block(128, 256, 2))\nb5 = nn.Sequential(*resnet_block(256, 512, 2))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass Residual(tf.keras.Model):\n def __init__(self, num_channels, use_1x1conv=False, strides=1):\n super().__init__()\n self.conv1 = tf.keras.layers.Conv2D(\n num_channels, 
padding='same', kernel_size=3, strides=strides)\n self.conv2 = tf.keras.layers.Conv2D(\n num_channels, kernel_size=3, padding='same')\n self.conv3 = None\n if use_1x1conv:\n self.conv3 = tf.keras.layers.Conv2D(\n num_channels, kernel_size=1, strides=strides)\n self.bn1 = tf.keras.layers.BatchNormalization()\n self.bn2 = tf.keras.layers.BatchNormalization()\n def call(self, X):\n Y = tf.keras.activations.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3 is not None:\n X = self.conv3(X)\n Y += X\n return tf.keras.activations.relu(Y)\nblk = Residual(3)\nX = tf.random.uniform((4, 6, 6, 3))\nY = blk(X)\nY.shape\nblk = Residual(6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\nclass ResnetBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels, num_residuals, first_block=False, **kwargs):\n super(ResnetBlock, self).__init__(**kwargs)\n self.residual_layers = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n self.residual_layers.append(Residual(num_channels, use_1x1conv=True, strides=2))\n else:\n self.residual_layers.append(Residual(num_channels))\n def call(self, X):\n for layer in self.residual_layers.layers:\n X = layer(X)\n return X\nb2 = ResnetBlock(64, 2, first_block=True)\nb3 = ResnetBlock(128, 2)\nb4 = ResnetBlock(256, 2)\nb5 = ResnetBlock(512, 2)\ndef net():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same'),\n ResnetBlock(64, 2, first_block=True),\n ResnetBlock(128, 2),\n ResnetBlock(256, 2),\n ResnetBlock(512, 2),\n tf.keras.layers.GlobalAvgPool2D(),\n tf.keras.layers.Dense(units=10)])\nX = tf.random.uniform(shape=(1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 320, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Layer):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = paddle.concat(x=[X, Y], axis=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = paddle.randn([4, 3, 8, 8])\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2D(kernel_size=2, stride=2))\nblk = transition_block(23, 10)\nblk(Y).shape\nb1 = nn.Sequential(\n nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nblks = []\nfor i, num_convs in 
enumerate(num_convs_in_dense_blocks):\n blks.append(DenseBlock(num_convs, num_channels, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n blks.append(transition_block(num_channels, num_channels // 2))\n num_channels = num_channels // 2\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2D(num_channels), nn.ReLU(),\n nn.AdaptiveMaxPool2D((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass ConvBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels):\n super(ConvBlock, self).__init__()\n self.bn = tf.keras.layers.BatchNormalization()\n self.relu = tf.keras.layers.ReLU()\n self.conv = tf.keras.layers.Conv2D(filters=num_channels, kernel_size=(3, 3), padding='same')\n self.listLayers = [self.bn, self.relu, self.conv]\n def call(self, x):\n y = x\n for layer in self.listLayers.layers:\n y = layer(y)\n y = tf.keras.layers.concatenate([x,y], axis=-1)\n return y\nclass DenseBlock(tf.keras.layers.Layer):\n def __init__(self, num_convs, num_channels):\n super(DenseBlock, self).__init__()\n self.listLayers = []\n for _ in range(num_convs):\n self.listLayers.append(ConvBlock(num_channels))\n def call(self, x):\n for layer in self.listLayers.layers:\n x = layer(x)\n return x\nblk = DenseBlock(2, 10)\nX = tf.random.uniform((4, 8, 8, 3))\nY = blk(X)\nY.shape\nclass TransitionBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels, **kwargs):\n super(TransitionBlock, self).__init__(**kwargs)\n self.batch_norm = tf.keras.layers.BatchNormalization()\n self.relu = tf.keras.layers.ReLU()\n self.conv = tf.keras.layers.Conv2D(num_channels, kernel_size=1)\n self.avg_pool = tf.keras.layers.AvgPool2D(pool_size=2, strides=2)\n def call(self, x):\n x = self.batch_norm(x)\n x = self.relu(x)\n x = self.conv(x)\n return self.avg_pool(x)\nblk = TransitionBlock(10)\nblk(Y).shape\ndef block_1():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.ReLU(),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef block_2():\n net = block_1()\n num_channels, growth_rate = 64, 32\n num_convs_in_dense_blocks = [4, 4, 4, 4]\n for i, num_convs in enumerate(num_convs_in_dense_blocks):\n net.add(DenseBlock(num_convs, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n num_channels //= 2\n net.add(TransitionBlock(num_channels))\n return net\ndef net():\n net = block_2()\n net.add(tf.keras.layers.BatchNormalization())\n net.add(tf.keras.layers.ReLU())\n net.add(tf.keras.layers.GlobalAvgPool2D())\n net.add(tf.keras.layers.Flatten())\n net.add(tf.keras.layers.Dense(10))\n return net"} +{"id": 321, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nT = 1000\ntime = paddle.arange(1, T + 1, dtype=paddle.float32)\nx = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = paddle.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.XavierUniform(m.weight)\ndef get_net():\n net 
= nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters())\n for epoch in range(epochs):\n for i,(X, y) in enumerate (train_iter()):\n trainer.clear_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = paddle.zeros([T])\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = paddle.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape([-1])\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))", "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nT = 1000\ntime = tf.range(1, T + 1, dtype=tf.float32)\nx = tf.sin(0.01 * time) + tf.random.normal([T], 0, 0.2)\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = tf.Variable(tf.zeros((T - tau, tau)))\nfor i in range(tau):\n features[:, i].assign(x[i: T - tau + i])\nlabels = tf.reshape(x[tau:], (-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef get_net():\n net = tf.keras.Sequential([tf.keras.layers.Dense(10, activation='relu'),\n tf.keras.layers.Dense(1)])\n return net\nloss = tf.keras.losses.MeanSquaredError()\ndef train(net, train_iter, loss, epochs, lr):\n trainer = tf.keras.optimizers.Adam()\n for epoch in range(epochs):\n for X, y in train_iter:\n with tf.GradientTape() as g:\n out = net(X)\n l = loss(y, out)\n params = net.trainable_variables\n grads = g.gradient(l, params)\n trainer.apply_gradients(zip(grads, params))\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.numpy(), onestep_preds.numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = tf.Variable(tf.zeros(T))\nmultistep_preds[:n_train + tau].assign(x[:n_train + tau])\nfor i in range(n_train + tau, T):\n multistep_preds[i].assign(tf.reshape(net(tf.reshape(multistep_preds[i - tau: i], (1, -1))), ()))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.numpy(), onestep_preds.numpy(),\n multistep_preds[n_train + tau:].numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = tf.Variable(tf.zeros((T - tau - max_steps + 1, tau + max_steps)))\nfor i in range(tau):\n features[:, 
i].assign(x[i: i + T - tau - max_steps + 1].numpy())\nfor i in range(tau, tau + max_steps):\n features[:, i].assign(tf.reshape(net((features[:, i - tau: i])), -1))\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"} +{"id": 322, "paddle": "import collections\nimport re\nfrom d2l import paddle as d2l\ndef tokenize(lines, token='word'):\n if token == 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('Error: Unknown word element type:' + token)\ntokens = tokenize(lines)\nfor i in range(11):\n print(tokens[i])\ndef load_corpus_time_machine(max_tokens=-1):\n lines = read_time_machine()\n tokens = tokenize(lines, 'char')\n vocab = Vocab(tokens)\n corpus = [vocab[token] for line in tokens for token in line]\n if max_tokens > 0:\n corpus = corpus[:max_tokens]\n return corpus, vocab\ncorpus, vocab = load_corpus_time_machine()\nlen(corpus), len(vocab)", "tensorflow": "import collections\nimport re\nfrom d2l import tensorflow as d2l\ndef tokenize(lines, token='word'):\n if token == 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('Error: Unknown word element type:' + token)\ntokens = tokenize(lines)\nfor i in range(11):\n print(tokens[i])\ndef load_corpus_time_machine(max_tokens=-1):\n lines = read_time_machine()\n tokens = tokenize(lines, 'char')\n vocab = Vocab(tokens)\n corpus = [vocab[token] for line in tokens for token in line]\n if max_tokens > 0:\n corpus = corpus[:max_tokens]\n return corpus, vocab\ncorpus, vocab = load_corpus_time_machine()\nlen(corpus), len(vocab)"} +{"id": 323, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield paddle.to_tensor(X), paddle.to_tensor(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = paddle.to_tensor(corpus[offset: offset + num_tokens])\n Ys = paddle.to_tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1))\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y", "tensorflow": "import random\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = 
d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield tf.constant(X), tf.constant(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = tf.constant(corpus[offset: offset + num_tokens])\n Ys = tf.constant(corpus[offset + 1: offset + 1 + num_tokens])\n Xs = tf.reshape(Xs, (batch_size, -1))\n Ys = tf.reshape(Ys, (batch_size, -1))\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_batches * num_steps, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"} +{"id": 324, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nX, W_xh = paddle.normal(0, 1, (3, 1)), paddle.normal(0, 1, (1, 4))\nH, W_hh = paddle.normal(0, 1, (3, 4)), paddle.normal(0, 1, (4, 4))\npaddle.matmul(X, W_xh) + paddle.matmul(H, W_hh)\npaddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0))", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nX, W_xh = tf.random.normal((3, 1), 0, 1), tf.random.normal((1, 4), 0, 1)\nH, W_hh = tf.random.normal((3, 4), 0, 1), tf.random.normal((4, 4), 0, 1)\ntf.matmul(X, W_xh) + tf.matmul(H, W_hh)\ntf.matmul(tf.concat((X, H), 1), tf.concat((W_xh, W_hh), 0))"} +{"id": 325, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nF.one_hot(paddle.to_tensor([0, 2]), len(vocab))\nX = paddle.arange(10).reshape((2, 5))\nF.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)* 0.01\n W_xh = normal([num_inputs, num_hiddens])\n W_hh = normal([num_hiddens, num_hiddens])\n b_h = paddle.zeros(shape=[num_hiddens])\n W_hq = normal([num_hiddens, num_outputs])\n b_q = paddle.zeros(shape=[num_outputs])\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient=False\n return params\ndef init_rnn_state(batch_size, num_hiddens):\n return (paddle.zeros(shape=[batch_size, num_hiddens]), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = paddle.tanh(paddle.mm(X, W_xh) + paddle.mm(H, W_hh) + b_h)\n Y = paddle.mm(H, W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(x=outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = F.one_hot(X.T, 
self.vocab_size)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size):\n return self.init_state(batch_size, self.num_hiddens)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0])\nY, new_state = net(X, state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: paddle.to_tensor(outputs[-1], place=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(paddle.reshape(paddle.argmax(y,axis=1),shape=[1])))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\npredict_ch8('time traveller ', 10, net, vocab, d2l.try_gpu())\ndef grad_clipping(net, theta):\n if isinstance(net, nn.Layer):\n params = [p for p in net.parameters() if not p.stop_gradient]\n else:\n params = net.params\n norm = paddle.sqrt(sum(paddle.sum((p.grad ** 2)) for p in params))\n if norm > theta:\n with paddle.no_grad():\n for param in params:\n param.grad.set_value(param.grad * theta / norm)\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0])\n else:\n if isinstance(net, nn.Layer) and not isinstance(state, tuple):\n state.stop_gradient=True\n else:\n for s in state:\n s.stop_gradient=True\n y = paddle.reshape(Y.T,shape=[-1])\n X = paddle.to_tensor(X, place=device)\n y = paddle.to_tensor(y, place=device)\n y_hat, state = net(X, state)\n l = loss(y_hat, y).mean()\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.backward()\n grad_clipping(net, 1)\n updater.step()\n else:\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * y.numel(), y.numel())\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, nn.Layer):\n updater = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\nnum_epochs, lr = 500, 1\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu())\nnet = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)", "tensorflow": "%matplotlib inline\nimport math\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ntrain_random_iter, vocab_random_iter = d2l.load_data_time_machine(batch_size, num_steps, use_random_iter=True)\ntf.one_hot(tf.constant([0, 2]), len(vocab))\nX = tf.reshape(tf.range(10), (2, 5))\ntf.one_hot(tf.transpose(X), 28).shape\ndef 
get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32)\n W_xh = tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32)\n W_hh = tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32)\n b_h = tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32)\n W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32)\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n return params\ndef init_rnn_state(batch_size, num_hiddens):\n return (tf.zeros((batch_size, num_hiddens)), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n X = tf.reshape(X,[-1,W_xh.shape[0]])\n H = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(H, W_hh) + b_h)\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, init_state, forward_fn, get_params):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.init_state, self.forward_fn = init_state, forward_fn\n self.trainable_variables = get_params(vocab_size, num_hiddens)\n def __call__(self, X, state):\n X = tf.one_hot(tf.transpose(X), self.vocab_size)\n X = tf.cast(X, tf.float32)\n return self.forward_fn(X, state, self.trainable_variables)\n def begin_state(self, batch_size, *args, **kwargs):\n return self.init_state(batch_size, self.num_hiddens)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nnum_hiddens = 512\nwith strategy.scope():\n net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params)\nstate = net.begin_state(X.shape[0])\nY, new_state = net(X, state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab):\n state = net.begin_state(batch_size=1, dtype=tf.float32)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: tf.reshape(tf.constant([outputs[-1]]),\n (1, 1)).numpy()\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.numpy().argmax(axis=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\npredict_ch8('time traveller ', 10, net, vocab)\ndef grad_clipping(grads, theta):\n theta = tf.constant(theta, dtype=tf.float32)\n new_grad = []\n for grad in grads:\n if isinstance(grad, tf.IndexedSlices):\n new_grad.append(tf.convert_to_tensor(grad))\n else:\n new_grad.append(grad)\n norm = tf.math.sqrt(sum((tf.reduce_sum(grad ** 2)).numpy()\n for grad in new_grad))\n norm = tf.cast(norm, tf.float32)\n if tf.greater(norm, theta):\n for i, grad in enumerate(new_grad):\n new_grad[i] = grad * theta / norm\n else:\n new_grad = new_grad\n return new_grad\ndef train_epoch_ch8(net, train_iter, loss, updater, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], dtype=tf.float32)\n with tf.GradientTape(persistent=True) as g:\n y_hat, state = net(X, state)\n y = tf.reshape(tf.transpose(Y), (-1))\n l = loss(y, y_hat)\n params = net.trainable_variables\n grads = g.gradient(l, params)\n grads = grad_clipping(grads, 1)\n updater.apply_gradients(zip(grads, params))\n metric.add(l * d2l.size(y), d2l.size(y))\n return 
math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, strategy, use_random_iter=False):\n with strategy.scope():\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n updater = tf.keras.optimizers.SGD(lr)\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\n device = d2l.try_gpu()._device_name\nnum_epochs, lr = 500, 1\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, strategy)\nwith strategy.scope():\n net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params)\ntrain_ch8(net, train_iter, vocab_random_iter, lr, num_epochs, strategy, use_random_iter=True)"} +{"id": 326, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = nn.SimpleRNN(len(vocab), num_hiddens, time_major=True)\nstate = paddle.zeros(shape=[1, batch_size, num_hiddens])\nstate.shape\nX = paddle.rand(shape=[num_steps, batch_size, len(vocab)])\nY, state_new = rnn_layer(X, state)\nY.shape, state_new.shape\nclass RNNModel(nn.Layer):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.num_hiddens = self.rnn.hidden_size\n if self.rnn.num_directions==1:\n self.num_directions = 1\n self.linear = nn.Linear(self.num_hiddens, self.vocab_size)\n else:\n self.num_directions = 2\n self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)\n def forward(self, inputs, state):\n X = F.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.linear(Y.reshape((-1, Y.shape[-1])))\n return output, state\n def begin_state(self, batch_size=1):\n if not isinstance(self.rnn, nn.LSTM):\n return paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens])\n else:\n return (paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]),\n paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]))\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, vocab_size=len(vocab))\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1.0\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_cell = tf.keras.layers.SimpleRNNCell(num_hiddens, kernel_initializer='glorot_uniform')\nrnn_layer = tf.keras.layers.RNN(rnn_cell, time_major=True, return_sequences=True, return_state=True)\nstate = rnn_cell.get_initial_state(batch_size=batch_size, dtype=tf.float32)\nstate.shape\nX = tf.random.uniform((num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, len(state_new), state_new[0].shape\nclass RNNModel(tf.keras.layers.Layer):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n 
self.dense = tf.keras.layers.Dense(vocab_size)\n def call(self, inputs, state):\n X = tf.one_hot(tf.transpose(inputs), self.vocab_size)\n Y, *state = self.rnn(X, state)\n output = self.dense(tf.reshape(Y, (-1, Y.shape[-1])))\n return output, state\n def begin_state(self, *args, **kwargs):\n return self.rnn.cell.get_initial_state(*args, **kwargs)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n net = RNNModel(rnn_layer, vocab_size=len(vocab))\nd2l.predict_ch8('time traveller', 10, net, vocab)\nnum_epochs, lr = 500, 1\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, strategy)"} +{"id": 327, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_gru_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H,*_ = state\n outputs = []\n for X in inputs:\n Z = F.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)\n R = F.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)\n H_tilda = paddle.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = H @ W_hq + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H,*_)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\ngru_layer = nn.GRU(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32)\n def three():\n return (tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32), tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32)\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n return params\ndef init_gru_state(batch_size, num_hiddens):\n return (tf.zeros((batch_size, num_hiddens)), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, 
W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n X = tf.reshape(X,[-1,W_xh.shape[0]])\n Z = tf.sigmoid(tf.matmul(X, W_xz) + tf.matmul(H, W_hz) + b_z)\n R = tf.sigmoid(tf.matmul(X, W_xr) + tf.matmul(H, W_hr) + b_r)\n H_tilda = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(R * H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,)\nvocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nnum_epochs, lr = 500, 1\nwith strategy.scope():\n model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_gru_state, gru, get_params)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)\ngru_cell = tf.keras.layers.GRUCell(num_hiddens, kernel_initializer='glorot_uniform')\ngru_layer = tf.keras.layers.RNN(gru_cell, time_major=True, return_sequences=True, return_state=True)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModel(gru_layer, vocab_size=len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)"} +{"id": 328, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as Function\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_lstm_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), paddle.zeros([batch_size, num_hiddens]))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = Function.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)\n F = Function.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)\n O = Function.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)\n C_tilda = paddle.tanh((X @ W_xc) + (H @ W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * paddle.tanh(C)\n Y = (H @ W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\nlstm_layer = nn.LSTM(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return 
tf.Variable(tf.random.normal(shape=shape, stddev=0.01, mean=0, dtype=tf.float32))\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n return params\ndef init_lstm_state(batch_size, num_hiddens):\n return (tf.zeros(shape=(batch_size, num_hiddens)), tf.zeros(shape=(batch_size, num_hiddens)))\ndef lstm(inputs, state, params):\n W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q = params\n (H, C) = state\n outputs = []\n for X in inputs:\n X=tf.reshape(X,[-1,W_xi.shape[0]])\n I = tf.sigmoid(tf.matmul(X, W_xi) + tf.matmul(H, W_hi) + b_i)\n F = tf.sigmoid(tf.matmul(X, W_xf) + tf.matmul(H, W_hf) + b_f)\n O = tf.sigmoid(tf.matmul(X, W_xo) + tf.matmul(H, W_ho) + b_o)\n C_tilda = tf.tanh(tf.matmul(X, W_xc) + tf.matmul(H, W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * tf.tanh(C)\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,C)\nvocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name\nnum_epochs, lr = 500, 1\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_lstm_state, lstm, get_lstm_params)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)\nlstm_cell = tf.keras.layers.LSTMCell(num_hiddens, kernel_initializer='glorot_uniform')\nlstm_layer = tf.keras.layers.RNN(lstm_cell, time_major=True, return_sequences=True, return_state=True)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModel(lstm_layer, vocab_size=len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)"} +{"id": 329, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport os\nimport paddle\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['']] for l in lines]\n array = paddle.to_tensor([truncate_pad(l, num_steps, vocab['']) for l in lines])\n valid_len = (array != vocab['']).astype(paddle.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(paddle.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.astype(paddle.int32))\n print('Valid length of Y:', Y_valid_len)\n break", "tensorflow": "import os\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['']] for l in lines]\n array = tf.constant([truncate_pad(l, num_steps, vocab['']) for l in lines])\n valid_len = tf.reduce_sum(\n tf.cast(array != vocab[''], tf.int32), 1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', tf.cast(X, tf.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', tf.cast(Y, tf.int32))\n print('Valid length of Y:', Y_valid_len)\n break"} +{"id": 330, "paddle": "x = 
paddle.arange(12)\nx.numel()\nX = paddle.reshape(x, (3, 4))\npaddle.zeros((2, 3, 4))\npaddle.ones((2, 3, 4))\npaddle.randn((3, 4),'float32')\npaddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = paddle.to_tensor([1.0, 2, 4, 8])\ny = paddle.to_tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x**y\npaddle.exp(x)\nX = paddle.arange(12, dtype='float32').reshape((3, 4))\nY = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\npaddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1)\na = paddle.reshape(paddle.arange(3), (3, 1))\nb = paddle.reshape(paddle.arange(2), (1, 2))\nZ = paddle.zeros_like(Y)\nZ = X + Y\nA = X.numpy()\nB = paddle.to_tensor(A)\ntype(A), type(B)\na = paddle.to_tensor([3.5])\na, a.item(), float(a), int(a)", "mxnet": "x = np.arange(12)\nx.size\nX = x.reshape(3, 4)\nnp.zeros((2, 3, 4))\nnp.ones((2, 3, 4))\nnp.random.normal(0, 1, size=(3, 4))\nnp.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = np.array([1, 2, 4, 8])\ny = np.array([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\nnp.exp(x)\nX = np.arange(12).reshape(3, 4)\nY = np.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nnp.concatenate([X, Y], axis=0), np.concatenate([X, Y], axis=1)\na = np.arange(3).reshape(3, 1)\nb = np.arange(2).reshape(1, 2)\nZ = np.zeros_like(Y)\nZ[:] = X + Y\nA = X.asnumpy()\nB = np.array(A)\na = np.array([3.5])\nprint(a, a.item(), float(a), int(a))"} +{"id": 331, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nX, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values)", "mxnet": "from mxnet import np\nX, y = np.array(inputs.values), np.array(outputs.values)"} +{"id": 332, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nx = paddle.to_tensor([3.0])\ny = paddle.to_tensor([2.0])\nx + y, x * y, x / y, x**y\nx = paddle.arange(4)\nA = paddle.reshape(paddle.arange(20), (5, 4))\npaddle.transpose(A, perm=[1, 0])\nB = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == paddle.transpose(B, perm=[1, 0])\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\nA = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4))\nB = A.clone()\nA, A + B\na = 2\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\na + X, (a * X).shape\nx = paddle.arange(4, dtype=paddle.float32)\nprint(x, x.sum())\nA.shape, A.sum()\nA.mean(), A.sum() / A.numel()\nA.mean(axis=0), A.sum(axis=0) / A.shape[0]\nsum_A = paddle.sum(A, axis=1, keepdim=True)\ny = paddle.ones(shape=[4], dtype='float32')\nx, y, paddle.dot(x, y)\npaddle.sum(x * y)\nA.shape, x.shape, paddle.mv(A, x)\nB = paddle.ones(shape=[4, 3], dtype='float32')\npaddle.mm(A, B)\nu = paddle.to_tensor([3.0, -4.0])\npaddle.norm(u)\npaddle.abs(u).sum()\npaddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))", "mxnet": "from mxnet import np, npx\nnpx.set_np()\nx = np.array(3.0)\ny = np.array(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = np.arange(4)\nA = np.arange(20).reshape(5, 4)\nA.T\nB = np.array([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == B.T\nX = np.arange(24).reshape(2, 3, 4)\nA = np.arange(20).reshape(5, 4)\nB = A.copy()\nprint(A, A + B)\na = 2\nX = np.arange(24).reshape(2, 3, 4)\nprint(a + X, (a * X).shape)\nx = np.arange(4)\nprint(x, x.sum())\na = A.sum()\nA.mean()\nA.sum() / A.size\nA.mean(axis=0)\nA.sum(axis=0) / A.shape[0]\nsum_A = A.sum(axis=1, keepdims=True)\ny = np.ones(4)\nprint(np.dot(x, y))\nnp.sum(x * y)\nA.shape, x.shape, np.dot(A, x)\nB = np.ones(shape=(4, 3))\nnp.dot(A, B)\nu = np.array([3, 
-4])\nnp.linalg.norm(u)\nnp.abs(u).sum()\nnp.linalg.norm(np.ones((4, 9)))"} +{"id": 333, "paddle": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import paddle as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1", "mxnet": "%matplotlib inline\nfrom matplotlib_inline import backend_inline\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1"} +{"id": 334, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nx = paddle.arange(4, dtype='float32')\nx = paddle.to_tensor(x, stop_gradient=False)\ny = 2 * paddle.dot(x, x)\nx.clear_gradient()\ny = paddle.sum(x)\ny.backward()\nx.grad\nx.clear_gradient()\ny = x * x\npaddle.sum(y).backward()\nx.grad\nx.clear_gradient()\ny = x * x\nu = y.detach()\nz = u * x\npaddle.sum(z).backward()\nx.grad == u\nx.clear_gradient()\npaddle.sum(y).backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while paddle.norm(b) < 1000:\n b = b * 2\n if paddle.sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False)\nd = f(a)\nd.backward()", "mxnet": "from mxnet import autograd, np, npx\nnpx.set_np()\nx = np.arange(4.0)\nx.attach_grad()\nx.grad\nwith autograd.record():\n y = 2 * np.dot(x, x)\nwith autograd.record():\n y = x.sum()\ny.backward()\nx.grad\nwith autograd.record():\n y = x * x\ny.backward()\nx.grad\nwith autograd.record():\n y = x * x\n u = y.detach()\n z = u * x\nz.backward()\nx.grad == u\ny.backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while np.linalg.norm(b) < 1000:\n b = b * 2\n if b.sum() > 0:\n c = b\n else:\n c = 100 * b\n return c\na = np.random.normal()\na.attach_grad()\nwith autograd.record():\n d = f(a)\nd.backward()"} +{"id": 335, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport numpy as np\nimport paddle\nfair_probs = [1.0 / 6] * 6\npaddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample()\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000", "mxnet": "%matplotlib inline\nimport random\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nfair_probs = [1.0 / 6] * 6\nnp.random.multinomial(1, fair_probs)\nnp.random.multinomial(10, fair_probs)\ncounts = np.random.multinomial(1000, fair_probs).astype(np.float32)"} +{"id": 336, "paddle": "counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1))\ncum_counts = counts.cumsum(axis=0)\ncum_counts = cum_counts.squeeze(axis=1)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdim=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i],\n label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend()\nimport warnings\nwarnings.filterwarnings(action='ignore')\nimport 
paddle\nhelp(paddle.ones)\npaddle.ones([4], dtype='float32')", "mxnet": "counts = np.random.multinomial(10, fair_probs, size=500)\ncum_counts = counts.astype(np.float32).cumsum(axis=0)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].asnumpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nfrom mxnet import np\na = dir(np.random)\nhelp(np.ones)\nnp.ones(4)"} +{"id": 337, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport time\nimport numpy as np\nimport paddle\nn = 10000\na = paddle.ones([n])\nb = paddle.ones([n])\nc = paddle.zeros([n])\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x',\n ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])", "mxnet": "%matplotlib inline\nimport math\nimport time\nfrom mxnet import np\nfrom d2l import mxnet as d2l\nn = 10000\na = np.ones(n)\nb = np.ones(n)\nc = np.zeros(n)\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x.asnumpy(), [normal(x, mu, sigma).asnumpy() for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"} +{"id": 338, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ndef synthetic_data(w, b, num_examples):\n X = paddle.normal(0, 1, (num_examples, len(w)))\n y = paddle.matmul(X, w) + b\n y += paddle.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n break\nw = paddle.normal(0, 0.01, shape=(2,1))\nb = paddle.zeros(shape=[1])\nw.stop_gradient = False\nb.stop_gradient = False\ndef linreg(X, w, b):\n return paddle.matmul(X, w) + b\n with paddle.no_grad():\n for i, param in enumerate(params):\n param -= lr * params[i].grad / batch_size\n params[i].set_value(param)\n params[i].clear_gradient()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with paddle.no_grad():\n train_l = loss(net(features, w, b), labels)", "mxnet": "%matplotlib inline\nimport random\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef synthetic_data(w, b, num_examples):\n X = np.random.normal(0, 1, (num_examples, len(w)))\n y = np.dot(X, w) + b\n y += np.random.normal(0, 0.01, 
y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = np.array([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].asnumpy(), labels.asnumpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = np.array(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n print(X, '\n', y)\n break\nw = np.random.normal(0, 0.01, (2, 1))\nb = np.zeros(1)\nw.attach_grad()\nb.attach_grad()\ndef linreg(X, w, b):\n return np.dot(X, w) + b\ndef sgd(params, lr, batch_size):\n for param in params:\n param[:] = param - lr * param.grad / batch_size\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n with autograd.record():\n l = loss(net(X, w, b), y)\n l.backward()\n sgd([w, b], lr, batch_size)\n train_l = loss(net(features, w, b), labels)"} +{"id": 339, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport paddle\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = paddle.io.TensorDataset(data_arrays)\n return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01))\nbias_attr = paddle.ParamAttr(initializer=None)\nnet = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr))\nloss = nn.MSELoss()\ntrainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters())\nw = net[0].weight\nb = net[0].bias", "mxnet": "from mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ntrue_w = np.array([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = gluon.data.ArrayDataset(*data_arrays)\n return gluon.data.DataLoader(dataset, batch_size, shuffle=is_train)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom mxnet.gluon import nn\nnet = nn.Sequential()\nnet.add(nn.Dense(1))\nfrom mxnet import init\nnet.initialize(init.Normal(sigma=0.01))\nloss = gluon.loss.L2Loss()\nfrom mxnet import gluon\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03})\nw = net[0].weight.data()\nb = net[0].bias.data()"} +{"id": 340, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport sys\nimport paddle\nfrom paddle.vision import transforms\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\nmnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in 
enumerate(zip(axes, imgs)):\n if paddle.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\n return 4\ntrain_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers())\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\n mnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\n return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()),\n paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, num_workers=get_dataloader_workers()))", "mxnet": "%matplotlib inline\nimport sys\nfrom mxnet import gluon\nfrom d2l import mxnet as d2l\nd2l.use_svg_display()\nmnist_train = gluon.data.vision.FashionMNIST(train=True)\nmnist_test = gluon.data.vision.FashionMNIST(train=False)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(img.asnumpy())\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = mnist_train[:18]\nshow_images(X.squeeze(axis=-1), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\n return 0 if sys.platform.startswith('win') else 4\ntransformer = gluon.data.vision.transforms.ToTensor()\ntrain_iter = gluon.data.DataLoader(mnist_train.transform_first(transformer), batch_size, shuffle=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n dataset = gluon.data.vision\n trans = [dataset.transforms.ToTensor()]\n if resize:\n trans.insert(0, dataset.transforms.Resize(resize))\n trans = dataset.transforms.Compose(trans)\n mnist_train = dataset.FashionMNIST(train=True).transform_first(trans)\n mnist_test = dataset.FashionMNIST(train=False).transform_first(trans)\n return (gluon.data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),\n gluon.data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))"} +{"id": 341, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom IPython import display\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs))\nb = paddle.zeros(shape=(num_outputs,))\nW.stop_gradient=False\nb.stop_gradient=False\nX = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = paddle.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = paddle.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + 
b)\ny = paddle.to_tensor([0, 2])\ny_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n if len(y_hat.shape) < len(y.shape):\n cmp = y_hat.astype(y.dtype) == y.squeeze()\n else:\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, paddle.nn.Layer):\n net.eval()\n metric = Accumulator(2)\n with paddle.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, paddle.nn.Layer):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]", "mxnet": "from IPython import display\nfrom mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = np.random.normal(0, 0.01, (num_inputs, num_outputs))\nb = np.zeros(num_outputs)\nW.attach_grad()\nb.attach_grad()\nX = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdims=True), X.sum(1, keepdims=True)\ndef softmax(X):\n X_exp = np.exp(X)\n partition = X_exp.sum(1, keepdims=True)\n return X_exp / partition\nX = np.random.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(np.dot(X.reshape((-1, W.shape[0])), W) + b)\ny = np.array([0, 2])\ny_hat = np.array([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - np.log(y_hat[range(len(y_hat)), y])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n metric = Accumulator(2)\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), d2l.size(y))\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n metric = Accumulator(3)\n if isinstance(updater, gluon.Trainer):\n updater = updater.step\n for X, y in train_iter:\n with autograd.record():\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.size)\n return metric[0] / metric[2], metric[1] / metric[2]"} +{"id": 342, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.Normal(m.weight, std=0.01)\nnet.apply(init_weights);\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())", "mxnet": "from mxnet import gluon, init, npx\nfrom mxnet.gluon import 
nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential()\nnet.add(nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})"} +{"id": 343, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(-8.0, 8.0, 0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.relu(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = paddle.nn.functional.sigmoid(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = paddle.tanh(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))", "mxnet": "%matplotlib inline\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nx = np.arange(-8.0, 8.0, 0.1)\nx.attach_grad()\nwith autograd.record():\n y = npx.relu(x)\nd2l.plot(x, y, 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of relu', figsize=(5, 2.5))\nwith autograd.record():\n y = npx.sigmoid(x)\nd2l.plot(x, y, 'x', 'sigmoid(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))\nwith autograd.record():\n y = np.tanh(x)\nd2l.plot(x, y, 'x', 'tanh(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))"} +{"id": 344, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = paddle.randn([num_inputs, num_hiddens]) * 0.01\nW1.stop_gradient = False\nb1 = paddle.zeros([num_hiddens])\nb1.stop_gradient = False\nW2 = paddle.randn([num_hiddens, num_outputs]) * 0.01\nW2.stop_gradient = False\nb2 = paddle.zeros([num_outputs])\nb2.stop_gradient = False\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = paddle.zeros_like(X)\n return paddle.maximum(X, a)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(X@W1 + b1)\n return (H@W2 + b2)\nloss = nn.CrossEntropyLoss(reduction='none')\nnum_epochs, lr = 10, 0.1\nupdater = paddle.optimizer.SGD(learning_rate=lr, parameters=params)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)", "mxnet": "from mxnet import gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens))\nb1 = np.zeros(num_hiddens)\nW2 = np.random.normal(scale=0.01, size=(num_hiddens, num_outputs))\nb2 = np.zeros(num_outputs)\nparams = [W1, b1, W2, b2]\nfor param in params:\n param.attach_grad()\ndef relu(X):\n return np.maximum(X, 0)\ndef net(X):\n X = X.reshape((-1, 
num_inputs))\n H = relu(np.dot(X, W1) + b1)\n return np.dot(H, W2) + b2\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\nnum_epochs, lr = 10, 0.1\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))"} +{"id": 345, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Linear(256, 10))\nfor layer in net:\n if type(layer) == nn.Linear:\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=0.01))\n layer.weight_attr = weight_attr\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "mxnet": "from mxnet import gluon, init, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation='relu'), nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 346, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport numpy as np\nimport paddle\nfrom paddle import nn\ntrue_w, features, poly_features, labels = [paddle.to_tensor(x, dtype=\n paddle.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n out = net(X)\n y = y.reshape(out.shape)\n l = loss(out, y)\n metric.add(l.sum(), l.numel())\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels,\n num_epochs=400):\n loss = nn.MSELoss()\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array(((train_features, train_labels.reshape([-1,1]))), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, is_train=False)\n trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2],\n labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :],\n labels[:n_train], labels[n_train:], num_epochs=1500)", "mxnet": "import math\nfrom mxnet import gluon, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n l = loss(net(X), y)\n 
metric.add(l.sum(), d2l.size(l))\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = gluon.loss.L2Loss()\n net = nn.Sequential()\n net.add(nn.Dense(1, use_bias=False))\n net.initialize()\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False)\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01})\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:], num_epochs=1500)"} +{"id": 347, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = paddle.normal(0, 1, shape=(num_inputs, 1))\n w.stop_gradient = False\n b = paddle.zeros(shape=[1])\n b.stop_gradient = False\n return [w, b]\ndef l2_penalty(w):\n return paddle.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter():\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n bias_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n net = nn.Sequential(nn.Linear(num_inputs, 1, weight_attr=weight_attr, bias_attr=bias_attr))\n loss = nn.MSELoss()\n num_epochs, lr = 100, 0.003\n trainer = paddle.optimizer.SGD(parameters=net[0].parameters(), learning_rate=lr, weight_decay=wd*1.0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n trainer.step()\n trainer.clear_grad()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))", "mxnet": "%matplotlib inline\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = 
np.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = np.random.normal(scale=1, size=(num_inputs, 1))\n b = np.zeros(1)\n w.attach_grad()\n b.attach_grad()\n return [w, b]\ndef l2_penalty(w):\n return (w**2).sum() / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = nn.Sequential()\n net.add(nn.Dense(1))\n net.initialize(init.Normal(sigma=1))\n loss = gluon.loss.L2Loss()\n num_epochs, lr = 100, 0.003\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr, 'wd': wd})\n net.collect_params('.*bias').setattr('wd_mult', 0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"} +{"id": 348, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport random\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return paddle.zeros_like(X)\n if dropout == 0:\n return X\n mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32')\n return mask * X / (1.0 - dropout)\nX= paddle.arange(16, dtype = paddle.float32).reshape((2, 8))\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Layer):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,\n is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01))\nnet = 
nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10, weight_attr=weight_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "mxnet": "from mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return np.zeros_like(X)\n if dropout == 0:\n return X\n mask = np.random.uniform(0, 1, X.shape) > dropout\n return mask.astype(np.float32) * X / (1.0 - dropout)\nX = np.arange(16).reshape(2, 8)\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\nW1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens1))\nb1 = np.zeros(num_hiddens1)\nW2 = np.random.normal(scale=0.01, size=(num_hiddens1, num_hiddens2))\nb2 = np.zeros(num_hiddens2)\nW3 = np.random.normal(scale=0.01, size=(num_hiddens2, num_outputs))\nb3 = np.zeros(num_outputs)\nparams = [W1, b1, W2, b2, W3, b3]\nfor param in params:\n param.attach_grad()\ndropout1, dropout2 = 0.2, 0.5\ndef net(X):\n X = X.reshape(-1, num_inputs)\n H1 = npx.relu(np.dot(X, W1) + b1)\n if autograd.is_training():\n H1 = dropout_layer(H1, dropout1)\n H2 = npx.relu(np.dot(H1, W2) + b2)\n if autograd.is_training():\n H2 = dropout_layer(H2, dropout2)\n return np.dot(H2, W3) + b3\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation=\"relu\"),\n nn.Dropout(dropout1),\n nn.Dense(256, activation=\"relu\"),\n nn.Dropout(dropout2),\n nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 349, "paddle": "trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.sigmoid(x)\ny.backward(paddle.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()],\n legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = paddle.normal(0, 1, shape=(4,4))\nfor i in range(100):\n M = paddle.mm(M, paddle.normal(0, 1, shape=(4, 4)))", "mxnet": "trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nx = np.arange(-8.0, 8.0, 0.1)\nx.attach_grad()\nwith autograd.record():\n y = npx.sigmoid(x)\ny.backward()\nd2l.plot(x, [y, x.grad], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = np.random.normal(size=(4, 4))\nfor i in range(100):\n M = np.dot(M, np.random.normal(size=(4, 4)))"} +{"id": 350, "paddle": "%matplotlib inline\nimport warnings\nimport numpy as np\nimport pandas as 
pd\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\nn_train = train_data.shape[0]\ntrain_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32)\ntest_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32)\ntrain_labels = paddle.to_tensor(\n train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32)\nloss = nn.MSELoss()\nin_features = train_features.shape[1]\ndef get_net():\n net = nn.Sequential(nn.Linear(in_features,1))\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = paddle.clip(net(features), 1, float('inf'))\n rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, parameters=net.parameters(), weight_decay=weight_decay*1.0)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n optimizer.clear_grad()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = paddle.concat([X_train, X_part], 0)\n y_train = paddle.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).detach().numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)", "mxnet": "%matplotlib inline\nimport pandas as pd\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nn_train = train_data.shape[0]\ntrain_features = np.array(all_features[:n_train].values, dtype=np.float32)\ntest_features = np.array(all_features[n_train:].values, dtype=np.float32)\ntrain_labels = np.array(train_data.SalePrice.values.reshape(-1, 1), dtype=np.float32)\nloss = gluon.loss.L2Loss()\ndef get_net():\n net = nn.Sequential()\n net.add(nn.Dense(1))\n net.initialize()\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = np.clip(net(features), 1, float('inf'))\n return np.sqrt(2 * loss(np.log(clipped_preds), np.log(labels)).mean())\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n 
trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': learning_rate, 'wd': weight_decay})\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = np.concatenate([X_train, X_part], 0)\n y_train = np.concatenate([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).asnumpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)"} +{"id": 351, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = paddle.rand([2, 20])\nnet(X)\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nnet = MLP()\nnet(X)\nclass MySequential(nn.Layer):\n def __init__(self, *layers):\n super(MySequential, self).__init__()\n if len(layers) > 0 and isinstance(layers[0], tuple):\n for name, layer in layers:\n self.add_sublayer(name, layer)\n else:\n for idx, layer in enumerate(layers):\n self.add_sublayer(str(idx), layer)\n def forward(self, X):\n for layer in self._sub_layers.values():\n X = layer(X)\n return X\nnet = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nnet(X)\nclass FixedHiddenMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.rand_weight = paddle.rand([20, 20])\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nnet = FixedHiddenMLP()\nnet(X)\nclass NestMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),\n nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation='relu'))\nnet.add(nn.Dense(10))\nnet.initialize()\nX = np.random.uniform(size=(2, 20))\nnet(X)\nclass MLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.hidden = nn.Dense(256, activation='relu')\n self.out = 
nn.Dense(10)\n def forward(self, X):\n return self.out(self.hidden(X))\nnet = MLP()\nnet.initialize()\nnet(X)\nclass MySequential(nn.Block):\n def add(self, block):\n\n self._children[block.name] = block\n def forward(self, X):\n for block in self._children.values():\n X = block(X)\n return X\nnet = MySequential()\nnet.add(nn.Dense(256, activation='relu'))\nnet.add(nn.Dense(10))\nnet.initialize()\nnet(X)\nclass FixedHiddenMLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.rand_weight = self.params.get_constant('rand_weight', np.random.uniform(size=(20, 20)))\n self.dense = nn.Dense(20, activation='relu')\n def forward(self, X):\n X = self.dense(X)\n X = npx.relu(np.dot(X, self.rand_weight.data()) + 1)\n X = self.dense(X)\n while np.abs(X).sum() > 1:\n X /= 2\n return X.sum()\nnet = FixedHiddenMLP()\nnet.initialize()\nnet(X)\nclass NestMLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.net = nn.Sequential()\n self.net.add(nn.Dense(64, activation='relu'), nn.Dense(32, activation='relu'))\n self.dense = nn.Dense(16, activation='relu')\n def forward(self, X):\n return self.dense(self.net(X))\nchimera = nn.Sequential()\nchimera.add(NestMLP(), nn.Dense(20), FixedHiddenMLP())\nchimera.initialize()\nchimera(X)"} +{"id": 352, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = paddle.rand([2, 4])\nnet(X)\nnet.state_dict()['2.bias']\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_sublayer(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Normal(mean=0.0, std=0.01)\n paddle.zeros(m.bias)\nnet.apply(init_normal)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef init_constant(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(value = 1)\n paddle.zeros(m.bias)\nnet.apply(init_constant)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef xavier(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.XavierUniform(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(42)\nnet[0].apply(xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n print(\"Init\", *[(name, param.shape) for name, param in m.named_parameters()][0])\n paddle.nn.initializer.XavierUniform(m.weight, -10, 10)\n h = paddle.abs(m.weight) >= 5\n h = paddle.to_tensor(h)\n m = paddle.to_tensor(m.weight)\n m *= h\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.set_value(net[0].weight.numpy() + 1)\nval = net[0].weight.numpy()\nval[0, 0] = 42\nnet[0].weight.set_value(val)\nnet[0].weight[0]\nlayer = CenteredLayer()\nlayer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32'))\nnet = nn.Sequential(nn.Linear(8, 128), CenteredLayer())", "mxnet": "from mxnet import init, np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(8, activation='relu'))\nnet.add(nn.Dense(1))\nnet.initialize()\nX = np.random.uniform(size=(2, 4))\nnet(X)\nnet.collect_params()['dense1_bias'].data()\ndef block1():\n net = nn.Sequential()\n net.add(nn.Dense(32, activation='relu'))\n net.add(nn.Dense(16, activation='relu'))\n return net\ndef block2():\n net = nn.Sequential()\n for _ in range(4):\n net.add(block1())\n return net\nrgnet = 
nn.Sequential()\nrgnet.add(block2())\nrgnet.add(nn.Dense(10))\nrgnet.initialize()\nrgnet(X)\nnet.initialize(init=init.Normal(sigma=0.01), force_reinit=True)\nnet[0].weight.data()[0]\nnet.initialize(init=init.Constant(1), force_reinit=True)\nnet[0].weight.data()[0]\nnet[0].weight.initialize(init=init.Xavier(), force_reinit=True)\nnet[1].initialize(init=init.Constant(42), force_reinit=True)\nclass MyInit(init.Initializer):\n def _init_weight(self, name, data):\n data[:] = np.random.uniform(-10, 10, data.shape)\n data *= np.abs(data) >= 5\nnet.initialize(MyInit(), force_reinit=True)\nnet[0].weight.data()[:2]\nnet[0].weight.data()[:] += 1\nnet[0].weight.data()[0, 0] = 42\nnet[0].weight.data()[0]\nlayer = CenteredLayer()\nlayer(np.array([1, 2, 3, 4, 5]))\nnet = nn.Sequential()\nnet.add(nn.Dense(128), CenteredLayer())\nnet.initialize()"} +{"id": 353, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nclass CenteredLayer(nn.Layer):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(paddle.rand([4, 8]))\nY.mean()\nclass MyLinear(nn.Layer):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = paddle.create_parameter(shape=(in_units, units), dtype='float32')\n self.bias = paddle.create_parameter(shape=(units,), dtype='float32')\n def forward(self, X):\n linear = paddle.matmul(X, self.weight) + self.bias\n return F.relu(linear)\nlinear = MyLinear(5, 3)\nlinear.weight\nlinear(paddle.randn([2, 5]))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(paddle.rand([2, 64]))", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nclass CenteredLayer(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n def forward(self, X):\n return X - X.mean()\nY = net(np.random.uniform(size=(4, 8)))\nY.mean()\nclass MyDense(nn.Block):\n def __init__(self, units, in_units, **kwargs):\n super().__init__(**kwargs)\n self.weight = self.params.get('weight', shape=(in_units, units))\n self.bias = self.params.get('bias', shape=(units,))\n def forward(self, x):\n linear = np.dot(x, self.weight.data(ctx=x.ctx)) + self.bias.data(\n ctx=x.ctx)\n return npx.relu(linear)\ndense = MyDense(units=3, in_units=5)\ndense.params\ndense.initialize()\ndense(np.random.uniform(size=(2, 5)))\nnet = nn.Sequential()\nnet.add(MyDense(8, in_units=64), MyDense(1, in_units=8))\nnet.initialize()\nnet(np.random.uniform(size=(2, 64)))"} +{"id": 354, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nx = paddle.arange(4)\npaddle.save(x, 'x-file')\nx2 = paddle.load('x-file')\ny = paddle.zeros([4])\npaddle.save([x,y], 'x-file')\nx2, y2 = paddle.load('x-file')\nmydict = {'x': x, 'y': y}\npaddle.save(mydict, 'mydict')\nmydict2 = paddle.load('mydict')\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = paddle.randn(shape=[2, 20])\nY = net(X)\npaddle.save(net.state_dict(), 'mlp.pdparams')\nclone = MLP()\nclone.set_state_dict(paddle.load('mlp.pdparams'))\nclone.eval()", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nx = np.arange(4)\nnpx.save('x-file', x)\nx2 = npx.load('x-file')\ny = np.zeros(4)\nnpx.save('x-files', [x, y])\nx2, y2 = 
npx.load('x-files')\nmydict = {'x': x, 'y': y}\nnpx.save('mydict', mydict)\nmydict2 = npx.load('mydict')\nclass MLP(nn.Block):\n def __init__(self, **kwargs):\n super(MLP, self).__init__(**kwargs)\n self.hidden = nn.Dense(256, activation='relu')\n self.output = nn.Dense(10)\n def forward(self, x):\n return self.output(self.hidden(x))\nnet = MLP()\nnet.initialize()\nX = np.random.uniform(size=(2, 20))\nY = net(X)\nnet.save_parameters('mlp.params')\nclone = MLP()\nclone.load_parameters('mlp.params')"} +{"id": 355, "paddle": "import paddle\nfrom paddle import nn\npaddle.device.set_device(\"cpu\"), paddle.CUDAPlace(0), paddle.CUDAPlace(1)\npaddle.device.cuda.device_count()\ndef try_gpu(i=0):\n if paddle.device.cuda.device_count() >= i + 1:\n return paddle.CUDAPlace(i)\n return paddle.CPUPlace()\ndef try_all_gpus():\n devices = [paddle.CUDAPlace(i) for i in range(paddle.device.cuda.device_count())]\n return devices if devices else paddle.CPUPlace()\ntry_gpu(),try_gpu(10),try_all_gpus()\nx = paddle.to_tensor([1, 2, 3])\nx.place\nX = paddle.to_tensor(paddle.ones(shape=[2, 3]), place=try_gpu())\nY = paddle.to_tensor(paddle.rand([2, 3]), place=try_gpu(1))\nZ = X.cuda(1)\nZ.cuda(1) is Z\nnet = nn.Sequential(nn.Linear(3, 1))\nnet=net.to(try_gpu())\nnet[0].weight.place", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnpx.cpu(), npx.gpu(), npx.gpu(1)\nnpx.num_gpus()\ndef try_gpu(i=0):\n return npx.gpu(i) if npx.num_gpus() >= i + 1 else npx.cpu()\ndef try_all_gpus():\n devices = [npx.gpu(i) for i in range(npx.num_gpus())]\n return devices if devices else [npx.cpu()]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = np.array([1, 2, 3])\nx.ctx\nX = np.ones((2, 3), ctx=try_gpu())\nY = np.random.uniform(size=(2, 3), ctx=try_gpu(1))\nZ = X.copyto(try_gpu(1))\nZ.as_in_ctx(try_gpu(1)) is Z\nnet = nn.Sequential()\nnet.add(nn.Dense(1))\nnet.initialize(ctx=try_gpu())\nnet[0].weight.data().ctx"} +{"id": 356, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef corr2d(X, K):\n h, w = K.shape\n Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Layer):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = paddle.ParamAttr(paddle.rand(kernel_size))\n self.bias = paddle.ParamAttr(paddle.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = paddle.ones((6, 8))\nX[:, 2:6] = 0\nK = paddle.to_tensor([[1.0, -1.0]])\ncorr2d(X.t(), K)\nconv2d = nn.Conv2D(1, 1, kernel_size=(1, 2))\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.clear_gradients()\n l.sum().backward()\n with paddle.no_grad():\n conv2d.weight[:] -= lr * conv2d.weight.grad\nconv2d.weight.reshape((1, 2))", "mxnet": "from mxnet import autograd, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef corr2d(X, K):\n h, w = K.shape\n Y = np.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = np.array([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass 
Conv2D(nn.Block):\n def __init__(self, kernel_size, **kwargs):\n super().__init__(**kwargs)\n self.weight = self.params.get('weight', shape=kernel_size)\n self.bias = self.params.get('bias', shape=(1,))\n def forward(self, x):\n return corr2d(x, self.weight.data()) + self.bias.data()\nX = np.ones((6, 8))\nX[:, 2:6] = 0\nK = np.array([[1.0, -1.0]])\ncorr2d(d2l.transpose(X), K)\nconv2d = nn.Conv2D(1, kernel_size=(1, 2), use_bias=False)\nconv2d.initialize()\n\nX = X.reshape(1, 1, 6, 8)\nY = Y.reshape(1, 1, 6, 7)\nlr = 3e-2\nfor i in range(10):\n with autograd.record():\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n l.backward()\n conv2d.weight.data()[:] -= lr * conv2d.weight.grad()\nconv2d.weight.data().reshape((1, 2))"} +{"id": 357, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\n\ndef comp_conv2d(conv2d, X):\n X = paddle.reshape(X, [1, 1] + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1)\nX = paddle.rand((8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\ndef comp_conv2d(conv2d, X):\n conv2d.initialize()\n X = X.reshape((1, 1) + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(1, kernel_size=3, padding=1)\nX = np.random.uniform(size=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=3, padding=1, strides=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=(3, 5), padding=(0, 1), strides=(3, 4))\ncomp_conv2d(conv2d, X).shape"} +{"id": 358, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = paddle.to_tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = paddle.to_tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return paddle.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = paddle.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = paddle.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = paddle.normal(0, 1, (3, 3, 3))\nK = paddle.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(paddle.abs(Y1 - Y2).sum()) < 1e-6", "mxnet": "from mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = np.array([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return np.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = np.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = 
X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = np.dot(K, X)\n return Y.reshape((c_o, h, w))\nX = np.random.normal(0, 1, (3, 3, 3))\nK = np.random.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(np.abs(Y1 - Y2).sum()) < 1e-6"} +{"id": 359, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = paddle.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = paddle.arange(16, dtype=\"float32\").reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3, stride=3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), stride=(2, 3))\npool2d(X)\nX = paddle.concat((X, X + 1), 1)\npool2d = paddle.nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = np.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = np.arange(16, dtype=np.float32).reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, strides=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), strides=(2, 3))\npool2d(X)\nX = np.concatenate((X, X + 1), 1)\npool2d = nn.MaxPool2D(3, padding=1, strides=2)\npool2d(X)"} +{"id": 360, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn, optimizer\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = paddle.rand((1, 1, 28, 28), 'float32')\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2D:\n nn.initializer.XavierUniform(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.clear_grad()\n X, y = paddle.to_tensor(X, place=device), paddle.to_tensor(y, place=device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with paddle.no_grad():\n 
metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))", "mxnet": "from mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Conv2D(channels=6, kernel_size=5, padding=2, activation='sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120, activation='sigmoid'),\n nn.Dense(84, activation='sigmoid'),\n nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 28, 28))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n net.initialize(force_reinit=True, ctx=device, init=init.Xavier())\n loss = gluon.loss.SoftmaxCrossEntropyLoss()\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n X, y = X.as_in_ctx(device), y.as_in_ctx(device)\n with autograd.record():\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n trainer.step(X.shape[0])\n metric.add(l.sum(), d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))"} +{"id": 361, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nnet = nn.Sequential(\n nn.Conv2D(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2), nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(\n nn.Conv2D(96, kernel_size=11, strides=4, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Conv2D(256, kernel_size=5, padding=2, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),\n nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),\n nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n 
nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)"} +{"id": 362, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2D(kernel_size=2, stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(*conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef vgg_block(num_convs, num_channels):\n blk = nn.Sequential()\n for _ in range(num_convs):\n blk.add(nn.Conv2D(num_channels, kernel_size=3, padding=1, activation='relu'))\n blk.add(nn.MaxPool2D(pool_size=2, strides=2))\n return blk\ndef vgg(conv_arch):\n net = nn.Sequential()\n for (num_convs, num_channels) in conv_arch:\n net.add(vgg_block(num_convs, num_channels))\n net.add(nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(10))\n return net\nnet = vgg(conv_arch)\nnet.initialize()\nX = np.random.uniform(size=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.name, 'output shape:\t', X.shape)"} +{"id": 363, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2D(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(3, stride=2), nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef nin_block(num_channels, kernel_size, strides, padding):\n blk = nn.Sequential()\n blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding, activation='relu'),\n nn.Conv2D(num_channels, kernel_size=1, activation='relu'),\n nn.Conv2D(num_channels, kernel_size=1, activation='relu'))\n return blk\nnet = nn.Sequential()\nnet.add(nin_block(96, 
kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(pool_size=3, strides=2),\n nin_block(256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(pool_size=3, strides=2),\n nin_block(384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Dropout(0.5),\n nin_block(10, kernel_size=3, strides=1, padding=1),\n nn.GlobalAvgPool2D(),\n nn.Flatten())\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)"} +{"id": 364, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nclass Inception(nn.Layer):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(in_channels, c1, kernel_size=1)\n self.p2_1 = nn.Conv2D(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2D(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2D(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2D(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2D(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2D(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return paddle.concat(x=[p1, p2, p3, p4], axis=1)\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2,padding=1))\nb2 = nn.Sequential(nn.Conv2D(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = paddle.rand(shape=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nclass Inception(nn.Block):\n def __init__(self, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(c1, kernel_size=1, activation='relu')\n self.p2_1 = nn.Conv2D(c2[0], kernel_size=1, activation='relu')\n self.p2_2 = nn.Conv2D(c2[1], kernel_size=3, padding=1, activation='relu')\n self.p3_1 = nn.Conv2D(c3[0], kernel_size=1, activation='relu')\n self.p3_2 = nn.Conv2D(c3[1], kernel_size=5, padding=2, activation='relu')\n self.p4_1 = nn.MaxPool2D(pool_size=3, strides=1, padding=1)\n self.p4_2 = nn.Conv2D(c4, kernel_size=1, activation='relu')\n def forward(self, x):\n p1 = self.p1_1(x)\n p2 = self.p2_2(self.p2_1(x))\n p3 = self.p3_2(self.p3_1(x))\n p4 = self.p4_2(self.p4_1(x))\n return np.concatenate((p1, p2, p3, p4), axis=1)\nb1 
= nn.Sequential()\nb1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb2 = nn.Sequential()\nb2.add(nn.Conv2D(64, kernel_size=1, activation='relu'),\n nn.Conv2D(192, kernel_size=3, padding=1, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb3 = nn.Sequential()\nb3.add(Inception(64, (96, 128), (16, 32), 32),\n Inception(128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb4 = nn.Sequential()\nb4.add(Inception(192, (96, 208), (16, 48), 64),\n Inception(160, (112, 224), (24, 64), 64),\n Inception(128, (128, 256), (24, 64), 64),\n Inception(112, (144, 288), (32, 64), 64),\n Inception(256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb5 = nn.Sequential()\nb5.add(Inception(256, (160, 320), (32, 128), 128),\n Inception(384, (192, 384), (48, 128), 128),\n nn.GlobalAvgPool2D())\nnet = nn.Sequential()\nnet.add(b1, b2, b3, b4, b5, nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 96, 96))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 365, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True):\n if not is_training:\n X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = paddle.mean(X)\n var = paddle.mean(((X - mean) ** 2))\n else:\n mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True)\n var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / (var + eps) ** 0.5\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Layer):\n def __init__(self, num_features, num_dims=4):\n super(BatchNorm, self).__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32')))\n self.beta = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32')))\n self.moving_mean = paddle.zeros(shape=shape, dtype='float32')\n self.moving_var = paddle.zeros(shape=shape, dtype='float32')\n def forward(self, X):\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9, is_training=self.training)\n return Y\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(), nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nparam = net.parameters()\nprint('gamma:', param[2].numpy().reshape(-1))\nprint('beta:', param[3].numpy().reshape(-1))\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), nn.BatchNorm2D(6, momentum=0.1), nn.Sigmoid(),\n 
nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(),\n nn.Linear(84, 10))", "mxnet": "from mxnet import autograd, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n if not autograd.is_training():\n X_hat = (X - moving_mean) / np.sqrt(moving_var + eps)\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = X.mean(axis=0)\n var = ((X - mean) ** 2).mean(axis=0)\n else:\n mean = X.mean(axis=(0, 2, 3), keepdims=True)\n var = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True)\n X_hat = (X - mean) / np.sqrt(var + eps)\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Block):\n def __init__(self, num_features, num_dims, **kwargs):\n super().__init__(**kwargs)\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.params.get('gamma', shape=shape, init=init.One())\n self.beta = self.params.get('beta', shape=shape, init=init.Zero())\n self.moving_mean = np.zeros(shape)\n self.moving_var = np.ones(shape)\n def forward(self, X):\n if self.moving_mean.ctx != X.ctx:\n self.moving_mean = self.moving_mean.copyto(X.ctx)\n self.moving_var = self.moving_var.copyto(X.ctx)\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma.data(), self.beta.data(), self.moving_mean,\n self.moving_var, eps=1e-12, momentum=0.9)\n return Y\nnet = nn.Sequential()\nnet.add(nn.Conv2D(6, kernel_size=5),\n BatchNorm(6, num_dims=4),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(16, kernel_size=5),\n BatchNorm(16, num_dims=4),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120),\n BatchNorm(120, num_dims=2),\n nn.Activation('sigmoid'),\n nn.Dense(84),\n BatchNorm(84, num_dims=2),\n nn.Activation('sigmoid'),\n nn.Dense(10))\nnet[1].gamma.data().reshape(-1,), net[1].beta.data().reshape(-1,)\nnet = nn.Sequential()\nnet.add(nn.Conv2D(6, kernel_size=5),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(16, kernel_size=5),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.Dense(84),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.Dense(10))"} +{"id": 366, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nfrom paddle.nn import functional as F\nclass Residual(nn.Layer):\n def __init__(self, input_channels, num_channels, use_1x1conv=False,\n strides=1):\n super(Residual, self).__init__()\n self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2D(num_channels)\n self.bn2 = nn.BatchNorm2D(num_channels)\n self.relu = nn.ReLU()\n def forward(self, X):\n Y = 
F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3, 3)\nX = paddle.rand([4, 3, 6, 6])\nY = blk(X)\nY.shape\nblk = Residual(3, 6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\ndef resnet_block(input_channels, num_channels, num_residuals, first_block=False):\n blk = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))\n else:\n blk.append(Residual(num_channels, num_channels))\n return blk\nb2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))\nb3 = nn.Sequential(*resnet_block(64, 128, 2))\nb4 = nn.Sequential(*resnet_block(128, 256, 2))\nb5 = nn.Sequential(*resnet_block(256, 512, 2))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nclass Residual(nn.Block):\n def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):\n super().__init__(**kwargs)\n self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1, strides=strides)\n self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(num_channels, kernel_size=1, strides=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm()\n self.bn2 = nn.BatchNorm()\n def forward(self, X):\n Y = npx.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n return npx.relu(Y + X)\nblk = Residual(3)\nblk.initialize()\nX = np.random.uniform(size=(4, 3, 6, 6))\nblk(X).shape\nblk = Residual(6, use_1x1conv=True, strides=2)\nblk.initialize()\nblk(X).shape\nnet = nn.Sequential()\nnet.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\ndef resnet_block(num_channels, num_residuals, first_block=False):\n blk = nn.Sequential()\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.add(Residual(num_channels, use_1x1conv=True, strides=2))\n else:\n blk.add(Residual(num_channels))\n return blk\nnet.add(resnet_block(64, 2, first_block=True),\n resnet_block(128, 2),\n resnet_block(256, 2),\n resnet_block(512, 2))\nnet.add(nn.GlobalAvgPool2D(), nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 367, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Layer):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = 
paddle.concat(x=[X, Y], axis=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = paddle.randn([4, 3, 8, 8])\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2D(kernel_size=2, stride=2))\nblk = transition_block(23, 10)\nblk(Y).shape\nb1 = nn.Sequential(\n nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nblks = []\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n blks.append(DenseBlock(num_convs, num_channels, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n blks.append(transition_block(num_channels, num_channels // 2))\n num_channels = num_channels // 2\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2D(num_channels), nn.ReLU(),\n nn.AdaptiveMaxPool2D((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef conv_block(num_channels):\n blk = nn.Sequential()\n blk.add(nn.BatchNorm(),\n nn.Activation('relu'),\n nn.Conv2D(num_channels, kernel_size=3, padding=1))\n return blk\nclass DenseBlock(nn.Block):\n def __init__(self, num_convs, num_channels, **kwargs):\n super().__init__(**kwargs)\n self.net = nn.Sequential()\n for _ in range(num_convs):\n self.net.add(conv_block(num_channels))\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = np.concatenate((X, Y), axis=1)\n return X\nblk = DenseBlock(2, 10)\nblk.initialize()\nX = np.random.uniform(size=(4, 3, 8, 8))\nY = blk(X)\nY.shape\ndef transition_block(num_channels):\n blk = nn.Sequential()\n blk.add(nn.BatchNorm(), nn.Activation('relu'),\n nn.Conv2D(num_channels, kernel_size=1),\n nn.AvgPool2D(pool_size=2, strides=2))\n return blk\nblk = transition_block(10)\nblk.initialize()\nblk(Y).shape\nnet = nn.Sequential()\nnet.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n net.add(DenseBlock(num_convs, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n num_channels //= 2\n net.add(transition_block(num_channels))\nnet.add(nn.BatchNorm(),\n nn.Activation('relu'),\n nn.GlobalAvgPool2D(),\n nn.Dense(10))"} +{"id": 368, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nT = 1000\ntime = paddle.arange(1, T + 1, dtype=paddle.float32)\nx = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = paddle.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.XavierUniform(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = 
nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters())\n for epoch in range(epochs):\n for i,(X, y) in enumerate (train_iter()):\n trainer.clear_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = paddle.zeros([T])\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = paddle.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape([-1])\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))", "mxnet": "%matplotlib inline\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nT = 1000\ntime = np.arange(1, T + 1, dtype=np.float32)\nx = np.sin(0.01 * time) + np.random.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = np.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef get_net():\n net = nn.Sequential()\n net.add(nn.Dense(10, activation='relu'),\n nn.Dense(1))\n net.initialize(init.Xavier())\n return net\nloss = gluon.loss.L2Loss()\ndef train(net, train_iter, loss, epochs, lr):\n trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr})\n for epoch in range(epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.asnumpy(), onestep_preds.asnumpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = np.zeros(T)\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.asnumpy(), onestep_preds.asnumpy(),\n multistep_preds[n_train + tau:].asnumpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = np.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape(-1)\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 
1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].asnumpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"} +{"id": 369, "paddle": "import collections\nimport re\nfrom d2l import paddle as d2l", "mxnet": "import collections\nimport re\nfrom d2l import mxnet as d2l"} +{"id": 370, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield paddle.to_tensor(X), paddle.to_tensor(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = paddle.to_tensor(corpus[offset: offset + num_tokens])\n Ys = paddle.to_tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1))\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y", "mxnet": "import random\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield np.array(X), np.array(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = np.array(corpus[offset: offset + num_tokens])\n Ys = np.array(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"} +{"id": 371, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nX, W_xh = paddle.normal(0, 1, (3, 1)), paddle.normal(0, 1, (1, 4))\nH, W_hh = paddle.normal(0, 1, (3, 4)), paddle.normal(0, 1, (4, 4))\npaddle.matmul(X, W_xh) + paddle.matmul(H, 
W_hh)\npaddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0))", "mxnet": "from mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nX, W_xh = np.random.normal(0, 1, (3, 1)), np.random.normal(0, 1, (1, 4))\nH, W_hh = np.random.normal(0, 1, (3, 4)), np.random.normal(0, 1, (4, 4))\nnp.dot(X, W_xh) + np.dot(H, W_hh)\nnp.dot(np.concatenate((X, H), 1), np.concatenate((W_xh, W_hh), 0))"} +{"id": 372, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nF.one_hot(paddle.to_tensor([0, 2]), len(vocab))\nX = paddle.arange(10).reshape((2, 5))\nF.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)* 0.01\n W_xh = normal([num_inputs, num_hiddens])\n W_hh = normal([num_hiddens, num_hiddens])\n b_h = paddle.zeros(shape=[num_hiddens])\n W_hq = normal([num_hiddens, num_outputs])\n b_q = paddle.zeros(shape=[num_outputs])\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient=False\n return params\ndef init_rnn_state(batch_size, num_hiddens):\n return (paddle.zeros(shape=[batch_size, num_hiddens]), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = paddle.tanh(paddle.mm(X, W_xh) + paddle.mm(H, W_hh) + b_h)\n Y = paddle.mm(H, W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(x=outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = F.one_hot(X.T, self.vocab_size)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size):\n return self.init_state(batch_size, self.num_hiddens)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0])\nY, new_state = net(X, state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: paddle.to_tensor(outputs[-1], place=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(paddle.reshape(paddle.argmax(y,axis=1),shape=[1])))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\ndef grad_clipping(net, theta):\n if isinstance(net, nn.Layer):\n params = [p for p in net.parameters() if not p.stop_gradient]\n else:\n params = net.params\n norm = paddle.sqrt(sum(paddle.sum((p.grad ** 2)) for p in params))\n if norm > theta:\n with paddle.no_grad():\n for param in params:\n param.grad.set_value(param.grad * theta / norm)\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0])\n else:\n if isinstance(net, nn.Layer) and not 
isinstance(state, tuple):\n state.stop_gradient=True\n else:\n for s in state:\n s.stop_gradient=True\n y = paddle.reshape(Y.T,shape=[-1])\n X = paddle.to_tensor(X, place=device)\n y = paddle.to_tensor(y, place=device)\n y_hat, state = net(X, state)\n l = loss(y_hat, y).mean()\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.backward()\n grad_clipping(net, 1)\n updater.step()\n else:\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * y.numel(), y.numel())\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, nn.Layer):\n updater = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\nnet = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)", "mxnet": "%matplotlib inline\nimport math\nfrom mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnpx.one_hot(np.array([0, 2]), len(vocab))\nX = np.arange(10).reshape((2, 5))\nnpx.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n W_xh = normal((num_inputs, num_hiddens))\n W_hh = normal((num_hiddens, num_hiddens))\n b_h = np.zeros(num_hiddens, ctx=device)\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_rnn_state(batch_size, num_hiddens, device):\n return (np.zeros((batch_size, num_hiddens), ctx=device), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = np.tanh(np.dot(X, W_xh) + np.dot(H, W_hh) + b_h)\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens, device)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = npx.one_hot(X.T, self.vocab_size)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size, ctx):\n return self.init_state(batch_size, self.num_hiddens, ctx)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0], d2l.try_gpu())\nY, new_state = net(X.as_in_context(d2l.try_gpu()), state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1, ctx=device)\n outputs = 
[vocab[prefix[0]]]\n get_input = lambda: np.array([outputs[-1]], ctx=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.argmax(axis=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\ndef grad_clipping(net, theta):\n if isinstance(net, gluon.Block):\n params = [p.data() for p in net.collect_params().values()]\n else:\n params = net.params\n norm = math.sqrt(sum((p.grad ** 2).sum() for p in params))\n if norm > theta:\n for param in params:\n param.grad[:] *= theta / norm\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], ctx=device)\n else:\n for s in state:\n s.detach()\n y = Y.T.reshape(-1)\n X, y = X.as_in_ctx(device), y.as_in_ctx(device)\n with autograd.record():\n y_hat, state = net(X, state)\n l = loss(y_hat, y).mean()\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * d2l.size(y), d2l.size(y))\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = gluon.loss.SoftmaxCrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, gluon.Block):\n net.initialize(ctx=device, force_reinit=True, init=init.Normal(0.01))\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n updater = lambda batch_size: trainer.step(batch_size)\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)"} +{"id": 373, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = nn.SimpleRNN(len(vocab), num_hiddens, time_major=True)\nstate = paddle.zeros(shape=[1, batch_size, num_hiddens])\nstate.shape\nX = paddle.rand(shape=[num_steps, batch_size, len(vocab)])\nY, state_new = rnn_layer(X, state)\nY.shape, state_new.shape\nclass RNNModel(nn.Layer):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.num_hiddens = self.rnn.hidden_size\n if self.rnn.num_directions==1:\n self.num_directions = 1\n self.linear = nn.Linear(self.num_hiddens, self.vocab_size)\n else:\n self.num_directions = 2\n self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)\n def forward(self, inputs, state):\n X = F.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.linear(Y.reshape((-1, Y.shape[-1])))\n return output, state\n def begin_state(self, batch_size=1):\n if not isinstance(self.rnn, nn.LSTM):\n return 
paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens])\n else:\n return (paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]),\n paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]))\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, vocab_size=len(vocab))\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1.0\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn, rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = rnn.RNN(num_hiddens)\nrnn_layer.initialize()\nstate = rnn_layer.begin_state(batch_size=batch_size)\nlen(state), state[0].shape\nX = np.random.uniform(size=(num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, len(state_new), state_new[0].shape\nclass RNNModel(nn.Block):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.dense = nn.Dense(vocab_size)\n def forward(self, inputs, state):\n X = npx.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.dense(Y.reshape(-1, Y.shape[-1]))\n return output, state\n def begin_state(self, *args, **kwargs):\n return self.rnn.begin_state(*args, **kwargs)\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, len(vocab))\nnet.initialize(force_reinit=True, ctx=device)\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)"} +{"id": 374, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_gru_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H,*_ = state\n outputs = []\n for X in inputs:\n Z = F.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)\n R = F.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)\n H_tilda = paddle.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = H @ W_hq + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H,*_)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\ngru_layer = nn.GRU(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(gru_layer, 
len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_gru_state(batch_size, num_hiddens, device):\n return (np.zeros(shape=(batch_size, num_hiddens), ctx=device), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = npx.sigmoid(np.dot(X, W_xz) + np.dot(H, W_hz) + b_z)\n R = npx.sigmoid(np.dot(X, W_xr) + np.dot(H, W_hr) + b_r)\n H_tilda = np.tanh(np.dot(X, W_xh) + np.dot(R * H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H,)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\ngru_layer = rnn.GRU(num_hiddens)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"} +{"id": 375, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as Function\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_lstm_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), paddle.zeros([batch_size, num_hiddens]))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = Function.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)\n F = Function.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)\n O = Function.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)\n C_tilda = paddle.tanh((X @ W_xc) + (H @ W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * paddle.tanh(C)\n Y = (H @ W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 
500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\nlstm_layer = nn.LSTM(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_lstm_state(batch_size, num_hiddens, device):\n return (np.zeros((batch_size, num_hiddens), ctx=device), np.zeros((batch_size, num_hiddens), ctx=device))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = npx.sigmoid(np.dot(X, W_xi) + np.dot(H, W_hi) + b_i)\n F = npx.sigmoid(np.dot(X, W_xf) + np.dot(H, W_hf) + b_f)\n O = npx.sigmoid(np.dot(X, W_xo) + np.dot(H, W_ho) + b_o)\n C_tilda = np.tanh(np.dot(X, W_xc) + np.dot(H, W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * np.tanh(C)\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nlstm_layer = rnn.LSTM(num_hiddens)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"} +{"id": 376, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport os\nimport paddle\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = paddle.to_tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = (array != vocab['<pad>']).astype(paddle.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(paddle.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.astype(paddle.int32))\n print('Valid length of Y:', Y_valid_len)\n break", "mxnet": "import os\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = np.array([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = (array != vocab['<pad>']).astype(np.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, 
num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(np.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.astype(np.int32))\n print('Valid length of Y:', Y_valid_len)\n break"} +{"id": 377, "mxnet": "x = np.arange(12)\nx.size\nnp.zeros((2, 3, 4))\nnp.ones((2, 3, 4))\nnp.random.normal(0, 1, size=(3, 4))\nnp.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = np.array([1, 2, 4, 8])\ny = np.array([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\nnp.exp(x)\nX = np.arange(12).reshape(3, 4)\nY = np.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nnp.concatenate([X, Y], axis=0), np.concatenate([X, Y], axis=1)\na = np.arange(3).reshape(3, 1)\nb = np.arange(2).reshape(1, 2)\nZ = np.zeros_like(Y)\nZ[:] = X + Y\nA = X.asnumpy()\nB = np.array(A)\na = np.array([3.5])\nprint(a, a.item(), float(a), int(a))", "pytorch": "x = torch.arange(12)\nx.numel()\ntorch.zeros((2, 3, 4))\ntorch.ones((2, 3, 4))\ntorch.randn(3, 4)\ntorch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = torch.tensor([1.0, 2, 4, 8])\ny = torch.tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\ntorch.exp(x)\nX = torch.arange(12, dtype=torch.float32).reshape((3,4))\nY = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\ntorch.cat((X, Y), dim=0), torch.cat((X, Y), dim=1)\na = torch.arange(3).reshape((3, 1))\nb = torch.arange(2).reshape((1, 2))\nZ = torch.zeros_like(Y)\nZ[:] = X + Y\nA = X.numpy()\nB = torch.tensor(A)\na = torch.tensor([3.5])\nprint(a, a.item(), float(a), int(a))"} +{"id": 378, "mxnet": "from mxnet import np\nX, y = np.array(inputs.values), np.array(outputs.values)", "pytorch": "import torch\nX, y = torch.tensor(inputs.values), torch.tensor(outputs.values)"} +{"id": 379, "mxnet": "from mxnet import np, npx\nnpx.set_np()\nx = np.array(3.0)\ny = np.array(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = np.arange(4)\nA = np.arange(20).reshape(5, 4)\nB = np.array([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nX = np.arange(24).reshape(2, 3, 4)\nA = np.arange(20).reshape(5, 4)\nB = A.copy()\nprint(A, A + B)\na = 2\nX = np.arange(24).reshape(2, 3, 4)\nprint(a + X, (a * X).shape)\nx = np.arange(4)\nprint(x, x.sum())\nA.mean()\nA.sum() / A.size\ny = np.ones(4)\nprint(np.dot(x, y))\nnp.sum(x * y)\nA.shape, x.shape, np.dot(A, x)\nB = np.ones(shape=(4, 3))\nnp.dot(A, B)\nu = np.array([3, -4])\nnp.linalg.norm(u)\nnp.abs(u).sum()\nnp.linalg.norm(np.ones((4, 9)))", "pytorch": "import torch\nx = torch.tensor(3.0)\ny = torch.tensor(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = torch.arange(4)\nA = torch.arange(20).reshape(5, 4)\nB = torch.tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nX = torch.arange(24).reshape(2, 3, 4)\nA = torch.arange(20, dtype=torch.float32).reshape(5, 4)\nB = A.clone()\nprint(A, A + B)\na = 2\nX = torch.arange(24).reshape(2, 3, 4)\nprint(a + X, (a * X).shape)\nx = torch.arange(4, dtype=torch.float32)\nprint(x, x.sum())\nA.mean()\nA.sum() / A.numel()\ny = torch.ones(4, dtype = torch.float32)\nprint(torch.dot(x, y))\ntorch.sum(x * y)\nA.shape, x.shape, torch.mv(A, x)\nB = torch.ones(4, 3)\ntorch.mm(A, B)\nu = torch.tensor([3.0, -4.0])\ntorch.norm(u)\ntorch.abs(u).sum()\ntorch.norm(torch.ones((4, 9)))"} +{"id": 380, "mxnet": "%matplotlib inline\nfrom matplotlib_inline import backend_inline\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef f(x):\n return 3 * x ** 2 - 4 * x", "pytorch": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import torch as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * 
x"} +{"id": 381, "mxnet": "from mxnet import autograd, np, npx\nnpx.set_np()\nx = np.arange(4.0)\nx.attach_grad()\nx.grad\nwith autograd.record():\n y = 2 * np.dot(x, x)\nwith autograd.record():\n y = x.sum()\ny.backward()\nx.grad\nwith autograd.record():\n y = x * x\ny.backward()\nx.grad\nwith autograd.record():\n y = x * x\n u = y.detach()\n z = u * x\nz.backward()\nx.grad == u\ny.backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while np.linalg.norm(b) < 1000:\n b = b * 2\n if b.sum() > 0:\n c = b\n else:\n c = 100 * b\n return c\na = np.random.normal()\na.attach_grad()\nwith autograd.record():\n d = f(a)\nd.backward()", "pytorch": "import torch\nx = torch.arange(4.0)\nx.requires_grad_(True)\nx.grad\ny = 2 * torch.dot(x, x)\nx.grad.zero_()\ny = x.sum()\ny.backward()\nx.grad\nx.grad.zero_()\ny = x * x\ny.sum().backward()\nx.grad\nx.grad.zero_()\ny = x * x\nu = y.detach()\nz = u * x\nz.sum().backward()\nx.grad == u\nx.grad.zero_()\ny.sum().backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while b.norm() < 1000:\n b = b * 2\n if b.sum() > 0:\n c = b\n else:\n c = 100 * b\n return c\na = torch.randn(size=(), requires_grad=True)\nd = f(a)\nd.backward()"} +{"id": 382, "mxnet": "%matplotlib inline\nimport random\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nfair_probs = [1.0 / 6] * 6\nnp.random.multinomial(1, fair_probs)\nnp.random.multinomial(10, fair_probs)\ncounts = np.random.multinomial(1000, fair_probs).astype(np.float32)", "pytorch": "%matplotlib inline\nimport torch\nfrom torch.distributions import multinomial\nfrom d2l import torch as d2l\nfair_probs = torch.ones([6]) / 6\nmultinomial.Multinomial(1, fair_probs).sample()\nmultinomial.Multinomial(10, fair_probs).sample()\ncounts = multinomial.Multinomial(1000, fair_probs).sample()"} +{"id": 383, "mxnet": "counts = np.random.multinomial(10, fair_probs, size=500)\ncum_counts = counts.astype(np.float32).cumsum(axis=0)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].asnumpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nfrom mxnet import np\na = dir(np.random)\nhelp(np.ones)\nnp.ones(4)", "pytorch": "counts = multinomial.Multinomial(10, fair_probs).sample((500,))\ncum_counts = counts.cumsum(dim=0)\nestimates = cum_counts / cum_counts.sum(dim=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].numpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nimport torch\na = dir(torch.distributions)\nhelp(torch.ones)\ntorch.ones(4)"} +{"id": 384, "mxnet": "%matplotlib inline\nimport math\nimport time\nfrom mxnet import np\nfrom d2l import mxnet as d2l\nn = 10000\na = np.ones(n)\nb = np.ones(n)\nc = np.zeros(n)\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x.asnumpy(), [normal(x, mu, sigma).asnumpy() for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])", "pytorch": "%matplotlib inline\nimport math\nimport time\nimport numpy as np\nimport torch\nfrom d2l import torch as 
d2l\nn = 10000\na = torch.ones(n)\nb = torch.ones(n)\nc = torch.zeros(n)\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"} +{"id": 385, "mxnet": "%matplotlib inline\nimport random\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef synthetic_data(w, b, num_examples):\n X = np.random.normal(0, 1, (num_examples, len(w)))\n y = np.dot(X, w) + b\n y += np.random.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = np.array([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].asnumpy(), labels.asnumpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = np.array(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nw = np.random.normal(0, 0.01, (2, 1))\nb = np.zeros(1)\nw.attach_grad()\nb.attach_grad()\ndef linreg(X, w, b):\n return np.dot(X, w) + b\ndef sgd(params, lr, batch_size):\n for param in params:\n param[:] = param - lr * param.grad / batch_size\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n with autograd.record():\n l = loss(net(X, w, b), y)\n l.backward()\n sgd([w, b], lr, batch_size)\n train_l = loss(net(features, w, b), labels)", "pytorch": "%matplotlib inline\nimport random\nimport torch\nfrom d2l import torch as d2l\ndef synthetic_data(w, b, num_examples):\n X = torch.normal(0, 1, (num_examples, len(w)))\n y = torch.matmul(X, w) + b\n y += torch.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = torch.tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = torch.tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nw = torch.normal(0, 0.01, size=(2,1), requires_grad=True)\nb = torch.zeros(1, requires_grad=True)\ndef linreg(X, w, b):\n return torch.matmul(X, w) + b\ndef sgd(params, lr, batch_size):\n with torch.no_grad():\n for param in params:\n param -= lr * param.grad / batch_size\n param.grad.zero_()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with torch.no_grad():\n train_l = loss(net(features, w, b), labels)"} +{"id": 386, "mxnet": "from mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ntrue_w = np.array([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = gluon.data.ArrayDataset(*data_arrays)\n return gluon.data.DataLoader(dataset, batch_size, shuffle=is_train)\nbatch_size = 
10\ndata_iter = load_array((features, labels), batch_size)\nfrom mxnet.gluon import nn\nnet = nn.Sequential()\nnet.add(nn.Dense(1))\nfrom mxnet import init\nnet.initialize(init.Normal(sigma=0.01))\nloss = gluon.loss.L2Loss()\nfrom mxnet import gluon\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03})\nw = net[0].weight.data()\nb = net[0].bias.data()", "pytorch": "import numpy as np\nimport torch\nfrom torch.utils import data\nfrom d2l import torch as d2l\ntrue_w = torch.tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = data.TensorDataset(*data_arrays)\n return data.DataLoader(dataset, batch_size, shuffle=is_train)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom torch import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nnet[0].weight.data.normal_(0, 0.01)\nnet[0].bias.data.fill_(0)\nloss = nn.MSELoss()\ntrainer = torch.optim.SGD(net.parameters(), lr=0.03)\nw = net[0].weight.data\nb = net[0].bias.data"} +{"id": 387, "mxnet": "%matplotlib inline\nimport sys\nfrom mxnet import gluon\nfrom d2l import mxnet as d2l\nd2l.use_svg_display()\nmnist_train = gluon.data.vision.FashionMNIST(train=True)\nmnist_test = gluon.data.vision.FashionMNIST(train=False)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(img.asnumpy())\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = mnist_train[:18]\nshow_images(X.squeeze(axis=-1), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ndef get_dataloader_workers():\n return 0 if sys.platform.startswith('win') else 4\ntransformer = gluon.data.vision.transforms.ToTensor()\ntrain_iter = gluon.data.DataLoader(mnist_train.transform_first(transformer), batch_size, shuffle=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n dataset = gluon.data.vision\n trans = [dataset.transforms.ToTensor()]\n if resize:\n trans.insert(0, dataset.transforms.Resize(resize))\n trans = dataset.transforms.Compose(trans)\n mnist_train = dataset.FashionMNIST(train=True).transform_first(trans)\n mnist_test = dataset.FashionMNIST(train=False).transform_first(trans)\n return (gluon.data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),\n gluon.data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))", "pytorch": "%matplotlib inline\nimport torch\nimport torchvision\nfrom torch.utils import data\nfrom torchvision import transforms\nfrom d2l import torch as d2l\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = torchvision.datasets.FashionMNIST(\n root=\"../data\", train=True, transform=trans, download=True)\nmnist_test = torchvision.datasets.FashionMNIST(\n root=\"../data\", train=False, transform=trans, download=True)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if torch.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n 
ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(data.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape(18, 28, 28), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ndef get_dataloader_workers():\n return 4\ntrain_iter = data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = torchvision.datasets.FashionMNIST(root=\"../data\", train=True, transform=trans, download=True)\n mnist_test = torchvision.datasets.FashionMNIST(root=\"../data\", train=False, transform=trans, download=True)\n return (data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),\n data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))"} +{"id": 388, "mxnet": "from IPython import display\nfrom mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = np.random.normal(0, 0.01, (num_inputs, num_outputs))\nb = np.zeros(num_outputs)\nW.attach_grad()\nb.attach_grad()\nX = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdims=True), X.sum(1, keepdims=True)\ndef softmax(X):\n X_exp = np.exp(X)\n partition = X_exp.sum(1, keepdims=True)\n return X_exp / partition\nX = np.random.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(np.dot(X.reshape((-1, W.shape[0])), W) + b)\ny = np.array([0, 2])\ny_hat = np.array([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - np.log(y_hat[range(len(y_hat)), y])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n metric = Accumulator(2)\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), d2l.size(y))\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n metric = Accumulator(3)\n if isinstance(updater, gluon.Trainer):\n updater = updater.step\n for X, y in train_iter:\n with autograd.record():\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.size)\n return metric[0] / metric[2], metric[1] / metric[2]", "pytorch": "import torch\nfrom IPython import display\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True)\nb = torch.zeros(num_outputs, requires_grad=True)\nX = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = torch.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = torch.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = torch.tensor([0, 2])\ny_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - torch.log(y_hat[range(len(y_hat)), y])\ncross_entropy(y_hat, 
y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n cmp = y_hat.type(y.dtype) == y\n return float(cmp.type(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, torch.nn.Module):\n net.eval()\n metric = Accumulator(2)\n with torch.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, torch.nn.Module):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, torch.optim.Optimizer):\n updater.zero_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]"} +{"id": 389, "mxnet": "from mxnet import gluon, init, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential()\nnet.add(nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = torch.optim.SGD(net.parameters(), lr=0.1)"} +{"id": 390, "mxnet": "%matplotlib inline\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nx = np.arange(-8.0, 8.0, 0.1)\nx.attach_grad()\nwith autograd.record():\n y = npx.relu(x)\nd2l.plot(x, y, 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of relu', figsize=(5, 2.5))\nwith autograd.record():\n y = npx.sigmoid(x)\nd2l.plot(x, y, 'x', 'sigmoid(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))\nwith autograd.record():\n y = np.tanh(x)\nd2l.plot(x, y, 'x', 'tanh(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))", "pytorch": "%matplotlib inline\nimport torch\nfrom d2l import torch as d2l\nx = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)\ny = torch.relu(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(torch.ones_like(x), retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of relu', figsize=(5, 2.5))\ny = torch.sigmoid(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = torch.tanh(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))"} +{"id": 391, "mxnet": "from mxnet import gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens))\nb1 = 
np.zeros(num_hiddens)\nW2 = np.random.normal(scale=0.01, size=(num_hiddens, num_outputs))\nb2 = np.zeros(num_outputs)\nparams = [W1, b1, W2, b2]\nfor param in params:\n param.attach_grad()\ndef relu(X):\n return np.maximum(X, 0)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(np.dot(X, W1) + b1)\n return np.dot(H, W2) + b2\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\nnum_epochs, lr = 10, 0.1\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = nn.Parameter(torch.randn(\n num_inputs, num_hiddens, requires_grad=True) * 0.01)\nb1 = nn.Parameter(torch.zeros(num_hiddens, requires_grad=True))\nW2 = nn.Parameter(torch.randn(\n num_hiddens, num_outputs, requires_grad=True) * 0.01)\nb2 = nn.Parameter(torch.zeros(num_outputs, requires_grad=True))\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = torch.zeros_like(X)\n return torch.max(X, a)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(X@W1 + b1)\n return (H@W2 + b2)\nloss = nn.CrossEntropyLoss(reduction='none')\nnum_epochs, lr = 10, 0.1\nupdater = torch.optim.SGD(params, lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)"} +{"id": 392, "mxnet": "from mxnet import gluon, init, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation='relu'), nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 393, "mxnet": "import math\nfrom mxnet import gluon, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(l.sum(), d2l.size(l))\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = gluon.loss.L2Loss()\n net = nn.Sequential()\n net.add(nn.Dense(1, use_bias=False))\n net.initialize()\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False)\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01})\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n 
d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))", "pytorch": "import math\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\ntrue_w, features, poly_features, labels = [torch.tensor(x, dtype=torch.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n out = net(X)\n y = y.reshape(out.shape)\n l = loss(out, y)\n metric.add(l.sum(), l.numel())\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = nn.MSELoss(reduction='none')\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels.reshape(-1,1)), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape(-1,1)), batch_size, is_train=False)\n trainer = torch.optim.SGD(net.parameters(), lr=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))"} +{"id": 394, "mxnet": "%matplotlib inline\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = np.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = np.random.normal(scale=1, size=(num_inputs, 1))\n b = np.zeros(1)\n w.attach_grad()\n b.attach_grad()\n return [w, b]\ndef l2_penalty(w):\n return (w**2).sum() / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = nn.Sequential()\n net.add(nn.Dense(1))\n net.initialize(init.Normal(sigma=1))\n loss = gluon.loss.L2Loss()\n num_epochs, lr = 100, 0.003\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr, 'wd': wd})\n net.collect_params('.*bias').setattr('wd_mult', 0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), 
d2l.evaluate_loss(net, test_iter, loss)))", "pytorch": "%matplotlib inline\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = torch.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = torch.normal(0, 1, size=(num_inputs, 1), requires_grad=True)\n b = torch.zeros(1, requires_grad=True)\n return [w, b]\ndef l2_penalty(w):\n return torch.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = nn.Sequential(nn.Linear(num_inputs, 1))\n for param in net.parameters():\n param.data.normal_()\n loss = nn.MSELoss(reduction='none')\n num_epochs, lr = 100, 0.003\n trainer = torch.optim.SGD([{\"params\":net[0].weight,'weight_decay': wd}, {\"params\":net[0].bias}], lr=lr)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n trainer.zero_grad()\n l = loss(net(X), y)\n l.mean().backward()\n trainer.step()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1,\n (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"} +{"id": 395, "mxnet": "from mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return np.zeros_like(X)\n if dropout == 0:\n return X\n mask = np.random.uniform(0, 1, X.shape) > dropout\n return mask.astype(np.float32) * X / (1.0 - dropout)\nX = np.arange(16).reshape(2, 8)\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\nW1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens1))\nb1 = np.zeros(num_hiddens1)\nW2 = np.random.normal(scale=0.01, size=(num_hiddens1, num_hiddens2))\nb2 = np.zeros(num_hiddens2)\nW3 = np.random.normal(scale=0.01, size=(num_hiddens2, num_outputs))\nb3 = np.zeros(num_outputs)\nparams = [W1, b1, W2, b2, W3, b3]\nfor param in params:\n param.attach_grad()\ndropout1, dropout2 = 0.2, 0.5\ndef net(X):\n X = X.reshape(-1, num_inputs)\n H1 = npx.relu(np.dot(X, W1) + b1)\n if autograd.is_training():\n H1 = dropout_layer(H1, dropout1)\n H2 = npx.relu(np.dot(H1, W2) + b2)\n if autograd.is_training():\n H2 = dropout_layer(H2, dropout2)\n return np.dot(H2, W3) + b3\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation=\"relu\"),\n nn.Dropout(dropout1),\n nn.Dense(256, activation=\"relu\"),\n nn.Dropout(dropout2),\n 
nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return torch.zeros_like(X)\n if dropout == 0:\n return X\n mask = (torch.rand(X.shape) > dropout).float()\n return mask * X / (1.0 - dropout)\nX= torch.arange(16, dtype = torch.float32).reshape((2, 8))\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Module):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2, is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 396, "mxnet": "trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nx = np.arange(-8.0, 8.0, 0.1)\nx.attach_grad()\nwith autograd.record():\n y = npx.sigmoid(x)\ny.backward()\nd2l.plot(x, [y, x.grad], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = np.random.normal(size=(4, 4))\nfor i in range(100):\n M = np.dot(M, np.random.normal(size=(4, 4)))", "pytorch": "trainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport torch\nfrom d2l import torch as d2l\nx = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)\ny = torch.sigmoid(x)\ny.backward(torch.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = torch.normal(0, 1, size=(4,4))\nfor i in range(100):\n M = torch.mm(M,torch.normal(0, 1, size=(4, 4)))"} +{"id": 397, "mxnet": "%matplotlib inline\nimport pandas as pd\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nn_train = train_data.shape[0]\ntrain_features = np.array(all_features[:n_train].values, dtype=np.float32)\ntest_features = np.array(all_features[n_train:].values, 
dtype=np.float32)\ntrain_labels = np.array(train_data.SalePrice.values.reshape(-1, 1), dtype=np.float32)\nloss = gluon.loss.L2Loss()\ndef get_net():\n net = nn.Sequential()\n net.add(nn.Dense(1))\n net.initialize()\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = np.clip(net(features), 1, float('inf'))\n return np.sqrt(2 * loss(np.log(clipped_preds), np.log(labels)).mean())\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': learning_rate, 'wd': weight_decay})\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = np.concatenate([X_train, X_part], 0)\n y_train = np.concatenate([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).asnumpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)", "pytorch": "%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nn_train = train_data.shape[0]\ntrain_features = torch.tensor(all_features[:n_train].values, dtype=torch.float32)\ntest_features = torch.tensor(all_features[n_train:].values, dtype=torch.float32)\ntrain_labels = torch.tensor(train_data.SalePrice.values.reshape(-1, 1), dtype=torch.float32)\nloss = nn.MSELoss()\nin_features = train_features.shape[1]\ndef get_net():\n net = nn.Sequential(nn.Linear(in_features,1))\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = torch.clamp(net(features), 1, float('inf'))\n rmse = torch.sqrt(loss(torch.log(clipped_preds), torch.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = torch.optim.Adam(net.parameters(), lr = learning_rate, weight_decay = weight_decay)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n optimizer.zero_grad()\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, 
test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = torch.cat([X_train, X_part], 0)\n y_train = torch.cat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).detach().numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)"} +{"id": 398, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation='relu'))\nnet.add(nn.Dense(10))\nnet.initialize()\nX = np.random.uniform(size=(2, 20))\nnet(X)\nclass MLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.hidden = nn.Dense(256, activation='relu')\n self.out = nn.Dense(10)\n def forward(self, X):\n return self.out(self.hidden(X))\nnet = MLP()\nnet.initialize()\nnet(X)\nclass MySequential(nn.Block):\n def add(self, block):\n\n self._children[block.name] = block\n def forward(self, X):\n for block in self._children.values():\n X = block(X)\n return X\nnet = MySequential()\nnet.add(nn.Dense(256, activation='relu'))\nnet.add(nn.Dense(10))\nnet.initialize()\nnet(X)\nclass FixedHiddenMLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.rand_weight = self.params.get_constant('rand_weight', np.random.uniform(size=(20, 20)))\n self.dense = nn.Dense(20, activation='relu')\n def forward(self, X):\n X = self.dense(X)\n X = npx.relu(np.dot(X, self.rand_weight.data()) + 1)\n X = self.dense(X)\n while np.abs(X).sum() > 1:\n X /= 2\n return X.sum()\nnet = FixedHiddenMLP()\nnet.initialize()\nnet(X)\nclass NestMLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.net = nn.Sequential()\n self.net.add(nn.Dense(64, activation='relu'), nn.Dense(32, activation='relu'))\n self.dense = nn.Dense(16, activation='relu')\n def forward(self, X):\n return self.dense(self.net(X))\nchimera = nn.Sequential()\nchimera.add(NestMLP(), nn.Dense(20), FixedHiddenMLP())\nchimera.initialize()\nchimera(X)", "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = torch.rand(2, 20)\nnet(X)\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nnet = MLP()\nnet(X)\nclass MySequential(nn.Module):\n def __init__(self, *args):\n super().__init__()\n for idx, module in enumerate(args):\n self._modules[str(idx)] = module\n def forward(self, X):\n for block in self._modules.values():\n X = block(X)\n return X\nnet = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nnet(X)\nclass FixedHiddenMLP(nn.Module):\n def __init__(self):\n 
super().__init__()\n self.rand_weight = torch.rand((20, 20), requires_grad=False)\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(torch.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nnet = FixedHiddenMLP()\nnet(X)\nclass NestMLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)"} +{"id": 399, "mxnet": "from mxnet import init, np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(8, activation='relu'))\nnet.add(nn.Dense(1))\nnet.initialize()\nX = np.random.uniform(size=(2, 4))\nnet(X)\nnet.collect_params()['dense1_bias'].data()\ndef block1():\n net = nn.Sequential()\n net.add(nn.Dense(32, activation='relu'))\n net.add(nn.Dense(16, activation='relu'))\n return net\ndef block2():\n net = nn.Sequential()\n for _ in range(4):\n net.add(block1())\n return net\nrgnet = nn.Sequential()\nrgnet.add(block2())\nrgnet.add(nn.Dense(10))\nrgnet.initialize()\nrgnet(X)\nnet.initialize(init=init.Normal(sigma=0.01), force_reinit=True)\nnet[0].weight.data()[0]\nnet.initialize(init=init.Constant(1), force_reinit=True)\nnet[0].weight.data()[0]\nnet[0].weight.initialize(init=init.Xavier(), force_reinit=True)\nnet[1].initialize(init=init.Constant(42), force_reinit=True)\nclass MyInit(init.Initializer):\n def _init_weight(self, name, data):\n data[:] = np.random.uniform(-10, 10, data.shape)\n data *= np.abs(data) >= 5\nnet.initialize(MyInit(), force_reinit=True)\nnet[0].weight.data()[:2]\nnet[0].weight.data()[:] += 1\nnet[0].weight.data()[0, 0] = 42\nnet[0].weight.data()[0]\nlayer = CenteredLayer()\nlayer(np.array([1, 2, 3, 4, 5]))\nnet = nn.Sequential()\nnet.add(nn.Dense(128), CenteredLayer())\nnet.initialize()", "pytorch": "import torch\nfrom torch import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = torch.rand(size=(2, 4))\nnet(X)\nnet.state_dict()['2.bias'].data\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_module(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, mean=0, std=0.01)\n nn.init.zeros_(m.bias)\nnet.apply(init_normal)\nnet[0].weight.data[0], net[0].bias.data[0]\ndef init_constant(m):\n if type(m) == nn.Linear:\n nn.init.constant_(m.weight, 1)\n nn.init.zeros_(m.bias)\nnet.apply(init_constant)\nnet[0].weight.data[0], net[0].bias.data[0]\ndef init_xavier(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n nn.init.constant_(m.weight, 42)\nnet[0].apply(init_xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n nn.init.uniform_(m.weight, -10, 10)\n m.weight.data *= m.weight.data.abs() >= 5\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.data[:] += 1\nnet[0].weight.data[0, 0] = 42\nnet[0].weight.data[0]\nlayer = CenteredLayer()\nlayer(torch.FloatTensor([1, 2, 3, 4, 5]))\nnet = nn.Sequential(nn.Linear(8, 128), CenteredLayer())"} +{"id": 400, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nclass CenteredLayer(nn.Block):\n def 
__init__(self, **kwargs):\n super().__init__(**kwargs)\n def forward(self, X):\n return X - X.mean()\nY = net(np.random.uniform(size=(4, 8)))\nY.mean()\nclass MyDense(nn.Block):\n def __init__(self, units, in_units, **kwargs):\n super().__init__(**kwargs)\n self.weight = self.params.get('weight', shape=(in_units, units))\n self.bias = self.params.get('bias', shape=(units,))\n def forward(self, x):\n linear = np.dot(x, self.weight.data(ctx=x.ctx)) + self.bias.data(\n ctx=x.ctx)\n return npx.relu(linear)\ndense = MyDense(units=3, in_units=5)\ndense.params\ndense.initialize()\ndense(np.random.uniform(size=(2, 5)))\nnet = nn.Sequential()\nnet.add(MyDense(8, in_units=64), MyDense(1, in_units=8))\nnet.initialize()\nnet(np.random.uniform(size=(2, 64)))", "pytorch": "import torch\nimport torch.nn.functional as F\nfrom torch import nn\nclass CenteredLayer(nn.Module):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(torch.rand(4, 8))\nY.mean()\nclass MyLinear(nn.Module):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = nn.Parameter(torch.randn(in_units, units))\n self.bias = nn.Parameter(torch.randn(units,))\n def forward(self, X):\n linear = torch.matmul(X, self.weight.data) + self.bias.data\n return F.relu(linear)\nlinear = MyLinear(5, 3)\nlinear.weight\nlinear(torch.rand(2, 5))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(torch.rand(2, 64))"} +{"id": 401, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nx = np.arange(4)\nnpx.save('x-file', x)\nx2 = npx.load('x-file')\ny = np.zeros(4)\nnpx.save('x-files', [x, y])\nx2, y2 = npx.load('x-files')\nmydict = {'x': x, 'y': y}\nnpx.save('mydict', mydict)\nmydict2 = npx.load('mydict')\nclass MLP(nn.Block):\n def __init__(self, **kwargs):\n super(MLP, self).__init__(**kwargs)\n self.hidden = nn.Dense(256, activation='relu')\n self.output = nn.Dense(10)\n def forward(self, x):\n return self.output(self.hidden(x))\nnet = MLP()\nnet.initialize()\nX = np.random.uniform(size=(2, 20))\nY = net(X)\nnet.save_parameters('mlp.params')\nclone = MLP()\nclone.load_parameters('mlp.params')", "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nx = torch.arange(4)\ntorch.save(x, 'x-file')\nx2 = torch.load('x-file')\ny = torch.zeros(4)\ntorch.save([x, y],'x-files')\nx2, y2 = torch.load('x-files')\nmydict = {'x': x, 'y': y}\ntorch.save(mydict, 'mydict')\nmydict2 = torch.load('mydict')\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = torch.randn(size=(2, 20))\nY = net(X)\ntorch.save(net.state_dict(), 'mlp.params')\nclone = MLP()\nclone.load_state_dict(torch.load('mlp.params'))\nclone.eval()"} +{"id": 402, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnpx.cpu(), npx.gpu(), npx.gpu(1)\nnpx.num_gpus()\ndef try_gpu(i=0):\n return npx.gpu(i) if npx.num_gpus() >= i + 1 else npx.cpu()\ndef try_all_gpus():\n devices = [npx.gpu(i) for i in range(npx.num_gpus())]\n return devices if devices else [npx.cpu()]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = np.array([1, 2, 3])\nx.ctx\nX = np.ones((2, 3), ctx=try_gpu())\nY = np.random.uniform(size=(2, 3), ctx=try_gpu(1))\nZ = X.copyto(try_gpu(1))\nZ.as_in_ctx(try_gpu(1)) is Z\nnet = nn.Sequential()\nnet.add(nn.Dense(1))\nnet.initialize(ctx=try_gpu())\nnet[0].weight.data().ctx", "pytorch": 
"import torch\nfrom torch import nn\ntorch.device('cpu'), torch.device('cuda'), torch.device('cuda:1')\ntorch.cuda.device_count()\ndef try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return devices = [torch.device(f'cuda:{i}')\n return torch.device('cpu')\ndef try_all_gpus(): devices = [torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count())]\n return devices if devices else [torch.device('cpu')]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = torch.tensor([1, 2, 3])\nx.device\nX = torch.ones(2, 3, device=try_gpu())\nY = torch.rand(2, 3, device=try_gpu(1))\nZ = X.cuda(1)\nZ.cuda(1) is Z\nnet = nn.Sequential(nn.Linear(3, 1))\nnet = net.to(device=try_gpu())\nnet[0].weight.data.device"} +{"id": 403, "mxnet": "from mxnet import autograd, np, npx from mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef corr2d(X, K):\n h, w = K.shape\n Y = np.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = np.array([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Block):\n def __init__(self, kernel_size, **kwargs):\n super().__init__(**kwargs)\n self.weight = self.params.get('weight', shape=kernel_size)\n self.bias = self.params.get('bias', shape=(1,))\n def forward(self, x):\n return corr2d(x, self.weight.data()) + self.bias.data()\nX = np.ones((6, 8))\nX[:, 2:6] = 0\nK = np.array([[1.0, -1.0]])\ncorr2d(d2l.transpose(X), K)\nconv2d = nn.Conv2D(1, kernel_size=(1, 2), use_bias=False)\nconv2d.initialize()\n\nX = X.reshape(1, 1, 6, 8)\nY = Y.reshape(1, 1, 6, 7)\nlr = 3e-2\nfor i in range(10):\n with autograd.record():\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n l.backward()\n conv2d.weight.data()[:] -= lr * conv2d.weight.grad()\nconv2d.weight.data().reshape((1, 2))", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef corr2d(X, K):\n h, w = K.shape\n Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = torch.tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Module):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = nn.Parameter(torch.rand(kernel_size))\n self.bias = nn.Parameter(torch.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = torch.ones((6, 8))\nX[:, 2:6] = 0\nK = torch.tensor([[1.0, -1.0]])\ncorr2d(X.t(), K)\nconv2d = nn.Conv2d(1,1, kernel_size=(1, 2), bias=False)\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.zero_grad()\n l.sum().backward()\n conv2d.weight.data[:] -= lr * conv2d.weight.grad\nconv2d.weight.data.reshape((1, 2))"} +{"id": 404, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\ndef comp_conv2d(conv2d, X):\n conv2d.initialize()\n X = X.reshape((1, 1) + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(1, kernel_size=3, padding=1)\nX = np.random.uniform(size=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=3, padding=1, strides=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=(3, 5), padding=(0, 1), 
strides=(3, 4))\ncomp_conv2d(conv2d, X).shape", "pytorch": "import torch\nfrom torch import nn\n\ndef comp_conv2d(conv2d, X):\n X = X.reshape((1, 1) + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1)\nX = torch.rand(size=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape"} +{"id": 405, "mxnet": "from mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = np.array([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return np.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = np.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = np.dot(K, X)\n return Y.reshape((c_o, h, w))\nX = np.random.normal(0, 1, (3, 3, 3))\nK = np.random.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(np.abs(Y1 - Y2).sum()) < 1e-6", "pytorch": "import torch\nfrom d2l import torch as d2l\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = torch.tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = torch.tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return torch.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = torch.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = torch.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = torch.normal(0, 1, (3, 3, 3))\nK = torch.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(torch.abs(Y1 - Y2).sum()) < 1e-6"} +{"id": 406, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = np.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = np.arange(16, dtype=np.float32).reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, strides=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), strides=(2, 3))\npool2d(X)\nX = np.concatenate((X, X + 1), 1)\npool2d = nn.MaxPool2D(3, padding=1, strides=2)\npool2d(X)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = torch.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + 
p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = torch.arange(16, dtype=torch.float32).reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2d(3)\npool2d(X)\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2d((2, 3), stride=(2, 3), padding=(0, 1))\npool2d(X)\nX = torch.cat((X, X + 1), 1)\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)"} +{"id": 407, "mxnet": "from mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Conv2D(channels=6, kernel_size=5, padding=2, activation='sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120, activation='sigmoid'),\n nn.Dense(84, activation='sigmoid'),\n nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 28, 28))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n net.initialize(force_reinit=True, ctx=device, init=init.Xavier())\n loss = gluon.loss.SoftmaxCrossEntropyLoss()\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n X, y = X.as_in_ctx(device), y.as_in_ctx(device)\n with autograd.record():\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n trainer.step(X.shape[0])\n metric.add(l.sum(), d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32)\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape: \t',X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2d:\n nn.init.xavier_uniform_(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = torch.optim.SGD(net.parameters(), lr=lr)\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.zero_grad()\n X, y = X.to(device), y.to(device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with torch.no_grad():\n 
metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))"} +{"id": 408, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(\n nn.Conv2D(96, kernel_size=11, strides=4, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Conv2D(256, kernel_size=5, padding=2, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),\n nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),\n nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(\n nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = torch.randn(1, 1, 224, 224)\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)"} +{"id": 409, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef vgg_block(num_convs, num_channels):\n blk = nn.Sequential()\n for _ in range(num_convs):\n blk.add(nn.Conv2D(num_channels, kernel_size=3, padding=1, activation='relu'))\n blk.add(nn.MaxPool2D(pool_size=2, strides=2))\n return blk\ndef vgg(conv_arch):\n net = nn.Sequential()\n for (num_convs, num_channels) in conv_arch:\n net.add(vgg_block(num_convs, num_channels))\n net.add(nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(10))\n return net\nnet = vgg(conv_arch)\nnet.initialize()\nX = np.random.uniform(size=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.name, 'output shape:\t', X.shape)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2d(kernel_size=2,stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(\n *conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), 
nn.ReLU(), nn.Dropout(0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5),\n nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = torch.randn(size=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)"} +{"id": 410, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef nin_block(num_channels, kernel_size, strides, padding):\n blk = nn.Sequential()\n blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding, activation='relu'),\n nn.Conv2D(num_channels, kernel_size=1, activation='relu'),\n nn.Conv2D(num_channels, kernel_size=1, activation='relu'))\n return blk\nnet = nn.Sequential()\nnet.add(nin_block(96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(pool_size=3, strides=2),\n nin_block(256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(pool_size=3, strides=2),\n nin_block(384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Dropout(0.5),\n nin_block(10, kernel_size=3, strides=1, padding=1),\n nn.GlobalAvgPool2D(),\n nn.Flatten())\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU(),\n nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2d(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2d(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2d(3, stride=2),\n nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten())\nX = torch.rand(size=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 411, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nclass Inception(nn.Block):\n def __init__(self, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(c1, kernel_size=1, activation='relu')\n self.p2_1 = nn.Conv2D(c2[0], kernel_size=1, activation='relu')\n self.p2_2 = nn.Conv2D(c2[1], kernel_size=3, padding=1, activation='relu')\n self.p3_1 = nn.Conv2D(c3[0], kernel_size=1, activation='relu')\n self.p3_2 = nn.Conv2D(c3[1], kernel_size=5, padding=2, activation='relu')\n self.p4_1 = nn.MaxPool2D(pool_size=3, strides=1, padding=1)\n self.p4_2 = nn.Conv2D(c4, kernel_size=1, activation='relu')\n def forward(self, x):\n p1 = self.p1_1(x)\n p2 = self.p2_2(self.p2_1(x))\n p3 = self.p3_2(self.p3_1(x))\n p4 = self.p4_2(self.p4_1(x))\n return np.concatenate((p1, p2, p3, p4), axis=1)\nb1 = nn.Sequential()\nb1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb2 = nn.Sequential()\nb2.add(nn.Conv2D(64, kernel_size=1, activation='relu'),\n nn.Conv2D(192, kernel_size=3, padding=1, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb3 = nn.Sequential()\nb3.add(Inception(64, (96, 128), (16, 32), 32),\n Inception(128, (128, 192), (32, 96), 
64),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb4 = nn.Sequential()\nb4.add(Inception(192, (96, 208), (16, 48), 64),\n Inception(160, (112, 224), (24, 64), 64),\n Inception(128, (128, 256), (24, 64), 64),\n Inception(112, (144, 288), (32, 64), 64),\n Inception(256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb5 = nn.Sequential()\nb5.add(Inception(256, (160, 320), (32, 128), 128),\n Inception(384, (192, 384), (48, 128), 128),\n nn.GlobalAvgPool2D())\nnet = nn.Sequential()\nnet.add(b1, b2, b3, b4, b5, nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 96, 96))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nclass Inception(nn.Module):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)\n self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return torch.cat((p1, p2, p3, p4), dim=1)\nb1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2d(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2d((1,1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = torch.rand(size=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 412, "mxnet": "from mxnet import autograd, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n if not autograd.is_training():\n X_hat = (X - moving_mean) / np.sqrt(moving_var + eps)\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = X.mean(axis=0)\n var = ((X - mean) ** 2).mean(axis=0)\n else:\n mean = X.mean(axis=(0, 2, 3), keepdims=True)\n var = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True)\n X_hat = (X - mean) / np.sqrt(var + eps)\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, 
moving_var\nclass BatchNorm(nn.Block):\n def __init__(self, num_features, num_dims, **kwargs):\n super().__init__(**kwargs)\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.params.get('gamma', shape=shape, init=init.One())\n self.beta = self.params.get('beta', shape=shape, init=init.Zero())\n self.moving_mean = np.zeros(shape)\n self.moving_var = np.ones(shape)\n def forward(self, X):\n if self.moving_mean.ctx != X.ctx:\n self.moving_mean = self.moving_mean.copyto(X.ctx)\n self.moving_var = self.moving_var.copyto(X.ctx)\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma.data(), self.beta.data(), self.moving_mean,\n self.moving_var, eps=1e-12, momentum=0.9)\n return Y\nnet = nn.Sequential()\nnet.add(nn.Conv2D(6, kernel_size=5),\n BatchNorm(6, num_dims=4),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(16, kernel_size=5),\n BatchNorm(16, num_dims=4),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120),\n BatchNorm(120, num_dims=2),\n nn.Activation('sigmoid'),\n nn.Dense(84),\n BatchNorm(84, num_dims=2),\n nn.Activation('sigmoid'),\n nn.Dense(10))\nnet[1].gamma.data().reshape(-1,), net[1].beta.data().reshape(-1,)\nnet = nn.Sequential()\nnet.add(nn.Conv2D(6, kernel_size=5),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(16, kernel_size=5),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.Dense(84),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.Dense(10))", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n if not torch.is_grad_enabled():\n X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps)\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = X.mean(dim=0)\n var = ((X - mean) ** 2).mean(dim=0)\n else:\n mean = X.mean(dim=(0, 2, 3), keepdim=True)\n var = ((X - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / torch.sqrt(var + eps)\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean.data, moving_var.data\nclass BatchNorm(nn.Module):\n def __init__(self, num_features, num_dims):\n super().__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = nn.Parameter(torch.ones(shape))\n self.beta = nn.Parameter(torch.zeros(shape))\n self.moving_mean = torch.zeros(shape)\n self.moving_var = torch.ones(shape)\n def forward(self, X):\n if self.moving_mean.device != X.device:\n self.moving_mean = self.moving_mean.to(X.device)\n self.moving_var = self.moving_var.to(X.device)\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9)\n return Y\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),\n nn.Linear(16*4*4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nnet[1].gamma.reshape((-1,)), 
net[1].beta.reshape((-1,))\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5), nn.BatchNorm2d(6), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1d(120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1d(84), nn.Sigmoid(),\n nn.Linear(84, 10))"} +{"id": 413, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nclass Residual(nn.Block):\n def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):\n super().__init__(**kwargs)\n self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1, strides=strides)\n self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(num_channels, kernel_size=1, strides=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm()\n self.bn2 = nn.BatchNorm()\n def forward(self, X):\n Y = npx.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n return npx.relu(Y + X)\nblk = Residual(3)\nblk.initialize()\nX = np.random.uniform(size=(4, 3, 6, 6))\nblk(X).shape\nblk = Residual(6, use_1x1conv=True, strides=2)\nblk.initialize()\nblk(X).shape\nnet = nn.Sequential()\nnet.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\ndef resnet_block(num_channels, num_residuals, first_block=False):\n blk = nn.Sequential()\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.add(Residual(num_channels, use_1x1conv=True, strides=2))\n else:\n blk.add(Residual(num_channels))\n return blk\nnet.add(resnet_block(64, 2, first_block=True),\n resnet_block(128, 2),\n resnet_block(256, 2),\n resnet_block(512, 2))\nnet.add(nn.GlobalAvgPool2D(), nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nclass Residual(nn.Module):\n def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1):\n super().__init__()\n self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2d(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2d(num_channels)\n self.bn2 = nn.BatchNorm2d(num_channels)\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3,3)\nX = torch.rand(4, 3, 6, 6)\nY = blk(X)\nY.shape\nblk = Residual(3,6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\ndef resnet_block(input_channels, num_channels, num_residuals, first_block=False):\n blk = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))\n else:\n blk.append(Residual(num_channels, num_channels))\n return blk\nb2 = nn.Sequential(*resnet_block(64, 64, 2, 
first_block=True))\nb3 = nn.Sequential(*resnet_block(64, 128, 2))\nb4 = nn.Sequential(*resnet_block(128, 256, 2))\nb5 = nn.Sequential(*resnet_block(256, 512, 2))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2d((1,1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = torch.rand(size=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 414, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef conv_block(num_channels):\n blk = nn.Sequential()\n blk.add(nn.BatchNorm(),\n nn.Activation('relu'),\n nn.Conv2D(num_channels, kernel_size=3, padding=1))\n return blk\nclass DenseBlock(nn.Block):\n def __init__(self, num_convs, num_channels, **kwargs):\n super().__init__(**kwargs)\n self.net = nn.Sequential()\n for _ in range(num_convs):\n self.net.add(conv_block(num_channels))\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = np.concatenate((X, Y), axis=1)\n return X\nblk = DenseBlock(2, 10)\nblk.initialize()\nX = np.random.uniform(size=(4, 3, 8, 8))\nY = blk(X)\nY.shape\ndef transition_block(num_channels):\n blk = nn.Sequential()\n blk.add(nn.BatchNorm(), nn.Activation('relu'),\n nn.Conv2D(num_channels, kernel_size=1),\n nn.AvgPool2D(pool_size=2, strides=2))\n return blk\nblk = transition_block(10)\nblk.initialize()\nblk(Y).shape\nnet = nn.Sequential()\nnet.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n net.add(DenseBlock(num_convs, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n num_channels //= 2\n net.add(transition_block(num_channels))\nnet.add(nn.BatchNorm(),\n nn.Activation('relu'),\n nn.GlobalAvgPool2D(),\n nn.Dense(10))", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2d(input_channels), nn.ReLU(),\n nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Module):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = torch.cat((X, Y), dim=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = torch.randn(4, 3, 8, 8)\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2d(input_channels), nn.ReLU(),\n nn.Conv2d(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2d(kernel_size=2, stride=2))\nblk = transition_block(23, 10)\nblk(Y).shape\nb1 = nn.Sequential(\n nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nblks = []\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n blks.append(DenseBlock(num_convs, num_channels, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n blks.append(transition_block(num_channels, num_channels // 2))\n num_channels = num_channels // 2\nnet = 
nn.Sequential(\n b1, *blks,\n nn.BatchNorm2d(num_channels), nn.ReLU(),\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))"} +{"id": 415, "mxnet": "%matplotlib inline\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nT = 1000\ntime = np.arange(1, T + 1, dtype=np.float32)\nx = np.sin(0.01 * time) + np.random.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = np.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef get_net():\n net = nn.Sequential()\n net.add(nn.Dense(10, activation='relu'),\n nn.Dense(1))\n net.initialize(init.Xavier())\n return net\nloss = gluon.loss.L2Loss()\ndef train(net, train_iter, loss, epochs, lr):\n trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr})\n for epoch in range(epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.asnumpy(), onestep_preds.asnumpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = np.zeros(T)\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.asnumpy(), onestep_preds.asnumpy(),\n multistep_preds[n_train + tau:].asnumpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = np.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape(-1)\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].asnumpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))", "pytorch": "%matplotlib inline\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nT = 1000\ntime = torch.arange(1, T + 1, dtype=torch.float32)\nx = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = torch.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = torch.optim.Adam(net.parameters(), lr)\n for epoch in range(epochs):\n for X, y in train_iter:\n trainer.zero_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.detach().numpy(), 
onestep_preds.detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = torch.zeros(T)\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = torch.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape(-1)\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"} +{"id": 416, "mxnet": "import collections\nimport re\nfrom d2l import mxnet as d2l", "pytorch": "import collections\nimport re\nfrom d2l import torch as d2l"} +{"id": 417, "mxnet": "import random\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield np.array(X), np.array(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = np.array(corpus[offset: offset + num_tokens])\n Ys = np.array(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y", "pytorch": "import random\nimport torch\nfrom d2l import torch as d2l\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield torch.tensor(X), torch.tensor(Y)\ndef seq_data_iter_sequential(corpus, batch_size, 
num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = torch.tensor(corpus[offset: offset + num_tokens])\n Ys = torch.tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"} +{"id": 418, "mxnet": "from mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nX, W_xh = np.random.normal(0, 1, (3, 1)), np.random.normal(0, 1, (1, 4))\nH, W_hh = np.random.normal(0, 1, (3, 4)), np.random.normal(0, 1, (4, 4))\nnp.dot(X, W_xh) + np.dot(H, W_hh)\nnp.dot(np.concatenate((X, H), 1), np.concatenate((W_xh, W_hh), 0))", "pytorch": "import torch\nfrom d2l import torch as d2l\nX, W_xh = torch.normal(0, 1, (3, 1)), torch.normal(0, 1, (1, 4))\nH, W_hh = torch.normal(0, 1, (3, 4)), torch.normal(0, 1, (4, 4))\ntorch.matmul(X, W_xh) + torch.matmul(H, W_hh)\ntorch.matmul(torch.cat((X, H), 1), torch.cat((W_xh, W_hh), 0))"} +{"id": 419, "mxnet": "%matplotlib inline\nimport math\nfrom mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnpx.one_hot(np.array([0, 2]), len(vocab))\nX = np.arange(10).reshape((2, 5))\nnpx.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n W_xh = normal((num_inputs, num_hiddens))\n W_hh = normal((num_hiddens, num_hiddens))\n b_h = np.zeros(num_hiddens, ctx=device)\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_rnn_state(batch_size, num_hiddens, device):\n return (np.zeros((batch_size, num_hiddens), ctx=device), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = np.tanh(np.dot(X, W_xh) + np.dot(H, W_hh) + b_h)\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens, device)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = npx.one_hot(X.T, self.vocab_size)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size, ctx):\n return self.init_state(batch_size, self.num_hiddens, ctx)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0], d2l.try_gpu())\nY, new_state = net(X.as_in_context(d2l.try_gpu()), state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1, ctx=device)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: np.array([outputs[-1]], ctx=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n 
outputs.append(int(y.argmax(axis=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\ndef grad_clipping(net, theta):\n if isinstance(net, gluon.Block):\n params = [p.data() for p in net.collect_params().values()]\n else:\n params = net.params\n norm = math.sqrt(sum((p.grad ** 2).sum() for p in params))\n if norm > theta:\n for param in params:\n param.grad[:] *= theta / norm\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], ctx=device)\n else:\n for s in state:\n s.detach()\n y = Y.T.reshape(-1)\n X, y = X.as_in_ctx(device), y.as_in_ctx(device)\n with autograd.record():\n y_hat, state = net(X, state)\n l = loss(y_hat, y).mean()\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * d2l.size(y), d2l.size(y))\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = gluon.loss.SoftmaxCrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, gluon.Block):\n net.initialize(ctx=device, force_reinit=True, init=init.Normal(0.01))\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n updater = lambda batch_size: trainer.step(batch_size)\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])", "pytorch": "%matplotlib inline\nimport math\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nF.one_hot(torch.tensor([0, 2]), len(vocab))\nX = torch.arange(10).reshape((2, 5))\nF.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device) * 0.01\n W_xh = normal((num_inputs, num_hiddens))\n W_hh = normal((num_hiddens, num_hiddens))\n b_h = torch.zeros(num_hiddens, device=device)\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_rnn_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = torch.tanh(torch.mm(X, W_xh) + torch.mm(H, W_hh) + b_h)\n Y = torch.mm(H, W_hq) + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens, device)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = F.one_hot(X.T, self.vocab_size).type(torch.float32)\n return self.forward_fn(X, state, self.params)\n def 
begin_state(self, batch_size, device):\n return self.init_state(batch_size, self.num_hiddens, device)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0], d2l.try_gpu())\nY, new_state = net(X.to(d2l.try_gpu()), state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1, device=device)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.argmax(dim=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\ndef grad_clipping(net, theta):\n if isinstance(net, nn.Module):\n params = [p for p in net.parameters() if p.requires_grad]\n else:\n params = net.params\n norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))\n if norm > theta:\n for param in params:\n param.grad[:] *= theta / norm\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], device=device)\n else:\n if isinstance(net, nn.Module) and not isinstance(state, tuple):\n state.detach_()\n else:\n for s in state:\n s.detach_()\n y = Y.T.reshape(-1)\n X, y = X.to(device), y.to(device)\n y_hat, state = net(X, state)\n l = loss(y_hat, y.long()).mean()\n if isinstance(updater, torch.optim.Optimizer):\n updater.zero_grad()\n l.backward()\n grad_clipping(net, 1)\n updater.step()\n else:\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * y.numel(), y.numel())\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, nn.Module):\n updater = torch.optim.SGD(net.parameters(), lr)\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])"} +{"id": 420, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn, rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = rnn.RNN(num_hiddens)\nrnn_layer.initialize()\nstate = rnn_layer.begin_state(batch_size=batch_size)\nlen(state), state[0].shape\nX = np.random.uniform(size=(num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, len(state_new), state_new[0].shape\nclass RNNModel(nn.Block):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.dense = nn.Dense(vocab_size)\n def forward(self, inputs, state):\n X = npx.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.dense(Y.reshape(-1, Y.shape[-1]))\n return output, state\n def 
begin_state(self, *args, **kwargs):\n return self.rnn.begin_state(*args, **kwargs)\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, len(vocab))\nnet.initialize(force_reinit=True, ctx=device)\nd2l.predict_ch8('time traveller', 10, net, vocab, device)", "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = nn.RNN(len(vocab), num_hiddens)\nstate = torch.zeros((1, batch_size, num_hiddens))\nstate.shape\nX = torch.rand(size=(num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, state_new.shape\nclass RNNModel(nn.Module):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.num_hiddens = self.rnn.hidden_size\n if not self.rnn.bidirectional:\n self.num_directions = 1\n self.linear = nn.Linear(self.num_hiddens, self.vocab_size)\n else:\n self.num_directions = 2\n self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)\n def forward(self, inputs, state):\n X = F.one_hot(inputs.T.long(), self.vocab_size)\n X = X.to(torch.float32)\n Y, state = self.rnn(X, state)\n output = self.linear(Y.reshape((-1, Y.shape[-1])))\n return output, state\n def begin_state(self, device, batch_size=1):\n if not isinstance(self.rnn, nn.LSTM):\n return torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device)\n else:\n return (torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device),\n torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device))\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, vocab_size=len(vocab))\nnet = net.to(device)\nd2l.predict_ch8('time traveller', 10, net, vocab, device)"} +{"id": 421, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_gru_state(batch_size, num_hiddens, device):\n return (np.zeros(shape=(batch_size, num_hiddens), ctx=device), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = npx.sigmoid(np.dot(X, W_xz) + np.dot(H, W_hz) + b_z)\n R = npx.sigmoid(np.dot(X, W_xr) + np.dot(H, W_hr) + b_r)\n H_tilda = np.tanh(np.dot(X, W_xh) + np.dot(R * H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H,)\ngru_layer = rnn.GRU(num_hiddens)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "pytorch": "import 
torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_gru_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = torch.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)\n R = torch.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)\n H_tilda = torch.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = H @ W_hq + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H,)\nnum_inputs = vocab_size\ngru_layer = nn.GRU(num_inputs, num_hiddens)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nmodel = model.to(device)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"} +{"id": 422, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_lstm_state(batch_size, num_hiddens, device):\n return (np.zeros((batch_size, num_hiddens), ctx=device), np.zeros((batch_size, num_hiddens), ctx=device))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = npx.sigmoid(np.dot(X, W_xi) + np.dot(H, W_hi) + b_i)\n F = npx.sigmoid(np.dot(X, W_xf) + np.dot(H, W_hf) + b_f)\n O = npx.sigmoid(np.dot(X, W_xo) + np.dot(H, W_ho) + b_o)\n C_tilda = np.tanh(np.dot(X, W_xc) + np.dot(H, W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * np.tanh(C)\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H, C)\nlstm_layer = rnn.LSTM(num_hiddens)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs 
= vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_lstm_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), torch.zeros((batch_size, num_hiddens), device=device))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = torch.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)\n F = torch.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)\n O = torch.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)\n C_tilda = torch.tanh((X @ W_xc) + (H @ W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * torch.tanh(C)\n Y = (H @ W_hq) + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H, C)\nnum_inputs = vocab_size\nlstm_layer = nn.LSTM(num_inputs, num_hiddens)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nmodel = model.to(device)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"} +{"id": 423, "mxnet": "import os\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['']] for l in lines]\n array = np.array([truncate_pad(l, num_steps, vocab['']) for l in lines])\n valid_len = (array != vocab['']).astype(np.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(np.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.astype(np.int32))\n print('Valid length of Y:', Y_valid_len)\n break", "pytorch": "import os\nimport torch\nfrom d2l import torch as d2l\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['']] for l in lines]\n array = torch.tensor([truncate_pad(l, num_steps, vocab['']) for l in lines])\n valid_len = (array != vocab['']).type(torch.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.type(torch.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.type(torch.int32))\n print('Valid length of Y:', Y_valid_len)\n break"} +{"id": 424, "paddle": "x = paddle.arange(12)\nX = paddle.reshape(x, (3, 4))\npaddle.zeros((2, 3, 4))\npaddle.ones((2, 3, 4))\npaddle.randn((3, 4),'float32')\npaddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = paddle.to_tensor([1.0, 2, 4, 8])\ny = paddle.to_tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x**y\npaddle.exp(x)\nX = paddle.arange(12, dtype='float32').reshape((3, 4))\nY = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\npaddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1)\na = paddle.reshape(paddle.arange(3), (3, 1))\nb = paddle.reshape(paddle.arange(2), (1, 2))\nZ = paddle.zeros_like(Y)\nZ = X + Y\nA = X.numpy()\nB = paddle.to_tensor(A)\ntype(A), type(B)\na = 
paddle.to_tensor([3.5])\na, a.item(), float(a), int(a)", "pytorch": "x = torch.arange(12)\nX = x.reshape(3, 4)\ntorch.zeros((2, 3, 4))\ntorch.ones((2, 3, 4))\ntorch.randn(3, 4)\ntorch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = torch.tensor([1.0, 2, 4, 8])\ny = torch.tensor([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\ntorch.exp(x)\nX = torch.arange(12, dtype=torch.float32).reshape((3,4))\nY = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\ntorch.cat((X, Y), dim=0), torch.cat((X, Y), dim=1)\na = torch.arange(3).reshape((3, 1))\nb = torch.arange(2).reshape((1, 2))\nZ = torch.zeros_like(Y)\nZ[:] = X + Y\nA = X.numpy()\nB = torch.tensor(A)\na = torch.tensor([3.5])\nprint(a, a.item(), float(a), int(a))"} +{"id": 425, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nX, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values)", "pytorch": "import torch\nX, y = torch.tensor(inputs.values), torch.tensor(outputs.values)"} +{"id": 426, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nx = paddle.to_tensor([3.0])\ny = paddle.to_tensor([2.0])\nx + y, x * y, x / y, x**y\nx = paddle.arange(4)\nA = paddle.reshape(paddle.arange(20), (5, 4))\npaddle.transpose(A, perm=[1, 0])\nB = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == paddle.transpose(B, perm=[1, 0])\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\nA = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4))\nB = A.clone()\nA, A + B\na = 2\nX = paddle.reshape(paddle.arange(24), (2, 3, 4))\na + X, (a * X).shape\nx = paddle.arange(4, dtype=paddle.float32)\nprint(x, x.sum())\nA.shape, A.sum()\nA.mean(), A.sum() / A.numel()\nA.mean(axis=0), A.sum(axis=0) / A.shape[0]\nsum_A = paddle.sum(A, axis=1, keepdim=True)\ny = paddle.ones(shape=[4], dtype='float32')\nx, y, paddle.dot(x, y)\npaddle.sum(x * y)\nA.shape, x.shape, paddle.mv(A, x)\nB = paddle.ones(shape=[4, 3], dtype='float32')\npaddle.mm(A, B)\nu = paddle.to_tensor([3.0, -4.0])\npaddle.norm(u)\npaddle.abs(u).sum()\npaddle.norm(paddle.ones(shape=[4, 9], dtype='float32'))", "pytorch": "import torch\nx = torch.tensor(3.0)\ny = torch.tensor(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = torch.arange(4)\nA = torch.arange(20).reshape(5, 4)\nA.T\nB = torch.tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == B.T\nX = torch.arange(24).reshape(2, 3, 4)\nA = torch.arange(20, dtype=torch.float32).reshape(5, 4)\nB = A.clone()\nprint(A, A + B)\na = 2\nX = torch.arange(24).reshape(2, 3, 4)\nprint(a + X, (a * X).shape)\nx = torch.arange(4, dtype=torch.float32)\nprint(x, x.sum())\na = A.sum()\nA.mean()\nA.sum() / A.numel()\nA.mean(axis=0)\nA.sum(axis=0) / A.shape[0]\nsum_A = A.sum(axis=1, keepdims=True)\ny = torch.ones(4, dtype = torch.float32)\nprint(torch.dot(x, y))\ntorch.sum(x * y)\nA.shape, x.shape, torch.mv(A, x)\nB = torch.ones(4, 3)\ntorch.mm(A, B)\nu = torch.tensor([3.0, -4.0])\ntorch.norm(u)\ntorch.abs(u).sum()\ntorch.norm(torch.ones((4, 9)))"} +{"id": 427, "paddle": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import paddle as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1", "pytorch": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import torch as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x\ndef numerical_lim(f, x, h):\n 
return (f(x + h) - f(x)) / h\nh = 0.1\nfor i in range(5):\n print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')\n h *= 0.1"} +{"id": 428, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nx = paddle.arange(4, dtype='float32')\nx = paddle.to_tensor(x, stop_gradient=False)\ny = 2 * paddle.dot(x, x)\nx.clear_gradient()\ny = paddle.sum(x)\ny.backward()\nx.grad\nx.clear_gradient()\ny = x * x\npaddle.sum(y).backward()\nx.grad\nx.clear_gradient()\ny = x * x\nu = y.detach()\nz = u * x\npaddle.sum(z).backward()\nx.grad == u\nx.clear_gradient()\npaddle.sum(y).backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while paddle.norm(b) < 1000:\n b = b * 2\n if paddle.sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False)\nd = f(a)\nd.backward()", "pytorch": "import torch\nx = torch.arange(4.0)\nx.requires_grad_(True)\nx.grad\ny = 2 * torch.dot(x, x)\nx.grad.zero_()\ny = x.sum()\ny.backward()\nx.grad\nx.grad.zero_()\ny = x * x\ny.sum().backward()\nx.grad\nx.grad.zero_()\ny = x * x\nu = y.detach()\nz = u * x\nz.sum().backward()\nx.grad == u\nx.grad.zero_()\ny.sum().backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while b.norm() < 1000:\n b = b * 2\n if b.sum() > 0:\n c = b\n else:\n c = 100 * b\n return c\na = torch.randn(size=(), requires_grad=True)\nd = f(a)\nd.backward()"} +{"id": 429, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport numpy as np\nimport paddle\nfair_probs = [1.0 / 6] * 6\npaddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample()\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000\ncounts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()\ncounts / 1000", "pytorch": "%matplotlib inline\nimport torch\nfrom torch.distributions import multinomial\nfrom d2l import torch as d2l\nfair_probs = torch.ones([6]) / 6\nmultinomial.Multinomial(1, fair_probs).sample()\nmultinomial.Multinomial(10, fair_probs).sample()\ncounts = multinomial.Multinomial(1000, fair_probs).sample()"} +{"id": 430, "paddle": "counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1))\ncum_counts = counts.cumsum(axis=0)\ncum_counts = cum_counts.squeeze(axis=1)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdim=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i],\n label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend()\nimport warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nhelp(paddle.ones)\npaddle.ones([4], dtype='float32')", "pytorch": "counts = multinomial.Multinomial(10, fair_probs).sample((500,))\ncum_counts = counts.cumsum(dim=0)\nestimates = cum_counts / cum_counts.sum(dim=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].numpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nimport torch\na = dir(torch.distributions)\nhelp(torch.ones)\ntorch.ones(4)"} +{"id": 431, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as 
d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport time\nimport numpy as np\nimport paddle\nn = 10000\na = paddle.ones([n])\nb = paddle.ones([n])\nc = paddle.zeros([n])\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x',\n ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])", "pytorch": "%matplotlib inline\nimport math\nimport time\nimport numpy as np\nimport torch\nfrom d2l import torch as d2l\nn = 10000\na = torch.ones(n)\nb = torch.ones(n)\nc = torch.zeros(n)\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"} +{"id": 432, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ndef synthetic_data(w, b, num_examples):\n X = paddle.normal(0, 1, (num_examples, len(w)))\n y = paddle.matmul(X, w) + b\n y += paddle.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n break\nw = paddle.normal(0, 0.01, shape=(2,1))\nb = paddle.zeros(shape=[1])\nw.stop_gradient = False\nb.stop_gradient = False\ndef linreg(X, w, b):\n return paddle.matmul(X, w) + b\n with paddle.no_grad():\n for i, param in enumerate(params):\n param -= lr * params[i].grad / batch_size\n params[i].set_value(param)\n params[i].clear_gradient()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with paddle.no_grad():\n train_l = loss(net(features, w, b), labels)", "pytorch": "%matplotlib inline\nimport random\nimport torch\nfrom d2l import torch as d2l\ndef synthetic_data(w, b, num_examples):\n X = torch.normal(0, 1, (num_examples, len(w)))\n y = torch.matmul(X, w) + b\n y += torch.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = torch.tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = torch.tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nbatch_size = 10\nfor X, y in data_iter(batch_size, features, labels):\n print(X, '\n', y)\n break\nw = torch.normal(0, 0.01, size=(2,1), requires_grad=True)\nb = torch.zeros(1, 
requires_grad=True)\ndef linreg(X, w, b):\n return torch.matmul(X, w) + b\ndef sgd(params, lr, batch_size):\n with torch.no_grad():\n for param in params:\n param -= lr * param.grad / batch_size\n param.grad.zero_()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with torch.no_grad():\n train_l = loss(net(features, w, b), labels)"} +{"id": 433, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport paddle\ntrue_w = paddle.to_tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = paddle.io.TensorDataset(data_arrays)\n return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01))\nbias_attr = paddle.ParamAttr(initializer=None)\nnet = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters())\nw = net[0].weight\nb = net[0].bias", "pytorch": "import numpy as np\nimport torch\nfrom torch.utils import data\nfrom d2l import torch as d2l\ntrue_w = torch.tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = data.TensorDataset(*data_arrays)\n return data.DataLoader(dataset, batch_size, shuffle=is_train)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom torch import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nnet[0].weight.data.normal_(0, 0.01)\nnet[0].bias.data.fill_(0)\ntrainer = torch.optim.SGD(net.parameters(), lr=0.03)\nw = net[0].weight.data\nb = net[0].bias.data"} +{"id": 434, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport sys\nimport paddle\nfrom paddle.vision import transforms\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\nmnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if paddle.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\n return 4\ntrain_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers())\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = paddle.vision.datasets.FashionMNIST(mode=\"train\", transform=trans)\n 
mnist_test = paddle.vision.datasets.FashionMNIST(mode=\"test\", transform=trans)\n return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()),\n paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, num_workers=get_dataloader_workers()))", "pytorch": "%matplotlib inline\nimport torch\nimport torchvision\nfrom torch.utils import data\nfrom torchvision import transforms\nfrom d2l import torch as d2l\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = torchvision.datasets.FashionMNIST(\n root=\"../data\", train=True, transform=trans, download=True)\nmnist_test = torchvision.datasets.FashionMNIST(\n root=\"../data\", train=False, transform=trans, download=True)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if torch.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(data.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape(18, 28, 28), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\n return 4\ntrain_iter = data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = torchvision.datasets.FashionMNIST(root=\"../data\", train=True, transform=trans, download=True)\n mnist_test = torchvision.datasets.FashionMNIST(root=\"../data\", train=False, transform=trans, download=True)\n return (data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),\n data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))"} +{"id": 435, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom IPython import display\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs))\nb = paddle.zeros(shape=(num_outputs,))\nW.stop_gradient=False\nb.stop_gradient=False\nX = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = paddle.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = paddle.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = paddle.to_tensor([0, 2])\ny_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n if len(y_hat.shape) < len(y.shape):\n cmp = y_hat.astype(y.dtype) == y.squeeze()\n else:\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, 
data_iter):\n if isinstance(net, paddle.nn.Layer):\n net.eval()\n metric = Accumulator(2)\n with paddle.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, paddle.nn.Layer):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]", "pytorch": "import torch\nfrom IPython import display\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True)\nb = torch.zeros(num_outputs, requires_grad=True)\nX = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = torch.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = torch.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = torch.tensor([0, 2])\ny_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - torch.log(y_hat[range(len(y_hat)), y])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n cmp = y_hat.type(y.dtype) == y\n return float(cmp.type(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, torch.nn.Module):\n net.eval()\n metric = Accumulator(2)\n with torch.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, torch.nn.Module):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, torch.optim.Optimizer):\n updater.zero_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]"} +{"id": 436, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.Normal(m.weight, std=0.01)\nnet.apply(init_weights);\ntrainer = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters())", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\ntrainer = torch.optim.SGD(net.parameters(), lr=0.1)"} +{"id": 437, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(-8.0, 8.0, 0.1, 
dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.relu(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = paddle.nn.functional.sigmoid(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = paddle.tanh(x)\nd2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.clear_gradient()\ny.backward(paddle.ones_like(x), retain_graph=True)\nd2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))", "pytorch": "%matplotlib inline\nimport torch\nfrom d2l import torch as d2l\nx = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)\ny = torch.relu(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(torch.ones_like(x), retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of relu', figsize=(5, 2.5))\ny = torch.sigmoid(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = torch.tanh(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))"} +{"id": 438, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = paddle.randn([num_inputs, num_hiddens]) * 0.01\nW1.stop_gradient = False\nb1 = paddle.zeros([num_hiddens])\nb1.stop_gradient = False\nW2 = paddle.randn([num_hiddens, num_outputs]) * 0.01\nW2.stop_gradient = False\nb2 = paddle.zeros([num_outputs])\nb2.stop_gradient = False\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = paddle.zeros_like(X)\n return paddle.maximum(X, a)\nnum_epochs, lr = 10, 0.1\nupdater = paddle.optimizer.SGD(learning_rate=lr, parameters=params)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = nn.Parameter(torch.randn(\n num_inputs, num_hiddens, requires_grad=True) * 0.01)\nb1 = nn.Parameter(torch.zeros(num_hiddens, requires_grad=True))\nW2 = nn.Parameter(torch.randn(\n num_hiddens, num_outputs, requires_grad=True) * 0.01)\nb2 = nn.Parameter(torch.zeros(num_outputs, requires_grad=True))\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = torch.zeros_like(X)\n return torch.max(X, a)\nnum_epochs, lr = 10, 0.1\nupdater = torch.optim.SGD(params, lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)"} +{"id": 439, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Linear(256, 10))\nfor layer in net:\n if type(layer) == nn.Linear:\n weight_attr = 
paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=0.01))\n layer.weight_attr = weight_attr\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 440, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport numpy as np\nimport paddle\nfrom paddle import nn\ntrue_w, features, poly_features, labels = [paddle.to_tensor(x, dtype=\n paddle.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef train(train_features, test_features, train_labels, test_labels,\n num_epochs=400):\n loss = nn.MSELoss()\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array(((train_features, train_labels.reshape([-1,1]))), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, is_train=False)\n trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2],\n labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :],\n labels[:n_train], labels[n_train:], num_epochs=1500)", "pytorch": "import math\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\ntrue_w, features, poly_features, labels = [torch.tensor(x, dtype=torch.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = nn.MSELoss(reduction='none')\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels.reshape(-1,1)), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape(-1,1)), batch_size, is_train=False)\n trainer = torch.optim.SGD(net.parameters(), lr=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 
0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:])\ntrain(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:], num_epochs=1500)"} +{"id": 441, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = paddle.normal(0, 1, shape=(num_inputs, 1))\n w.stop_gradient = False\n b = paddle.zeros(shape=[1])\n b.stop_gradient = False\n return [w, b]\ndef l2_penalty(w):\n return paddle.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter():\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n bias_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))\n net = nn.Sequential(nn.Linear(num_inputs, 1, weight_attr=weight_attr, bias_attr=bias_attr))\n loss = nn.MSELoss()\n num_epochs, lr = 100, 0.003\n trainer = paddle.optimizer.SGD(parameters=net[0].parameters(), learning_rate=lr, weight_decay=wd*1.0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n trainer.step()\n trainer.clear_grad()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))", "pytorch": "%matplotlib inline\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = torch.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = torch.normal(0, 1, size=(num_inputs, 1), requires_grad=True)\n b = torch.zeros(1, requires_grad=True)\n return [w, b]\ndef l2_penalty(w):\n return torch.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, 
batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = nn.Sequential(nn.Linear(num_inputs, 1))\n for param in net.parameters():\n param.data.normal_()\n loss = nn.MSELoss(reduction='none')\n num_epochs, lr = 100, 0.003\n trainer = torch.optim.SGD([{\"params\":net[0].weight,'weight_decay': wd}, {\"params\":net[0].bias}], lr=lr)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n trainer.zero_grad()\n l = loss(net(X), y)\n l.mean().backward()\n trainer.step()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1,\n (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"} +{"id": 442, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport random\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return paddle.zeros_like(X)\n if dropout == 0:\n return X\n mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32')\n return mask * X / (1.0 - dropout)\nX= paddle.arange(16, dtype = paddle.float32).reshape((2, 8))\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Layer):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,\n is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nweight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01))\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256, weight_attr=weight_attr),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10, weight_attr=weight_attr))\ntrainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return torch.zeros_like(X)\n if dropout == 0:\n return X\n mask = (torch.rand(X.shape) > dropout).float()\n return mask * X / (1.0 - dropout)\nX= torch.arange(16, dtype = torch.float32).reshape((2, 8))\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Module):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2, is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = 
is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 443, "paddle": "trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nx = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='float32')\nx.stop_gradient = False\ny = paddle.nn.functional.sigmoid(x)\ny.backward(paddle.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()],\n legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = paddle.normal(0, 1, shape=(4,4))\nfor i in range(100):\n M = paddle.mm(M, paddle.normal(0, 1, shape=(4, 4)))", "pytorch": "trainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport torch\nfrom d2l import torch as d2l\nx = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)\ny = torch.sigmoid(x)\ny.backward(torch.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = torch.normal(0, 1, size=(4,4))\nfor i in range(100):\n M = torch.mm(M,torch.normal(0, 1, size=(4, 4)))"} +{"id": 444, "paddle": "%matplotlib inline\nimport warnings\nimport numpy as np\nimport pandas as pd\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nfrom d2l import paddle as d2l\nn_train = train_data.shape[0]\ntrain_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32)\ntest_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32)\ntrain_labels = paddle.to_tensor(\n train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32)\ndef log_rmse(net, features, labels):\n clipped_preds = paddle.clip(net(features), 1, float('inf'))\n rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, 
parameters=net.parameters(), weight_decay=weight_decay*1.0)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n optimizer.clear_grad()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = paddle.concat([X_train, X_part], 0)\n y_train = paddle.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid", "pytorch": "%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nn_train = train_data.shape[0]\ntrain_features = torch.tensor(all_features[:n_train].values, dtype=torch.float32)\ntest_features = torch.tensor(all_features[n_train:].values, dtype=torch.float32)\ntrain_labels = torch.tensor(train_data.SalePrice.values.reshape(-1, 1), dtype=torch.float32)\ndef log_rmse(net, features, labels):\n clipped_preds = torch.clamp(net(features), 1, float('inf'))\n rmse = torch.sqrt(loss(torch.log(clipped_preds), torch.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = torch.optim.Adam(net.parameters(), lr = learning_rate, weight_decay = weight_decay)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n optimizer.zero_grad()\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = torch.cat([X_train, X_part], 0)\n y_train = torch.cat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid"} +{"id": 445, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = paddle.rand([2, 20])\nnet(X)\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nclass MySequential(nn.Layer):\n def __init__(self, *layers):\n super(MySequential, self).__init__()\n if len(layers) > 0 and isinstance(layers[0], tuple):\n for name, layer in layers:\n self.add_sublayer(name, layer)\n else:\n for idx, layer in enumerate(layers):\n self.add_sublayer(str(idx), layer)\n def forward(self, X):\n for layer in self._sub_layers.values():\n X = layer(X)\n return X\nclass FixedHiddenMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.rand_weight = 
paddle.rand([20, 20])\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nclass NestMLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),\n nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)", "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = torch.rand(2, 20)\nnet(X)\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nclass MySequential(nn.Module):\n def __init__(self, *args):\n super().__init__()\n for idx, module in enumerate(args):\n self._modules[str(idx)] = module\n def forward(self, X):\n for block in self._modules.values():\n X = block(X)\n return X\nclass FixedHiddenMLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.rand_weight = torch.rand((20, 20), requires_grad=False)\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(torch.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nclass NestMLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)"} +{"id": 446, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = paddle.rand([2, 4])\nnet(X)\nnet.state_dict()['2.bias']\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_sublayer(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Normal(mean=0.0, std=0.01)\n paddle.zeros(m.bias)\nnet.apply(init_normal)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef init_constant(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(value = 1)\n paddle.zeros(m.bias)\nnet.apply(init_constant)\nnet[0].weight[0],net[0].state_dict()['bias']\ndef xavier(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.XavierUniform(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.Constant(42)\nnet[0].apply(xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n paddle.nn.initializer.XavierUniform(m.weight, -10, 10)\n h = paddle.abs(m.weight) >= 5\n h = paddle.to_tensor(h)\n m = paddle.to_tensor(m.weight)\n m *= h\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.set_value(net[0].weight.numpy() + 1)\nval = net[0].weight.numpy()\nval[0, 0] = 42\nnet[0].weight.set_value(val)\nnet[0].weight[0]\nlayer = CenteredLayer()\nlayer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32'))", "pytorch": "import torch\nfrom torch import 
nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = torch.rand(size=(2, 4))\nnet(X)\nnet.state_dict()['2.bias'].data\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_module(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, mean=0, std=0.01)\n nn.init.zeros_(m.bias)\nnet.apply(init_normal)\nnet[0].weight.data[0], net[0].bias.data[0]\ndef init_constant(m):\n if type(m) == nn.Linear:\n nn.init.constant_(m.weight, 1)\n nn.init.zeros_(m.bias)\nnet.apply(init_constant)\nnet[0].weight.data[0], net[0].bias.data[0]\ndef init_xavier(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n nn.init.constant_(m.weight, 42)\nnet[0].apply(init_xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n nn.init.uniform_(m.weight, -10, 10)\n m.weight.data *= m.weight.data.abs() >= 5\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.data[:] += 1\nnet[0].weight.data[0, 0] = 42\nnet[0].weight.data[0]\nlayer = CenteredLayer()\nlayer(torch.FloatTensor([1, 2, 3, 4, 5]))"} +{"id": 447, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nclass CenteredLayer(nn.Layer):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(paddle.rand([4, 8]))\nY.mean()\nclass MyLinear(nn.Layer):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = paddle.create_parameter(shape=(in_units, units), dtype='float32')\n self.bias = paddle.create_parameter(shape=(units,), dtype='float32')\n def forward(self, X):\n linear = paddle.matmul(X, self.weight) + self.bias\n return F.relu(linear)\nlinear(paddle.randn([2, 5]))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(paddle.rand([2, 64]))", "pytorch": "import torch\nimport torch.nn.functional as F\nfrom torch import nn\nclass CenteredLayer(nn.Module):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(torch.rand(4, 8))\nY.mean()\nclass MyLinear(nn.Module):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = nn.Parameter(torch.randn(in_units, units))\n self.bias = nn.Parameter(torch.randn(units,))\n def forward(self, X):\n linear = torch.matmul(X, self.weight.data) + self.bias.data\n return F.relu(linear)\nlinear(torch.rand(2, 5))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(torch.rand(2, 64))"} +{"id": 448, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nx = paddle.arange(4)\npaddle.save(x, 'x-file')\nx2 = paddle.load('x-file')\ny = paddle.zeros([4])\npaddle.save([x,y], 'x-file')\nx2, y2 = paddle.load('x-file')\nmydict = {'x': x, 'y': y}\npaddle.save(mydict, 'mydict')\nmydict2 = paddle.load('mydict')\nclass MLP(nn.Layer):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = paddle.randn(shape=[2, 20])\nY = net(X)\npaddle.save(net.state_dict(), 'mlp.pdparams')\nclone = MLP()\nclone.set_state_dict(paddle.load('mlp.pdparams'))\nclone.eval()", "pytorch": "import torch\nfrom 
torch import nn\nfrom torch.nn import functional as F\nx = torch.arange(4)\ntorch.save(x, 'x-file')\nx2 = torch.load('x-file')\ny = torch.zeros(4)\ntorch.save([x, y],'x-files')\nx2, y2 = torch.load('x-files')\nmydict = {'x': x, 'y': y}\ntorch.save(mydict, 'mydict')\nmydict2 = torch.load('mydict')\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = torch.randn(size=(2, 20))\nY = net(X)\ntorch.save(net.state_dict(), 'mlp.params')\nclone = MLP()\nclone.load_state_dict(torch.load('mlp.params'))\nclone.eval()"} +{"id": 449, "paddle": "import paddle\nfrom paddle import nn\npaddle.device.set_device(\"cpu\"), paddle.CUDAPlace(0), paddle.CUDAPlace(1)\npaddle.device.cuda.device_count()\ndef try_gpu(i=0):\n if paddle.device.cuda.device_count() >= i + 1:\n return paddle.CUDAPlace(i)\n return paddle.CPUPlace()\ndef try_all_gpus():\n devices = [paddle.CUDAPlace(i) for i in range(paddle.device.cuda.device_count())]\n return devices if devices else paddle.CPUPlace()\ntry_gpu(),try_gpu(10),try_all_gpus()\nx = paddle.to_tensor([1, 2, 3])\nx.place\nX = paddle.to_tensor(paddle.ones(shape=[2, 3]), place=try_gpu())\nY = paddle.to_tensor(paddle.rand([2, 3]), place=try_gpu(1))\nnet = nn.Sequential(nn.Linear(3, 1))\nnet=net.to(try_gpu())\nnet[0].weight.place", "pytorch": "import torch\nfrom torch import nn\ntorch.device('cpu'), torch.device('cuda'), torch.device('cuda:1')\ntorch.cuda.device_count()\ndef try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return torch.device(f'cuda:{i}')\n return torch.device('cpu')\ndef try_all_gpus():\n devices = [torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count())]\n return devices if devices else [torch.device('cpu')]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = torch.tensor([1, 2, 3])\nx.device\nX = torch.ones(2, 3, device=try_gpu())\nY = torch.rand(2, 3, device=try_gpu(1))\nnet = nn.Sequential(nn.Linear(3, 1))\nnet = net.to(device=try_gpu())\nnet[0].weight.data.device"} +{"id": 450, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef corr2d(X, K):\n h, w = K.shape\n Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Layer):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = paddle.ParamAttr(paddle.rand(kernel_size))\n self.bias = paddle.ParamAttr(paddle.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = paddle.ones((6, 8))\nX[:, 2:6] = 0\nK = paddle.to_tensor([[1.0, -1.0]])\nconv2d = nn.Conv2D(1, 1, kernel_size=(1, 2))\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.clear_gradients()\n l.sum().backward()\n with paddle.no_grad():\n conv2d.weight[:] -= lr * conv2d.weight.grad\nconv2d.weight.reshape((1, 2))", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef corr2d(X, K):\n h, w = K.shape\n Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = 
torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = torch.tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Module):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = nn.Parameter(torch.rand(kernel_size))\n self.bias = nn.Parameter(torch.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = torch.ones((6, 8))\nX[:, 2:6] = 0\nK = torch.tensor([[1.0, -1.0]])\nconv2d = nn.Conv2d(1,1, kernel_size=(1, 2), bias=False)\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.zero_grad()\n l.sum().backward()\n conv2d.weight.data[:] -= lr * conv2d.weight.grad\nconv2d.weight.data.reshape((1, 2))"} +{"id": 451, "paddle": "import warnings\nwarnings.filterwarnings(action='ignore')\nimport paddle\nfrom paddle import nn\n\ndef comp_conv2d(conv2d, X):\n X = paddle.reshape(X, [1, 1] + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1)\nX = paddle.rand((8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape", "pytorch": "import torch\nfrom torch import nn\n\ndef comp_conv2d(conv2d, X):\n X = X.reshape((1, 1) + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1)\nX = torch.rand(size=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape"} +{"id": 452, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = paddle.to_tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = paddle.to_tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return paddle.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = paddle.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = paddle.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = paddle.normal(0, 1, (3, 3, 3))\nK = paddle.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(paddle.abs(Y1 - Y2).sum()) < 1e-6", "pytorch": "import torch\nfrom d2l import torch as d2l\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = torch.tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = torch.tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return torch.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = torch.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = 
K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = torch.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = torch.normal(0, 1, (3, 3, 3))\nK = torch.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(torch.abs(Y1 - Y2).sum()) < 1e-6"} +{"id": 453, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = paddle.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = paddle.arange(16, dtype=\"float32\").reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3, stride=3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), stride=(2, 3))\npool2d(X)\nX = paddle.concat((X, X + 1), 1)\npool2d = paddle.nn.MaxPool2D(3, padding=1, stride=2)\npool2d(X)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = torch.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = torch.arange(16, dtype=torch.float32).reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2d(3)\npool2d(X)\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2d((2, 3), stride=(2, 3), padding=(0, 1))\npool2d(X)\nX = torch.cat((X, X + 1), 1)\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)"} +{"id": 454, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn, optimizer\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.Sigmoid(),\n nn.AvgPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = paddle.rand((1, 1, 28, 28), 'float32')\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2D:\n nn.initializer.XavierUniform(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.clear_grad()\n X, y = paddle.to_tensor(X, place=device), paddle.to_tensor(y, place=device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with paddle.no_grad():\n metric.add(l * X.shape[0], 
d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32)\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape: \t',X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2d:\n nn.init.xavier_uniform_(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = torch.optim.SGD(net.parameters(), lr=lr)\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.zero_grad()\n X, y = X.to(device), y.to(device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with torch.no_grad():\n metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))"} +{"id": 455, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nnet = nn.Sequential(\n nn.Conv2D(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2),\n nn.Conv2D(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2D(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2), nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(\n nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(),\n 
nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = torch.randn(1, 1, 224, 224)\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)"} +{"id": 456, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2D(kernel_size=2, stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(*conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(0.5), nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = paddle.randn(shape=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2d(kernel_size=2,stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(\n *conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5),\n nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = torch.randn(size=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)"} +{"id": 457, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2D(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(out_channels, out_channels, kernel_size=1),\n nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(3, stride=2), nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU(),\n nn.Conv2d(out_channels, 
out_channels, kernel_size=1), nn.ReLU())\nnet = nn.Sequential(\n nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2d(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2d(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2d(3, stride=2),\n nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten())\nX = torch.rand(size=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 458, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nclass Inception(nn.Layer):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(in_channels, c1, kernel_size=1)\n self.p2_1 = nn.Conv2D(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2D(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2D(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2D(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2D(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2D(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return paddle.concat(x=[p1, p2, p3, p4], axis=1)\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2,padding=1))\nb2 = nn.Sequential(nn.Conv2D(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2D(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = paddle.rand(shape=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nclass Inception(nn.Module):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)\n self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = 
F.relu(self.p4_2(self.p4_1(x)))\n return torch.cat((p1, p2, p3, p4), dim=1)\nb1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2d(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2d((1,1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = torch.rand(size=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 459, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True):\n if not is_training:\n X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = paddle.mean(X)\n var = paddle.mean(((X - mean) ** 2))\n else:\n mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True)\n var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / (var + eps) ** 0.5\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Layer):\n def __init__(self, num_features, num_dims=4):\n super(BatchNorm, self).__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32')))\n self.beta = self.create_parameter(\n attr=None,\n shape=shape,\n dtype='float32',\n is_bias=False,\n default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32')))\n self.moving_mean = paddle.zeros(shape=shape, dtype='float32')\n self.moving_var = paddle.zeros(shape=shape, dtype='float32')\n def forward(self, X):\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9, is_training=self.training)\n return Y\nnet = nn.Sequential(\n nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(), nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nparam = net.parameters()\nprint('gamma:', param[2].numpy().reshape(-1))\nprint('beta:', param[3].numpy().reshape(-1))\nnet = nn.Sequential(\n nn.Conv2D(1, 6, 
kernel_size=5), nn.BatchNorm2D(6, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1), nn.Sigmoid(),\n nn.MaxPool2D(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(),\n nn.Linear(84, 10))", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n if not torch.is_grad_enabled():\n X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps)\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = X.mean(dim=0)\n var = ((X - mean) ** 2).mean(dim=0)\n else:\n mean = X.mean(dim=(0, 2, 3), keepdim=True)\n var = ((X - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / torch.sqrt(var + eps)\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean.data, moving_var.data\nclass BatchNorm(nn.Module):\n def __init__(self, num_features, num_dims):\n super().__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = nn.Parameter(torch.ones(shape))\n self.beta = nn.Parameter(torch.zeros(shape))\n self.moving_mean = torch.zeros(shape)\n self.moving_var = torch.ones(shape)\n def forward(self, X):\n if self.moving_mean.device != X.device:\n self.moving_mean = self.moving_mean.to(X.device)\n self.moving_var = self.moving_var.to(X.device)\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9)\n return Y\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),\n nn.Linear(16*4*4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nnet[1].gamma.reshape((-1,)), net[1].beta.reshape((-1,))\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5), nn.BatchNorm2d(6), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1d(120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1d(84), nn.Sigmoid(),\n nn.Linear(84, 10))"} +{"id": 460, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\nfrom paddle.nn import functional as F\nclass Residual(nn.Layer):\n def __init__(self, input_channels, num_channels, use_1x1conv=False,\n strides=1):\n super(Residual, self).__init__()\n self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2D(num_channels)\n self.bn2 = nn.BatchNorm2D(num_channels)\n self.relu = nn.ReLU()\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = 
self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3, 3)\nX = paddle.rand([4, 3, 6, 6])\nY = blk(X)\nY.shape\nblk = Residual(3, 6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2D((1, 1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = paddle.rand(shape=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nclass Residual(nn.Module):\n def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1):\n super().__init__()\n self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2d(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2d(num_channels)\n self.bn2 = nn.BatchNorm2d(num_channels)\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3,3)\nX = torch.rand(4, 3, 6, 6)\nY = blk(X)\nY.shape\nblk = Residual(3,6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2d((1,1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = torch.rand(size=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 461, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn as nn\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Layer):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = paddle.concat(x=[X, Y], axis=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = paddle.randn([4, 3, 8, 8])\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2D(input_channels), nn.ReLU(),\n nn.Conv2D(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2D(kernel_size=2, stride=2))\nb1 = nn.Sequential(\n nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2D(64), nn.ReLU(),\n nn.MaxPool2D(kernel_size=3, stride=2, padding=1))\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2D(num_channels), nn.ReLU(),\n nn.AdaptiveMaxPool2D((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2d(input_channels), nn.ReLU(),\n nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1))\nclass 
DenseBlock(nn.Module):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = torch.cat((X, Y), dim=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = torch.randn(4, 3, 8, 8)\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2d(input_channels), nn.ReLU(),\n nn.Conv2d(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2d(kernel_size=2, stride=2))\nb1 = nn.Sequential(\n nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2d(num_channels), nn.ReLU(),\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))"} +{"id": 462, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nT = 1000\ntime = paddle.arange(1, T + 1, dtype=paddle.float32)\nx = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = paddle.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.initializer.XavierUniform(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters())\n for epoch in range(epochs):\n for i,(X, y) in enumerate (train_iter()):\n trainer.clear_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nmultistep_preds = paddle.zeros([T])\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = paddle.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape([-1])\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))", "pytorch": "%matplotlib inline\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nT = 1000\ntime = torch.arange(1, T + 1, dtype=torch.float32)\nx = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = torch.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] 
= x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = torch.optim.Adam(net.parameters(), lr)\n for epoch in range(epochs):\n for X, y in train_iter:\n trainer.zero_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nmultistep_preds = torch.zeros(T)\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = torch.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape(-1)\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"} +{"id": 463, "paddle": "import collections\nimport re\nfrom d2l import paddle as d2l", "pytorch": "import collections\nimport re\nfrom d2l import torch as d2l"} +{"id": 464, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport random\nimport paddle\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield paddle.to_tensor(X), paddle.to_tensor(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = paddle.to_tensor(corpus[offset: offset + num_tokens])\n Ys = paddle.to_tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1))\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y", "pytorch": "import random\nimport torch\nfrom d2l import torch as d2l\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef 
seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield torch.tensor(X), torch.tensor(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = torch.tensor(corpus[offset: offset + num_tokens])\n Ys = torch.tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"} +{"id": 465, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nX, W_xh = paddle.normal(0, 1, (3, 1)), paddle.normal(0, 1, (1, 4))\nH, W_hh = paddle.normal(0, 1, (3, 4)), paddle.normal(0, 1, (4, 4))\npaddle.matmul(X, W_xh) + paddle.matmul(H, W_hh)\npaddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0))", "pytorch": "import torch\nfrom d2l import torch as d2l\nX, W_xh = torch.normal(0, 1, (3, 1)), torch.normal(0, 1, (1, 4))\nH, W_hh = torch.normal(0, 1, (3, 4)), torch.normal(0, 1, (4, 4))\ntorch.matmul(X, W_xh) + torch.matmul(H, W_hh)\ntorch.matmul(torch.cat((X, H), 1), torch.cat((W_xh, W_hh), 0))"} +{"id": 466, "paddle": "%matplotlib inline\nimport warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport math\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nF.one_hot(paddle.to_tensor([0, 2]), len(vocab))\nX = paddle.arange(10).reshape((2, 5))\nF.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)* 0.01\n W_xh = normal([num_inputs, num_hiddens])\n W_hh = normal([num_hiddens, num_hiddens])\n b_h = paddle.zeros(shape=[num_hiddens])\n W_hq = normal([num_hiddens, num_outputs])\n b_q = paddle.zeros(shape=[num_outputs])\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient=False\n return params\ndef init_rnn_state(batch_size, num_hiddens):\n return (paddle.zeros(shape=[batch_size, num_hiddens]), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = paddle.tanh(paddle.mm(X, W_xh) + paddle.mm(H, W_hh) + b_h)\n Y = paddle.mm(H, W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(x=outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = F.one_hot(X.T, self.vocab_size)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, 
batch_size):\n return self.init_state(batch_size, self.num_hiddens)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0])\nY, new_state = net(X, state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: paddle.to_tensor(outputs[-1], place=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(paddle.reshape(paddle.argmax(y,axis=1),shape=[1])))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\ndef grad_clipping(net, theta):\n if isinstance(net, nn.Layer):\n params = [p for p in net.parameters() if not p.stop_gradient]\n else:\n params = net.params\n norm = paddle.sqrt(sum(paddle.sum((p.grad ** 2)) for p in params))\n if norm > theta:\n with paddle.no_grad():\n for param in params:\n param.grad.set_value(param.grad * theta / norm)\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0])\n else:\n if isinstance(net, nn.Layer) and not isinstance(state, tuple):\n state.stop_gradient=True\n else:\n for s in state:\n s.stop_gradient=True\n y = paddle.reshape(Y.T,shape=[-1])\n X = paddle.to_tensor(X, place=device)\n y = paddle.to_tensor(y, place=device)\n y_hat, state = net(X, state)\n l = loss(y_hat, y).mean()\n if isinstance(updater, paddle.optimizer.Optimizer):\n updater.clear_grad()\n l.backward()\n grad_clipping(net, 1)\n updater.step()\n else:\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * y.numel(), y.numel())\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, nn.Layer):\n updater = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\nnet = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)", "pytorch": "%matplotlib inline\nimport math\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nF.one_hot(torch.tensor([0, 2]), len(vocab))\nX = torch.arange(10).reshape((2, 5))\nF.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device) * 0.01\n W_xh = normal((num_inputs, num_hiddens))\n W_hh = normal((num_hiddens, num_hiddens))\n b_h = torch.zeros(num_hiddens, device=device)\n W_hq = normal((num_hiddens, num_outputs))\n 
b_q = torch.zeros(num_outputs, device=device)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_rnn_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = torch.tanh(torch.mm(X, W_xh) + torch.mm(H, W_hh) + b_h)\n Y = torch.mm(H, W_hq) + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens, device)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = F.one_hot(X.T, self.vocab_size).type(torch.float32)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size, device):\n return self.init_state(batch_size, self.num_hiddens, device)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0], d2l.try_gpu())\nY, new_state = net(X.to(d2l.try_gpu()), state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1, device=device)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.argmax(dim=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\ndef grad_clipping(net, theta):\n if isinstance(net, nn.Module):\n params = [p for p in net.parameters() if p.requires_grad]\n else:\n params = net.params\n norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))\n if norm > theta:\n for param in params:\n param.grad[:] *= theta / norm\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], device=device)\n else:\n if isinstance(net, nn.Module) and not isinstance(state, tuple):\n state.detach_()\n else:\n for s in state:\n s.detach_()\n y = Y.T.reshape(-1)\n X, y = X.to(device), y.to(device)\n y_hat, state = net(X, state)\n l = loss(y_hat, y.long()).mean()\n if isinstance(updater, torch.optim.Optimizer):\n updater.zero_grad()\n l.backward()\n grad_clipping(net, 1)\n updater.step()\n else:\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * y.numel(), y.numel())\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, nn.Module):\n updater = torch.optim.SGD(net.parameters(), lr)\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) 
% 10 == 0:\n animator.add(epoch + 1, [ppl])\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)"} +{"id": 467, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nfrom paddle import nn\nfrom paddle.nn import functional as F\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = nn.SimpleRNN(len(vocab), num_hiddens, time_major=True)\nstate = paddle.zeros(shape=[1, batch_size, num_hiddens])\nstate.shape\nX = paddle.rand(shape=[num_steps, batch_size, len(vocab)])\nY, state_new = rnn_layer(X, state)\nY.shape, state_new.shape\nclass RNNModel(nn.Layer):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.num_hiddens = self.rnn.hidden_size\n if self.rnn.num_directions==1:\n self.num_directions = 1\n self.linear = nn.Linear(self.num_hiddens, self.vocab_size)\n else:\n self.num_directions = 2\n self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)\n def forward(self, inputs, state):\n X = F.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.linear(Y.reshape((-1, Y.shape[-1])))\n return output, state\n def begin_state(self, batch_size=1):\n if not isinstance(self.rnn, nn.LSTM):\n return paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens])\n else:\n return (paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]),\n paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]))\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, vocab_size=len(vocab))\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1.0\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)", "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = nn.RNN(len(vocab), num_hiddens)\nstate = torch.zeros((1, batch_size, num_hiddens))\nstate.shape\nX = torch.rand(size=(num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, state_new.shape\nclass RNNModel(nn.Module):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.num_hiddens = self.rnn.hidden_size\n if not self.rnn.bidirectional:\n self.num_directions = 1\n self.linear = nn.Linear(self.num_hiddens, self.vocab_size)\n else:\n self.num_directions = 2\n self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)\n def forward(self, inputs, state):\n X = F.one_hot(inputs.T.long(), self.vocab_size)\n X = X.to(torch.float32)\n Y, state = self.rnn(X, state)\n output = self.linear(Y.reshape((-1, Y.shape[-1])))\n return output, state\n def begin_state(self, device, batch_size=1):\n if not isinstance(self.rnn, nn.LSTM):\n return torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device)\n else:\n return (torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device),\n torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), 
device=device))\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, vocab_size=len(vocab))\nnet = net.to(device)\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)"} +{"id": 468, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as F\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_gru_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H,*_ = state\n outputs = []\n for X in inputs:\n Z = F.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)\n R = F.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)\n H_tilda = paddle.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = H @ W_hq + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H,*_)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\ngru_layer = nn.GRU(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_gru_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = torch.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)\n R = torch.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)\n H_tilda = torch.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = H @ W_hq + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H,)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = 
d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\ngru_layer = nn.GRU(num_inputs, num_hiddens)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nmodel = model.to(device)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"} +{"id": 469, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport paddle\nimport paddle.nn.functional as Function\nfrom paddle import nn\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return paddle.randn(shape=shape)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = paddle.zeros([num_outputs])\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.stop_gradient = False\n return params\ndef init_lstm_state(batch_size, num_hiddens):\n return (paddle.zeros([batch_size, num_hiddens]), paddle.zeros([batch_size, num_hiddens]))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = Function.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)\n F = Function.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)\n O = Function.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)\n C_tilda = paddle.tanh((X @ W_xc) + (H @ W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * paddle.tanh(C)\n Y = (H @ W_hq) + b_q\n outputs.append(Y)\n return paddle.concat(outputs, axis=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1.0\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\nlstm_layer = nn.LSTM(num_inputs, num_hiddens, time_major=True)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_lstm_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), torch.zeros((batch_size, num_hiddens), device=device))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = 
params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = torch.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)\n F = torch.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)\n O = torch.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)\n C_tilda = torch.tanh((X @ W_xc) + (H @ W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * torch.tanh(C)\n Y = (H @ W_hq) + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\nlstm_layer = nn.LSTM(num_inputs, num_hiddens)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nmodel = model.to(device)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"} +{"id": 470, "paddle": "import warnings\nfrom d2l import paddle as d2l\nwarnings.filterwarnings(\"ignore\")\nimport os\nimport paddle\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = paddle.to_tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = (array != vocab['<pad>']).astype(paddle.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(paddle.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.astype(paddle.int32))\n print('Valid length of Y:', Y_valid_len)\n break", "pytorch": "import os\nimport torch\nfrom d2l import torch as d2l\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = torch.tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = (array != vocab['<pad>']).type(torch.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.type(torch.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.type(torch.int32))\n print('Valid length of Y:', Y_valid_len)\n break"} +{"id": 471, "tensorflow": "x = tf.range(12)\ntf.size(x)\nX = tf.reshape(x, (3, 4))\ntf.zeros((2, 3, 4))\ntf.ones((2, 3, 4))\ntf.random.normal(shape=[3, 4])\ntf.constant([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = tf.constant([1.0, 2, 4, 8])\ny = tf.constant([2.0, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\ntf.exp(x)\nX = tf.reshape(tf.range(12, dtype=tf.float32), (3, 4))\nY = tf.constant([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\ntf.concat([X, Y], axis=0), tf.concat([X, Y], axis=1)\ntf.reduce_sum(X)\na = tf.reshape(tf.range(3), (3, 1))\nb = tf.reshape(tf.range(2), (1, 2))\nX_var = tf.Variable(X)\nX_var[1, 2].assign(9)\nX_var = tf.Variable(X)\nX_var[0:2, :].assign(tf.ones(X_var[0:2,:].shape, dtype = tf.float32) * 12)\nZ = tf.Variable(tf.zeros_like(Y))\nZ.assign(X + Y)\n@tf.function\ndef computation(X, Y):\n Z = tf.zeros_like(Y)\n A = X + Y\n B = A + Y\n C = B + Y\n return C + Y\ncomputation(X, Y)\nA = X.numpy()\nB = tf.constant(A)\na = tf.constant([3.5]).numpy()\nprint(a, a.item(), float(a), int(a))", "pytorch": "x = torch.arange(12)\nx.numel()\nX = x.reshape(3, 4)\ntorch.zeros((2, 3, 4))\ntorch.ones((2, 3, 4))\ntorch.randn(3, 4)\ntorch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = torch.tensor([1.0, 2, 4, 8])\ny = torch.tensor([2, 2, 2, 2])\nx + y, x - y, x 
* y, x / y, x ** y\ntorch.exp(x)\nX = torch.arange(12, dtype=torch.float32).reshape((3,4))\nY = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\ntorch.cat((X, Y), dim=0), torch.cat((X, Y), dim=1)\nX.sum()\na = torch.arange(3).reshape((3, 1))\nb = torch.arange(2).reshape((1, 2))\nX[1, 2] = 9\nX[0:2, :] = 12\nZ = torch.zeros_like(Y)\nZ[:] = X + Y\nbefore = id(X)\nX += Y\nid(X) == before\nA = X.numpy()\nB = torch.tensor(A)\na = torch.tensor([3.5])\nprint(a, a.item(), float(a), int(a))"} +{"id": 472, "tensorflow": "import tensorflow as tf\nX, y = tf.constant(inputs.values), tf.constant(outputs.values)", "pytorch": "import torch\nX, y = torch.tensor(inputs.values), torch.tensor(outputs.values)"} +{"id": 473, "tensorflow": "import tensorflow as tf\nx = tf.constant(3.0)\ny = tf.constant(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = tf.range(4)\nA = tf.reshape(tf.range(20), (5, 4))\ntf.transpose(A)\nB = tf.constant([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == tf.transpose(B)\nX = tf.reshape(tf.range(24), (2, 3, 4))\nA = tf.reshape(tf.range(20, dtype=tf.float32), (5, 4))\nB = A\nprint(A, A + B)\na = 2\nX = tf.reshape(tf.range(24), (2, 3, 4))\nprint(a + X, (a * X).shape)\nx = tf.range(4, dtype=tf.float32)\nprint(x, tf.reduce_sum(x))\na = tf.reduce_sum(A)\nA_sum_axis0 = tf.reduce_sum(A, axis=0)\nA_sum_axis1 = tf.reduce_sum(A, axis=1)\ntf.reduce_sum(A, axis=[0, 1])\ntf.reduce_mean(A)\ntf.reduce_sum(A) / tf.size(A).numpy()\ntf.reduce_mean(A, axis=0)\ntf.reduce_sum(A, axis=0) / A.shape[0]\nsum_A = tf.reduce_sum(A, axis=1, keepdims=True)\ntf.cumsum(A, axis=0)\ny = tf.ones(4, dtype=tf.float32)\nprint(tf.tensordot(x, y, axes=1))\ntf.reduce_sum(x * y)\nA.shape, x.shape, tf.linalg.matvec(A, x)\nB = tf.ones((4, 3), tf.float32)\ntf.matmul(A, B)\nu = tf.constant([3.0, -4.0])\ntf.norm(u)\ntf.reduce_sum(tf.abs(u))\ntf.norm(tf.ones((4, 9)))", "pytorch": "import torch\nx = torch.tensor(3.0)\ny = torch.tensor(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = torch.arange(4)\nA = torch.arange(20).reshape(5, 4)\nA.T\nB = torch.tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == B.T\nX = torch.arange(24).reshape(2, 3, 4)\nA = torch.arange(20, dtype=torch.float32).reshape(5, 4)\nB = A.clone()\nprint(A, A + B)\na = 2\nX = torch.arange(24).reshape(2, 3, 4)\nprint(a + X, (a * X).shape)\nx = torch.arange(4, dtype=torch.float32)\nprint(x, x.sum())\na = A.sum()\nA_sum_axis0 = A.sum(axis=0)\nA_sum_axis1 = A.sum(axis=1)\nA.sum(axis=[0, 1])\nA.mean()\nA.sum() / A.numel()\nA.mean(axis=0)\nA.sum(axis=0) / A.shape[0]\nsum_A = A.sum(axis=1, keepdims=True)\nA.cumsum(axis=0)\ny = torch.ones(4, dtype = torch.float32)\nprint(torch.dot(x, y))\ntorch.sum(x * y)\nA.shape, x.shape, torch.mv(A, x)\nB = torch.ones(4, 3)\ntorch.mm(A, B)\nu = torch.tensor([3.0, -4.0])\ntorch.norm(u)\ntorch.abs(u).sum()\ntorch.norm(torch.ones((4, 9)))"} +{"id": 474, "tensorflow": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import tensorflow as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x", "pytorch": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import torch as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x"} +{"id": 475, "tensorflow": "import tensorflow as tf\nx = tf.range(4, dtype=tf.float32)\nx = tf.Variable(x)\nwith tf.GradientTape() as t:\n y = 2 * tf.tensordot(x, x, axes=1)\nx_grad = t.gradient(y, x)\nx_grad\nx_grad == 4 * x\nwith tf.GradientTape() as t:\n y = tf.reduce_sum(x)\nt.gradient(y, x)\nwith tf.GradientTape() as t:\n y = x * x\nt.gradient(y, x)\nwith 
tf.GradientTape(persistent=True) as t:\n y = x * x\n u = tf.stop_gradient(y)\n z = u * x\nx_grad = t.gradient(z, x)\nx_grad == u\nt.gradient(y, x) == 2 * x\ndef f(a):\n b = a * 2\n while tf.norm(b) < 1000:\n b = b * 2\n if tf.reduce_sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = tf.Variable(tf.random.normal(shape=()))\nwith tf.GradientTape() as t:\n d = f(a)\nd_grad = t.gradient(d, a)\nd_grad\nd_grad == d / a", "pytorch": "import torch\nx = torch.arange(4.0)\nx.requires_grad_(True)\nx.grad\ny = 2 * torch.dot(x, x)\ny.backward()\nx.grad\nx.grad == 4 * x\nx.grad.zero_()\ny = x.sum()\ny.backward()\nx.grad\nx.grad.zero_()\ny = x * x\ny.sum().backward()\nx.grad\nx.grad.zero_()\ny = x * x\nu = y.detach()\nz = u * x\nz.sum().backward()\nx.grad == u\nx.grad.zero_()\ny.sum().backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while b.norm() < 1000:\n b = b * 2\n if b.sum() > 0:\n c = b\n else:\n c = 100 * b\n return c\na = torch.randn(size=(), requires_grad=True)\nd = f(a)\nd.backward()\na.grad == d / a"} +{"id": 476, "tensorflow": "%matplotlib inline\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom d2l import tensorflow as d2l\nfair_probs = tf.ones(6) / 6\ntfp.distributions.Multinomial(1, fair_probs).sample()\ntfp.distributions.Multinomial(10, fair_probs).sample()\ncounts = tfp.distributions.Multinomial(1000, fair_probs).sample()", "pytorch": "%matplotlib inline\nimport torch\nfrom torch.distributions import multinomial\nfrom d2l import torch as d2l\nfair_probs = torch.ones([6]) / 6\nmultinomial.Multinomial(1, fair_probs).sample()\nmultinomial.Multinomial(10, fair_probs).sample()\ncounts = multinomial.Multinomial(1000, fair_probs).sample()"} +{"id": 477, "tensorflow": "counts = tfp.distributions.Multinomial(10, fair_probs).sample(500)\ncum_counts = tf.cumsum(counts, axis=0)\nestimates = cum_counts / tf.reduce_sum(cum_counts, axis=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].numpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nimport tensorflow as tf\na = dir(tf.random)\nhelp(tf.ones)\ntf.ones(4)", "pytorch": "counts = multinomial.Multinomial(10, fair_probs).sample((500,))\ncum_counts = counts.cumsum(dim=0)\nestimates = cum_counts / cum_counts.sum(dim=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].numpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nimport torch\na = dir(torch.distributions)\nhelp(torch.ones)\ntorch.ones(4)"} +{"id": 478, "tensorflow": "%matplotlib inline\nimport math\nimport time\nimport numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn = 10000\na = tf.ones(n)\nb = tf.ones(n)\nc = tf.Variable(tf.zeros(n))\ntimer = Timer()\nfor i in range(n):\n c[i].assign(a[i] + b[i])", "pytorch": "%matplotlib inline\nimport math\nimport time\nimport numpy as np\nimport torch\nfrom d2l import torch as d2l\nn = 10000\na = torch.ones(n)\nb = torch.ones(n)\nc = torch.zeros(n)\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]"} +{"id": 479, "tensorflow": "%matplotlib inline\nimport random\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef 
synthetic_data(w, b, num_examples):\n X = tf.zeros((num_examples, w.shape[0]))\n X += tf.random.normal(shape=X.shape)\n y = tf.matmul(X, tf.reshape(w, (-1, 1))) + b\n y += tf.random.normal(shape=y.shape, stddev=0.01)\n y = tf.reshape(y, (-1, 1))\n return X, y\ntrue_w = tf.constant([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].numpy(), labels.numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n j = tf.constant(indices[i: min(i + batch_size, num_examples)])\n yield tf.gather(features, j), tf.gather(labels, j)\nw = tf.Variable(tf.random.normal(shape=(2, 1), mean=0, stddev=0.01), trainable=True)\nb = tf.Variable(tf.zeros(1), trainable=True)\ndef linreg(X, w, b):\n return tf.matmul(X, w) + b\ndef squared_loss(y_hat, y):\n return (y_hat - tf.reshape(y, y_hat.shape)) ** 2 / 2\ndef sgd(params, grads, lr, batch_size):\n for param, grad in zip(params, grads):\n param.assign_sub(lr*grad/batch_size)\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n with tf.GradientTape() as g:\n l = loss(net(X, w, b), y)\n dw, db = g.gradient(l, [w, b])\n sgd([w, b], [dw, db], lr, batch_size)\n train_l = loss(net(features, w, b), labels)", "pytorch": "%matplotlib inline\nimport random\nimport torch\nfrom d2l import torch as d2l\ndef synthetic_data(w, b, num_examples):\n X = torch.normal(0, 1, (num_examples, len(w)))\n y = torch.matmul(X, w) + b\n y += torch.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = torch.tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].detach().numpy(), labels.detach().numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = torch.tensor(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nw = torch.normal(0, 0.01, size=(2,1), requires_grad=True)\nb = torch.zeros(1, requires_grad=True)\ndef linreg(X, w, b):\n return torch.matmul(X, w) + b\ndef squared_loss(y_hat, y):\n return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2\ndef sgd(params, lr, batch_size):\n with torch.no_grad():\n for param in params:\n param -= lr * param.grad / batch_size\n param.grad.zero_()\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n l = loss(net(X, w, b), y)\n l.sum().backward()\n sgd([w, b], lr, batch_size)\n with torch.no_grad():\n train_l = loss(net(features, w, b), labels)"} +{"id": 480, "tensorflow": "import numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntrue_w = tf.constant([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = tf.data.Dataset.from_tensor_slices(data_arrays)\n if is_train:\n dataset = dataset.shuffle(buffer_size=1000)\n dataset = dataset.batch(batch_size)\n return dataset\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nnet = tf.keras.Sequential()\nnet.add(tf.keras.layers.Dense(1))\ninitializer = 
tf.initializers.RandomNormal(stddev=0.01)\nnet = tf.keras.Sequential()\nnet.add(tf.keras.layers.Dense(1, kernel_initializer=initializer))\nloss = tf.keras.losses.MeanSquaredError()\ntrainer = tf.keras.optimizers.SGD(learning_rate=0.03)\nw = net.get_weights()[0]\nb = net.get_weights()[1]", "pytorch": "import numpy as np\nimport torch\nfrom torch.utils import data\nfrom d2l import torch as d2l\ntrue_w = torch.tensor([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = data.TensorDataset(*data_arrays)\n return data.DataLoader(dataset, batch_size, shuffle=is_train)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom torch import nn\nnet = nn.Sequential(nn.Linear(2, 1))\nnet[0].weight.data.normal_(0, 0.01)\nnet[0].bias.data.fill_(0)\nloss = nn.MSELoss()\ntrainer = torch.optim.SGD(net.parameters(), lr=0.03)\nw = net[0].weight.data\nb = net[0].bias.data"} +{"id": 481, "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nd2l.use_svg_display()\nmnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()\nlen(mnist_train[0]), len(mnist_test[0])\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(img.numpy())\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX = tf.constant(mnist_train[0][:18])\ny = tf.constant(mnist_train[1][:18])\nshow_images(X, 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ntrain_iter = tf.data.Dataset.from_tensor_slices(mnist_train).batch(batch_size).shuffle(len(mnist_train[0]))\ndef load_data_fashion_mnist(batch_size, resize=None):\n mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()\n process = lambda X, y: (tf.expand_dims(X, axis=3) / 255, tf.cast(y, dtype='int32'))\n resize_fn = lambda X, y: (tf.image.resize_with_pad(X, resize, resize) if resize else X, y)\n return (tf.data.Dataset.from_tensor_slices(process(*mnist_train)).batch(batch_size).shuffle(len(mnist_train[0])).map(resize_fn),\n tf.data.Dataset.from_tensor_slices(process(*mnist_test)).batch(batch_size).map(resize_fn))", "pytorch": "%matplotlib inline\nimport torch\nimport torchvision\nfrom torch.utils import data\nfrom torchvision import transforms\nfrom d2l import torch as d2l\nd2l.use_svg_display()\ntrans = transforms.ToTensor()\nmnist_train = torchvision.datasets.FashionMNIST(\n root=\"../data\", train=True, transform=trans, download=True)\nmnist_test = torchvision.datasets.FashionMNIST(\n root=\"../data\", train=False, transform=trans, download=True)\nlen(mnist_train), len(mnist_test)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n if torch.is_tensor(img):\n ax.imshow(img.numpy())\n else:\n ax.imshow(img)\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = next(iter(data.DataLoader(mnist_train, batch_size=18)))\nshow_images(X.reshape(18, 28, 28), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\n 
return 4\ntrain_iter = data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n trans = [transforms.ToTensor()]\n if resize:\n trans.insert(0, transforms.Resize(resize))\n trans = transforms.Compose(trans)\n mnist_train = torchvision.datasets.FashionMNIST(root=\"../data\", train=True, transform=trans, download=True)\n mnist_test = torchvision.datasets.FashionMNIST(root=\"../data\", train=False, transform=trans, download=True)\n return (data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),\n data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))"} +{"id": 482, "tensorflow": "import tensorflow as tf\nfrom IPython import display\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = tf.Variable(tf.random.normal(shape=(num_inputs, num_outputs), mean=0, stddev=0.01))\nb = tf.Variable(tf.zeros(num_outputs))\nX = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\ntf.reduce_sum(X, 0, keepdims=True), tf.reduce_sum(X, 1, keepdims=True)\ndef softmax(X):\n X_exp = tf.exp(X)\n partition = tf.reduce_sum(X_exp, 1, keepdims=True)\n return X_exp / partition\nX = tf.random.normal((2, 5), 0, 1)\nX_prob = softmax(X)\nX_prob, tf.reduce_sum(X_prob, 1)\ndef net(X):\n return softmax(tf.matmul(tf.reshape(X, (-1, W.shape[0])), W) + b)\ny_hat = tf.constant([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny = tf.constant([0, 2])\ntf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1]))\ndef cross_entropy(y_hat, y):\n return -tf.math.log(tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1])))\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = tf.argmax(y_hat, axis=1)\n cmp = tf.cast(y_hat, y.dtype) == y\n return float(tf.reduce_sum(tf.cast(cmp, y.dtype)))\ndef evaluate_accuracy(net, data_iter):\n metric = Accumulator(2)\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), d2l.size(y))\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n metric = Accumulator(3)\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n y_hat = net(X)\n if isinstance(loss, tf.keras.losses.Loss):\n l = loss(y, y_hat)\n else:\n l = loss(y_hat, y)\n if isinstance(updater, tf.keras.optimizers.Optimizer):\n params = net.trainable_variables\n grads = tape.gradient(l, params)\n updater.apply_gradients(zip(grads, params))\n else:\n updater(X.shape[0], tape.gradient(l, updater.params))\n l_sum = l * float(tf.size(y)) if isinstance(loss, tf.keras.losses.Loss) else tf.reduce_sum(l)\n metric.add(l_sum, accuracy(y_hat, y), tf.size(y))\n return metric[0] / metric[2], metric[1] / metric[2]\nclass Updater():\n def __init__(self, params, lr):\n self.params = params\n self.lr = lr\n def __call__(self, batch_size, grads):\n d2l.sgd(self.params, grads, self.lr, batch_size)\nupdater = Updater([W, b], lr=0.1)\ndef predict_ch3(net, test_iter, n=6):\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(tf.argmax(net(X), axis=1))\n titles = [true +'\\n' + pred for true, pred in zip(trues, preds)]\n d2l.show_images(tf.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n])\npredict_ch3(net, test_iter)", "pytorch": "import torch\nfrom IPython import display\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = 
d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True)\nb = torch.zeros(num_outputs, requires_grad=True)\nX = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdim=True), X.sum(1, keepdim=True)\ndef softmax(X):\n X_exp = torch.exp(X)\n partition = X_exp.sum(1, keepdim=True)\n return X_exp / partition\nX = torch.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b)\ny = torch.tensor([0, 2])\ny_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - torch.log(y_hat[range(len(y_hat)), y])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n cmp = y_hat.type(y.dtype) == y\n return float(cmp.type(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n if isinstance(net, torch.nn.Module):\n net.eval()\n metric = Accumulator(2)\n with torch.no_grad():\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), y.numel())\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n if isinstance(net, torch.nn.Module):\n net.train()\n metric = Accumulator(3)\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, torch.optim.Optimizer):\n updater.zero_grad()\n l.mean().backward()\n updater.step()\n else:\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n return metric[0] / metric[2], metric[1] / metric[2]\nlr = 0.1\ndef updater(batch_size):\n return d2l.sgd([W, b], lr, batch_size)\ndef predict_ch3(net, test_iter, n=6):\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))\n titles = [true +'\\n' + pred for true, pred in zip(trues, preds)]\n d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])\npredict_ch3(net, test_iter)"} +{"id": 483, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = tf.keras.models.Sequential()\nnet.add(tf.keras.layers.Flatten(input_shape=(28, 28)))\nweight_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.01)\nnet.add(tf.keras.layers.Dense(10, kernel_initializer=weight_initializer))\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrainer = tf.keras.optimizers.SGD(learning_rate=.1)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = torch.optim.SGD(net.parameters(), lr=0.1)"} +{"id": 484, "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nx = tf.Variable(tf.range(-8.0, 8.0, 0.1), dtype=tf.float32)\ny = tf.nn.relu(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.relu(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'sigmoid(x)', figsize=(5, 
2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of sigmoid',\n figsize=(5, 2.5))\ny = tf.nn.tanh(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.tanh(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))", "pytorch": "%matplotlib inline\nimport torch\nfrom d2l import torch as d2l\nx = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)\ny = torch.relu(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward(torch.ones_like(x), retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of relu', figsize=(5, 2.5))\ny = torch.sigmoid(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))\ny = torch.tanh(x)\nd2l.plot(x.detach(), y.detach(), 'x', 'tanh(x)', figsize=(5, 2.5))\nx.grad.data.zero_()\ny.backward(torch.ones_like(x),retain_graph=True)\nd2l.plot(x.detach(), x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))"} +{"id": 485, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = tf.Variable(tf.random.normal(shape=(num_inputs, num_hiddens), mean=0, stddev=0.01))\nb1 = tf.Variable(tf.zeros(num_hiddens))\nW2 = tf.Variable(tf.random.normal(shape=(num_hiddens, num_outputs), mean=0, stddev=0.01))\nb2 = tf.Variable(tf.zeros(num_outputs))\nparams = [W1, b1, W2, b2]\ndef relu(X):\n return tf.math.maximum(X, 0)\ndef net(X):\n X = tf.reshape(X, (-1, num_inputs))\n H = relu(tf.matmul(X, W1) + b1)\n return tf.matmul(H, W2) + b2\ndef loss(y_hat, y):\n return tf.losses.sparse_categorical_crossentropy(y, y_hat, from_logits=True)\nnum_epochs, lr = 10, 0.1\nupdater = d2l.Updater([W1, W2, b1, b2], lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = nn.Parameter(torch.randn(\n num_inputs, num_hiddens, requires_grad=True) * 0.01)\nb1 = nn.Parameter(torch.zeros(num_hiddens, requires_grad=True))\nW2 = nn.Parameter(torch.randn(\n num_hiddens, num_outputs, requires_grad=True) * 0.01)\nb2 = nn.Parameter(torch.zeros(num_outputs, requires_grad=True))\nparams = [W1, b1, W2, b2]\ndef relu(X):\n a = torch.zeros_like(X)\n return torch.max(X, a)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(X@W1 + b1)\n return (H@W2 + b2)\nloss = nn.CrossEntropyLoss(reduction='none')\nnum_epochs, lr = 10, 0.1\nupdater = torch.optim.SGD(params, lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)"} +{"id": 486, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nnet = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(10)])\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "pytorch": "import torch\nfrom torch import 
nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = nn.CrossEntropyLoss(reduction='none')\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 487, "tensorflow": "import math\nimport numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntrue_w, features, poly_features, labels = [tf.constant(x, dtype=tf.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(tf.reduce_sum(l), d2l.size(l))\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = tf.losses.MeanSquaredError()\n input_shape = train_features.shape[-1]\n net = tf.keras.Sequential()\n net.add(tf.keras.layers.Dense(1, use_bias=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False)\n trainer = tf.keras.optimizers.SGD(learning_rate=.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))", "pytorch": "import math\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\ntrue_w, features, poly_features, labels = [torch.tensor(x, dtype=torch.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n out = net(X)\n y = y.reshape(out.shape)\n l = loss(out, y)\n metric.add(l.sum(), l.numel())\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = nn.MSELoss(reduction='none')\n input_shape = train_features.shape[-1]\n net = nn.Sequential(nn.Linear(input_shape, 1, bias=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels.reshape(-1,1)), batch_size)\n test_iter = d2l.load_array((test_features, test_labels.reshape(-1,1)), batch_size, is_train=False)\n trainer = torch.optim.SGD(net.parameters(), lr=0.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))"} +{"id": 488, "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = tf.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, 
n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = tf.Variable(tf.random.normal(mean=1, shape=(num_inputs, 1)))\n b = tf.Variable(tf.zeros(shape=(1, )))\n return [w, b]\ndef l2_penalty(w):\n return tf.reduce_sum(tf.pow(w, 2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n l = loss(net(X), y) + lambd * l2_penalty(w)\n grads = tape.gradient(l, [w, b])\n d2l.sgd([w, b], grads, lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = tf.keras.models.Sequential()\n net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(wd)))\n net.build(input_shape=(1, num_inputs))\n w, b = net.trainable_variables\n loss = tf.keras.losses.MeanSquaredError()\n num_epochs, lr = 100, 0.003\n trainer = tf.keras.optimizers.SGD(learning_rate=lr)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n l = loss(net(X), y) + net.losses\n grads = tape.gradient(l, net.trainable_variables)\n trainer.apply_gradients(zip(grads, net.trainable_variables))\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))", "pytorch": "%matplotlib inline\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = torch.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = torch.normal(0, 1, size=(num_inputs, 1), requires_grad=True)\n b = torch.zeros(1, requires_grad=True)\n return [w, b]\ndef l2_penalty(w):\n return torch.sum(w.pow(2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.sum().backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = nn.Sequential(nn.Linear(num_inputs, 1))\n for param in net.parameters():\n param.data.normal_()\n loss = nn.MSELoss(reduction='none')\n num_epochs, lr = 100, 0.003\n trainer = torch.optim.SGD([{\"params\":net[0].weight,'weight_decay': wd}, {\"params\":net[0].bias}], lr=lr)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n 
trainer.zero_grad()\n l = loss(net(X), y)\n l.mean().backward()\n trainer.step()\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1,\n (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"} +{"id": 489, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return tf.zeros_like(X)\n if dropout == 0:\n return X\n mask = tf.random.uniform(shape=tf.shape(X), minval=0, maxval=1) < 1 - dropout\n return tf.cast(mask, dtype=tf.float32) * X / (1.0 - dropout)\nX = tf.reshape(tf.range(16, dtype=tf.float32), (2, 8))\nnum_outputs, num_hiddens1, num_hiddens2 = 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(tf.keras.Model):\n def __init__(self, num_outputs, num_hiddens1, num_hiddens2):\n super().__init__()\n self.input_layer = tf.keras.layers.Flatten()\n self.hidden1 = tf.keras.layers.Dense(num_hiddens1, activation='relu')\n self.hidden2 = tf.keras.layers.Dense(num_hiddens2, activation='relu')\n self.output_layer = tf.keras.layers.Dense(num_outputs)\n def call(self, inputs, training=None):\n x = self.input_layer(inputs)\n x = self.hidden1(x)\n if training:\n x = dropout_layer(x, dropout1)\n x = self.hidden2(x)\n if training:\n x = dropout_layer(x, dropout2)\n x = self.output_layer(x)\n return x\nnet = Net(num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dropout(dropout1),\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dropout(dropout2),\n tf.keras.layers.Dense(10),\n])\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return torch.zeros_like(X)\n if dropout == 0:\n return X\n mask = (torch.rand(X.shape) > dropout).float()\n return mask * X / (1.0 - dropout)\nX= torch.arange(16, dtype = torch.float32).reshape((2, 8))\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(nn.Module):\n def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2, is_training = True):\n super(Net, self).__init__()\n self.num_inputs = num_inputs\n self.training = is_training\n self.lin1 = nn.Linear(num_inputs, num_hiddens1)\n self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)\n self.lin3 = nn.Linear(num_hiddens2, num_outputs)\n self.relu = nn.ReLU()\n def forward(self, X):\n H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))\n if self.training == True:\n H1 = dropout_layer(H1, dropout1)\n H2 = self.relu(self.lin2(H1))\n if self.training == True:\n H2 = dropout_layer(H2, dropout2)\n out = self.lin3(H2)\n return out\nnet = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = nn.CrossEntropyLoss(reduction='none')\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, 
num_epochs, trainer)\nnet = nn.Sequential(nn.Flatten(),\n nn.Linear(784, 256),\n nn.ReLU(),\n nn.Dropout(dropout1),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Dropout(dropout2),\n nn.Linear(256, 10))\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, std=0.01)\nnet.apply(init_weights);\ntrainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 490, "tensorflow": "trainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nx = tf.Variable(tf.range(-8.0, 8.0, 0.1))\nwith tf.GradientTape() as t:\n y = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), [y.numpy(), t.gradient(y, x).numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = tf.random.normal((4, 4))\nfor i in range(100):\n M = tf.matmul(M, tf.random.normal((4, 4)))", "pytorch": "trainer = torch.optim.SGD(net.parameters(), lr=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport torch\nfrom d2l import torch as d2l\nx = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)\ny = torch.sigmoid(x)\ny.backward(torch.ones_like(x))\nd2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = torch.normal(0, 1, size=(4,4))\nfor i in range(100):\n M = torch.mm(M,torch.normal(0, 1, size=(4, 4)))"} +{"id": 491, "tensorflow": "%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn_train = train_data.shape[0]\ntrain_features = tf.constant(all_features[:n_train].values, dtype=tf.float32)\ntest_features = tf.constant(all_features[n_train:].values, dtype=tf.float32)\ntrain_labels = tf.constant(train_data.SalePrice.values.reshape(-1, 1), dtype=tf.float32)\nloss = tf.keras.losses.MeanSquaredError()\ndef get_net():\n net = tf.keras.models.Sequential()\n net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(weight_decay)))\n return net\ndef log_rmse(y_true, y_pred):\n clipped_preds = tf.clip_by_value(y_pred, 1, float('inf'))\n return tf.sqrt(tf.reduce_mean(loss(tf.math.log(y_true), tf.math.log(clipped_preds))))\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n net.compile(loss=loss, optimizer=optimizer)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n y_hat = net(X)\n l = loss(y, y_hat)\n params = net.trainable_variables\n grads = tape.gradient(l, params)\n optimizer.apply_gradients(zip(grads, params))\n train_ls.append(log_rmse(train_labels, net(train_features)))\n if test_labels is not None:\n test_ls.append(log_rmse(test_labels, net(test_features)))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = tf.concat([X_train, X_part], 0)\n y_train = tf.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef 
train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)", "pytorch": "%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nn_train = train_data.shape[0]\ntrain_features = torch.tensor(all_features[:n_train].values, dtype=torch.float32)\ntest_features = torch.tensor(all_features[n_train:].values, dtype=torch.float32)\ntrain_labels = torch.tensor(train_data.SalePrice.values.reshape(-1, 1), dtype=torch.float32)\nloss = nn.MSELoss()\nin_features = train_features.shape[1]\ndef get_net():\n net = nn.Sequential(nn.Linear(in_features,1))\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = torch.clamp(net(features), 1, float('inf'))\n rmse = torch.sqrt(loss(torch.log(clipped_preds), torch.log(labels)))\n return rmse.item()\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = torch.optim.Adam(net.parameters(), lr = learning_rate, weight_decay = weight_decay)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n optimizer.zero_grad()\n l = loss(net(X), y)\n l.backward()\n optimizer.step()\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = torch.cat([X_train, X_part], 0)\n y_train = torch.cat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).detach().numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)"} +{"id": 492, "tensorflow": "import tensorflow as tf\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((2, 20))\nnet(X)\nclass MLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)\n self.out = tf.keras.layers.Dense(units=10)\n def call(self, X):\n return self.out(self.hidden((X)))\nclass 
MySequential(tf.keras.Model):\n def __init__(self, *args):\n super().__init__()\n self.modules = []\n for block in args:\n self.modules.append(block)\n def call(self, X):\n for module in self.modules:\n X = module(X)\n return X\nnet = MySequential(\n tf.keras.layers.Dense(units=256, activation=tf.nn.relu),\n tf.keras.layers.Dense(10))\nnet(X)\nclass FixedHiddenMLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.flatten = tf.keras.layers.Flatten()\n self.rand_weight = tf.constant(tf.random.uniform((20, 20)))\n self.dense = tf.keras.layers.Dense(20, activation=tf.nn.relu)\n def call(self, inputs):\n X = self.flatten(inputs)\n X = tf.nn.relu(tf.matmul(X, self.rand_weight) + 1)\n X = self.dense(X)\n while tf.reduce_sum(tf.math.abs(X)) > 1:\n X /= 2\n return tf.reduce_sum(X)\nclass NestMLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.net = tf.keras.Sequential()\n self.net.add(tf.keras.layers.Dense(64, activation=tf.nn.relu))\n self.net.add(tf.keras.layers.Dense(32, activation=tf.nn.relu))\n self.dense = tf.keras.layers.Dense(16, activation=tf.nn.relu)\n def call(self, inputs):\n return self.dense(self.net(inputs))\nchimera = tf.keras.Sequential()\nchimera.add(NestMLP())\nchimera.add(tf.keras.layers.Dense(20))\nchimera.add(FixedHiddenMLP())\nchimera(X)", "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nnet = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nX = torch.rand(2, 20)\nnet(X)\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.out = nn.Linear(256, 10)\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\nclass MySequential(nn.Module):\n def __init__(self, *args):\n super().__init__()\n for idx, module in enumerate(args):\n self._modules[str(idx)] = module\n def forward(self, X):\n for block in self._modules.values():\n X = block(X)\n return X\nnet = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\nnet(X)\nclass FixedHiddenMLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.rand_weight = torch.rand((20, 20), requires_grad=False)\n self.linear = nn.Linear(20, 20)\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(torch.mm(X, self.rand_weight) + 1)\n X = self.linear(X)\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\nclass NestMLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU())\n self.linear = nn.Linear(32, 16)\n def forward(self, X):\n return self.linear(self.net(X))\nchimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())\nchimera(X)"} +{"id": 493, "tensorflow": "import tensorflow as tf\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu),\n tf.keras.layers.Dense(1),\n])\nX = tf.random.uniform((2, 4))\nnet(X)\nnet.get_weights()[1]\ndef block1(name):\n return tf.keras.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu)], name=name)\ndef block2():\n net = tf.keras.Sequential()\n for i in range(4):\n net.add(block1(name=f'block-{i}'))\n return net\nrgnet = tf.keras.Sequential()\nrgnet.add(block2())\nrgnet.add(tf.keras.layers.Dense(1))\nrgnet(X)\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu,\n kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.01), bias_initializer=tf.zeros_initializer()),\n 
tf.keras.layers.Dense(1)])\nnet(X)\nnet.weights[0], net.weights[1]\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.Constant(1), bias_initializer=tf.zeros_initializer()),\n tf.keras.layers.Dense(1),\n])\nnet(X)\nnet.weights[0], net.weights[1]\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.GlorotUniform()),\n tf.keras.layers.Dense(1, kernel_initializer=tf.keras.initializers.Constant(1)),\n])\nnet(X)\nclass MyInit(tf.keras.initializers.Initializer):\n def __call__(self, shape, dtype=None):\n data=tf.random.uniform(shape, -10, 10, dtype=dtype)\n factor=(tf.abs(data) >= 5)\n factor=tf.cast(factor, tf.float32)\n return data * factor\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=MyInit()),\n tf.keras.layers.Dense(1)])\nnet(X)\nnet.layers[1].weights[0][:].assign(net.layers[1].weights[0] + 1)\nnet.layers[1].weights[0][0, 0].assign(42)\nnet.layers[1].weights[0]\nlayer = CenteredLayer()\nlayer(tf.constant([1, 2, 3, 4, 5]))\nnet = tf.keras.Sequential([tf.keras.layers.Dense(128), CenteredLayer()])", "pytorch": "import torch\nfrom torch import nn\nnet = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))\nX = torch.rand(size=(2, 4))\nnet(X)\nnet.state_dict()['2.bias'].data\ndef block1():\n return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())\ndef block2():\n net = nn.Sequential()\n for i in range(4):\n net.add_module(f'block {i}', block1())\n return net\nrgnet = nn.Sequential(block2(), nn.Linear(4, 1))\nrgnet(X)\ndef init_normal(m):\n if type(m) == nn.Linear:\n nn.init.normal_(m.weight, mean=0, std=0.01)\n nn.init.zeros_(m.bias)\nnet.apply(init_normal)\nnet[0].weight.data[0], net[0].bias.data[0]\ndef init_constant(m):\n if type(m) == nn.Linear:\n nn.init.constant_(m.weight, 1)\n nn.init.zeros_(m.bias)\nnet.apply(init_constant)\nnet[0].weight.data[0], net[0].bias.data[0]\ndef init_xavier(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\ndef init_42(m):\n if type(m) == nn.Linear:\n nn.init.constant_(m.weight, 42)\nnet[0].apply(init_xavier)\nnet[2].apply(init_42)\ndef my_init(m):\n if type(m) == nn.Linear:\n nn.init.uniform_(m.weight, -10, 10)\n m.weight.data *= m.weight.data.abs() >= 5\nnet.apply(my_init)\nnet[0].weight[:2]\nnet[0].weight.data[:] += 1\nnet[0].weight.data[0, 0] = 42\nnet[0].weight.data[0]\nlayer = CenteredLayer()\nlayer(torch.FloatTensor([1, 2, 3, 4, 5]))\nnet = nn.Sequential(nn.Linear(8, 128), CenteredLayer())"} +{"id": 494, "tensorflow": "import tensorflow as tf\nclass CenteredLayer(tf.keras.Model):\n def __init__(self):\n super().__init__()\n def call(self, inputs):\n return inputs - tf.reduce_mean(inputs)\nY = net(tf.random.uniform((4, 8)))\ntf.reduce_mean(Y)\nclass MyDense(tf.keras.Model):\n def __init__(self, units):\n super().__init__()\n self.units = units\n def build(self, X_shape):\n self.weight = self.add_weight(name='weight',\n shape=[X_shape[-1], self.units],\n initializer=tf.random_normal_initializer())\n self.bias = self.add_weight(\n name='bias', shape=[self.units],\n initializer=tf.zeros_initializer())\n def call(self, X):\n linear = tf.matmul(X, self.weight) + self.bias\n return tf.nn.relu(linear)\ndense = MyDense(3)\ndense(tf.random.uniform((2, 5)))\ndense.get_weights()\ndense(tf.random.uniform((2, 5)))\nnet = 
tf.keras.models.Sequential([MyDense(8), MyDense(1)])\nnet(tf.random.uniform((2, 64)))", "pytorch": "import torch\nimport torch.nn.functional as F\nfrom torch import nn\nclass CenteredLayer(nn.Module):\n def __init__(self):\n super().__init__()\n def forward(self, X):\n return X - X.mean()\nY = net(torch.rand(4, 8))\nY.mean()\nclass MyLinear(nn.Module):\n def __init__(self, in_units, units):\n super().__init__()\n self.weight = nn.Parameter(torch.randn(in_units, units))\n self.bias = nn.Parameter(torch.randn(units,))\n def forward(self, X):\n linear = torch.matmul(X, self.weight.data) + self.bias.data\n return F.relu(linear)\nlinear = MyLinear(5, 3)\nlinear.weight\nlinear(torch.rand(2, 5))\nnet = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))\nnet(torch.rand(2, 64))"} +{"id": 495, "tensorflow": "import numpy as np\nimport tensorflow as tf\nx = tf.range(4)\nnp.save('x-file.npy', x)\nx2 = np.load('x-file.npy', allow_pickle=True)\ny = tf.zeros(4)\nnp.save('xy-files.npy', [x, y])\nx2, y2 = np.load('xy-files.npy', allow_pickle=True)\nmydict = {'x': x, 'y': y}\nnp.save('mydict.npy', mydict)\nmydict2 = np.load('mydict.npy', allow_pickle=True)\nclass MLP(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.flatten = tf.keras.layers.Flatten()\n self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)\n self.out = tf.keras.layers.Dense(units=10)\n def call(self, inputs):\n x = self.flatten(inputs)\n x = self.hidden(x)\n return self.out(x)\nnet = MLP()\nX = tf.random.uniform((2, 20))\nY = net(X)\nnet.save_weights('mlp.params')\nclone = MLP()\nclone.load_weights('mlp.params')", "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nx = torch.arange(4)\ntorch.save(x, 'x-file')\nx2 = torch.load('x-file')\ny = torch.zeros(4)\ntorch.save([x, y],'x-files')\nx2, y2 = torch.load('x-files')\nmydict = {'x': x, 'y': y}\ntorch.save(mydict, 'mydict')\nmydict2 = torch.load('mydict')\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.hidden = nn.Linear(20, 256)\n self.output = nn.Linear(256, 10)\n def forward(self, x):\n return self.output(F.relu(self.hidden(x)))\nnet = MLP()\nX = torch.randn(size=(2, 20))\nY = net(X)\ntorch.save(net.state_dict(), 'mlp.params')\nclone = MLP()\nclone.load_state_dict(torch.load('mlp.params'))\nclone.eval()"} +{"id": 496, "tensorflow": "import tensorflow as tf\ntf.device('/CPU:0'), tf.device('/GPU:0'), tf.device('/GPU:1')\nlen(tf.config.experimental.list_physical_devices('GPU'))\ndef try_gpu(i=0):\n if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1:\n return tf.device(f'/GPU:{i}')\n return tf.device('/CPU:0')\ndef try_all_gpus():\n num_gpus = len(tf.config.experimental.list_physical_devices('GPU'))\n devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)]\n return devices if devices else [tf.device('/CPU:0')]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = tf.constant([1, 2, 3])\nx.device\nwith try_gpu():\n X = tf.ones((2, 3))\nwith try_gpu(1):\n Y = tf.random.uniform((2, 3))\nwith try_gpu(1):\n Z = X\nwith try_gpu(1):\n Z2 = Z\nZ2 is Z\nstrategy = tf.distribute.MirroredStrategy()\nwith strategy.scope():\n net = tf.keras.models.Sequential([\n tf.keras.layers.Dense(1)])\nnet.layers[0].weights[0].device, net.layers[0].weights[1].device", "pytorch": "import torch\nfrom torch import nn\ntorch.device('cpu'), torch.device('cuda'), torch.device('cuda:1')\ntorch.cuda.device_count()\ndef try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return torch.device(f'cuda:{i}')\n return 
torch.device('cpu')\ndef try_all_gpus():\n devices = [torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count())]\n return devices if devices else [torch.device('cpu')]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = torch.tensor([1, 2, 3])\nx.device\nX = torch.ones(2, 3, device=try_gpu())\nY = torch.rand(2, 3, device=try_gpu(1))\nZ = X.cuda(1)\nZ.cuda(1) is Z\nnet = nn.Sequential(nn.Linear(3, 1))\nnet = net.to(device=try_gpu())\nnet[0].weight.data.device"} +{"id": 497, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef corr2d(X, K):\n h, w = K.shape\n Y = tf.Variable(tf.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j].assign(tf.reduce_sum(\n X[i: i + h, j: j + w] * K))\n return Y\nX = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = tf.constant([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(tf.keras.layers.Layer):\n def __init__(self):\n super().__init__()\n def build(self, kernel_size):\n initializer = tf.random_normal_initializer()\n self.weight = self.add_weight(name='w', shape=kernel_size, initializer=initializer)\n self.bias = self.add_weight(name='b', shape=(1, ), initializer=initializer)\n def call(self, inputs):\n return corr2d(inputs, self.weight) + self.bias\nX = tf.Variable(tf.ones((6, 8)))\nX[:, 2:6].assign(tf.zeros(X[:, 2:6].shape))\nK = tf.constant([[1.0, -1.0]])\ncorr2d(tf.transpose(X), K)\nconv2d = tf.keras.layers.Conv2D(1, (1, 2), use_bias=False)\nX = tf.reshape(X, (1, 6, 8, 1))\nY = tf.reshape(Y, (1, 6, 7, 1))\nlr = 3e-2\nY_hat = conv2d(X)\nfor i in range(10):\n with tf.GradientTape(watch_accessed_variables=False) as g:\n g.watch(conv2d.weights[0])\n Y_hat = conv2d(X)\n l = (abs(Y_hat - Y)) ** 2\n update = tf.multiply(lr, g.gradient(l, conv2d.weights[0]))\n weights = conv2d.get_weights()\n weights[0] = conv2d.weights[0] - update\n conv2d.set_weights(weights)\ntf.reshape(conv2d.get_weights()[0], (1, 2))", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef corr2d(X, K):\n h, w = K.shape\n Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n return Y\nX = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = torch.tensor([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Module):\n def __init__(self, kernel_size):\n super().__init__()\n self.weight = nn.Parameter(torch.rand(kernel_size))\n self.bias = nn.Parameter(torch.zeros(1))\n def forward(self, x):\n return corr2d(x, self.weight) + self.bias\nX = torch.ones((6, 8))\nX[:, 2:6] = 0\nK = torch.tensor([[1.0, -1.0]])\ncorr2d(X.t(), K)\nconv2d = nn.Conv2d(1,1, kernel_size=(1, 2), bias=False)\nX = X.reshape((1, 1, 6, 8))\nY = Y.reshape((1, 1, 6, 7))\nlr = 3e-2\nfor i in range(10):\n Y_hat = conv2d(X)\n l = (Y_hat - Y) ** 2\n conv2d.zero_grad()\n l.sum().backward()\n conv2d.weight.data[:] -= lr * conv2d.weight.grad\nconv2d.weight.data.reshape((1, 2))"} +{"id": 498, "tensorflow": "import tensorflow as tf\n\ndef comp_conv2d(conv2d, X):\n X = tf.reshape(X, (1, ) + X.shape + (1, ))\n Y = conv2d(X)\n return tf.reshape(Y, Y.shape[1:3])\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same')\nX = tf.random.uniform(shape=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=(5, 3), padding='same')\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same', 
strides=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=(3,5), padding='valid', strides=(3, 4))\ncomp_conv2d(conv2d, X).shape", "pytorch": "import torch\nfrom torch import nn\n\ndef comp_conv2d(conv2d, X):\n X = X.reshape((1, 1) + X.shape)\n Y = conv2d(X)\n return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1)\nX = torch.rand(size=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1, stride=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2d(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))\ncomp_conv2d(conv2d, X).shape"} +{"id": 499, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef corr2d_multi_in(X, K):\n return tf.reduce_sum([d2l.corr2d(x, k) for x, k in zip(X, K)], axis=0)\nX = tf.constant([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = tf.constant([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return tf.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = tf.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = tf.reshape(X, (c_i, h * w))\n K = tf.reshape(K, (c_o, c_i))\n Y = tf.matmul(K, X)\n return tf.reshape(Y, (c_o, h, w))\nX = tf.random.normal((3, 3, 3), 0, 1)\nK = tf.random.normal((2, 3, 1, 1), 0, 1)\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(tf.reduce_sum(tf.abs(Y1 - Y2))) < 1e-6", "pytorch": "import torch\nfrom d2l import torch as d2l\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = torch.tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = torch.tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return torch.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = torch.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = torch.matmul(K, X)\n return Y.reshape((c_o, h, w))\nX = torch.normal(0, 1, (3, 3, 3))\nK = torch.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(torch.abs(Y1 - Y2).sum()) < 1e-6"} +{"id": 500, "tensorflow": "import tensorflow as tf\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = tf.Variable(tf.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w +1)))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j].assign(tf.reduce_max(X[i: i + p_h, j: j + p_w]))\n elif mode =='avg':\n Y[i, j].assign(tf.reduce_mean(X[i: i + p_h, j: j + p_w]))\n return Y\nX = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = tf.reshape(tf.range(16, dtype=tf.float32), (1, 4, 4, 1))\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3])\npool2d(X)\npaddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid',\n strides=2)\npool2d(X_padded)\npaddings = tf.constant([[0, 0], [0, 0], [1, 1], [0, 0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[2, 3], 
padding='valid',\n strides=(2, 3))\npool2d(X_padded)\nX = tf.concat([X, X + 1], 3)\npaddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid',\n strides=2)\npool2d(X_padded)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = torch.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = torch.arange(16, dtype=torch.float32).reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2d(3)\npool2d(X)\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)\npool2d = nn.MaxPool2d((2, 3), stride=(2, 3), padding=(0, 1))\npool2d(X)\nX = torch.cat((X, X + 1), 1)\npool2d = nn.MaxPool2d(3, padding=1, stride=2)\npool2d(X)"} +{"id": 501, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, activation='sigmoid', padding='same'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5, activation='sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120, activation='sigmoid'),\n tf.keras.layers.Dense(84, activation='sigmoid'),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((1, 28, 28, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\nclass TrainCallback(tf.keras.callbacks.Callback):\n def __init__(self, net, train_iter, test_iter, num_epochs, device_name):\n self.timer = d2l.Timer()\n self.animator = d2l.Animator( xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n self.net = net\n self.train_iter = train_iter\n self.test_iter = test_iter\n self.num_epochs = num_epochs\n self.device_name = device_name\n def on_epoch_begin(self, epoch, logs=None):\n self.timer.start()\n def on_epoch_end(self, epoch, logs):\n self.timer.stop()\n test_acc = self.net.evaluate(self.test_iter, verbose=0, return_dict=True)['accuracy']\n metrics = (logs['loss'], logs['accuracy'], test_acc)\n self.animator.add(epoch + 1, metrics)\n if epoch == self.num_epochs - 1:\n batch_size = next(iter(self.train_iter))[0].shape[0]\n num_examples = batch_size * tf.data.experimental.cardinality(self.train_iter).numpy()\ndef train_ch6(net_fn, train_iter, test_iter, num_epochs, lr, device):\n device_name = device._device_name\n strategy = tf.distribute.OneDeviceStrategy(device_name)\n with strategy.scope():\n optimizer = tf.keras.optimizers.SGD(learning_rate=lr)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n net = net_fn()\n net.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])\n callback = TrainCallback(net, train_iter, test_iter, num_epochs, device_name)\n net.fit(train_iter, epochs=num_epochs, verbose=0, callbacks=[callback])\n return net", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),\n 
nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Flatten(),\n nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.Sigmoid(),\n nn.Linear(84, 10))\nX = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32)\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape: \t',X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n def init_weights(m):\n if type(m) == nn.Linear or type(m) == nn.Conv2d:\n nn.init.xavier_uniform_(m.weight)\n net.apply(init_weights)\n net.to(device)\n optimizer = torch.optim.SGD(net.parameters(), lr=lr)\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n net.train()\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n optimizer.zero_grad()\n X, y = X.to(device), y.to(device)\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n optimizer.step()\n with torch.no_grad():\n metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))"} +{"id": 502, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=96, kernel_size=11, strides=4, activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Conv2D(filters=256, kernel_size=5, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape:\t', X.shape)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nnet = nn.Sequential(\n nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Flatten(),\n nn.Linear(6400, 4096), nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 10))\nX = torch.randn(1, 1, 224, 224)\nfor layer in net:\n X=layer(X)\n print(layer.__class__.__name__,'output shape:\t',X.shape)"} +{"id": 503, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef vgg_block(num_convs, num_channels):\n blk = tf.keras.models.Sequential()\n for _ in 
range(num_convs):\n blk.add(tf.keras.layers.Conv2D(num_channels,kernel_size=3, padding='same',activation='relu'))\n blk.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))\n return blk\ndef vgg(conv_arch):\n net = tf.keras.models.Sequential()\n for (num_convs, num_channels) in conv_arch:\n net.add(vgg_block(num_convs, num_channels))\n net.add(tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10)]))\n return net\nnet = vgg(conv_arch)\nX = tf.random.uniform((1, 224, 224, 1))\nfor blk in net.layers:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t', X.shape)\nratio = 4\nsmall_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]\nnet = lambda: vgg(small_conv_arch)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef vgg_block(num_convs, in_channels, out_channels):\n layers = []\n for _ in range(num_convs):\n layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))\n layers.append(nn.ReLU())\n in_channels = out_channels\n layers.append(nn.MaxPool2d(kernel_size=2,stride=2))\n return nn.Sequential(*layers)\ndef vgg(conv_arch):\n conv_blks = []\n in_channels = 1\n for (num_convs, out_channels) in conv_arch:\n conv_blks.append(vgg_block(num_convs, in_channels, out_channels))\n in_channels = out_channels\n return nn.Sequential(\n *conv_blks, nn.Flatten(),\n nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5),\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5),\n nn.Linear(4096, 10))\nnet = vgg(conv_arch)\nX = torch.randn(size=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t',X.shape)\nratio = 4\nsmall_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]\nnet = vgg(small_conv_arch)"} +{"id": 504, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef nin_block(num_channels, kernel_size, strides, padding):\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(num_channels, kernel_size, strides=strides, padding=padding, activation='relu'),\n tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu'),\n tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu')])\ndef net():\n return tf.keras.models.Sequential([\n nin_block(96, kernel_size=11, strides=4, padding='valid'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n nin_block(256, kernel_size=5, strides=1, padding='same'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n nin_block(384, kernel_size=3, strides=1, padding='same'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Dropout(0.5),\n nin_block(10, kernel_size=3, strides=1, padding='same'),\n tf.keras.layers.GlobalAveragePooling2D(),\n tf.keras.layers.Reshape((1, 1, 10)),\n tf.keras.layers.Flatten(),\n ])\nX = tf.random.uniform((1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef nin_block(in_channels, out_channels, kernel_size, strides, padding):\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding),\n nn.ReLU(),\n nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU(),\n nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU())\nnet = nn.Sequential(\n 
nin_block(1, 96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2d(3, stride=2),\n nin_block(96, 256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2d(3, stride=2),\n nin_block(256, 384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2d(3, stride=2),\n nn.Dropout(0.5),\n nin_block(384, 10, kernel_size=3, strides=1, padding=1),\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten())\nX = torch.rand(size=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 505, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass Inception(tf.keras.Model):\n def __init__(self, c1, c2, c3, c4):\n super().__init__()\n self.p1_1 = tf.keras.layers.Conv2D(c1, 1, activation='relu')\n self.p2_1 = tf.keras.layers.Conv2D(c2[0], 1, activation='relu')\n self.p2_2 = tf.keras.layers.Conv2D(c2[1], 3, padding='same', activation='relu')\n self.p3_1 = tf.keras.layers.Conv2D(c3[0], 1, activation='relu')\n self.p3_2 = tf.keras.layers.Conv2D(c3[1], 5, padding='same', activation='relu')\n self.p4_1 = tf.keras.layers.MaxPool2D(3, 1, padding='same')\n self.p4_2 = tf.keras.layers.Conv2D(c4, 1, activation='relu')\n def call(self, x):\n p1 = self.p1_1(x)\n p2 = self.p2_2(self.p2_1(x))\n p3 = self.p3_2(self.p3_1(x))\n p4 = self.p4_2(self.p4_1(x))\n return tf.keras.layers.Concatenate()([p1, p2, p3, p4])\ndef b1():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, 7, strides=2, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b2():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, 1, activation='relu'),\n tf.keras.layers.Conv2D(192, 3, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b3():\n return tf.keras.models.Sequential([\n Inception(64, (96, 128), (16, 32), 32),\n Inception(128, (128, 192), (32, 96), 64),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b4():\n return tf.keras.Sequential([\n Inception(192, (96, 208), (16, 48), 64),\n Inception(160, (112, 224), (24, 64), 64),\n Inception(128, (128, 256), (24, 64), 64),\n Inception(112, (144, 288), (32, 64), 64),\n Inception(256, (160, 320), (32, 128), 128),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b5():\n return tf.keras.Sequential([\n Inception(256, (160, 320), (32, 128), 128),\n Inception(384, (192, 384), (48, 128), 128),\n tf.keras.layers.GlobalAvgPool2D(),\n tf.keras.layers.Flatten()\n ])\ndef net():\n return tf.keras.Sequential([b1(), b2(), b3(), b4(), b5(),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform(shape=(1, 96, 96, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nclass Inception(nn.Module):\n def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)\n self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)\n self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)\n self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)\n self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)\n self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)\n def forward(self, x):\n p1 = F.relu(self.p1_1(x))\n p2 = 
F.relu(self.p2_2(F.relu(self.p2_1(x))))\n p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))\n p4 = F.relu(self.p4_2(self.p4_1(x)))\n return torch.cat((p1, p2, p3, p4), dim=1)\nb1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1),\n nn.ReLU(),\n nn.Conv2d(64, 192, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),\n Inception(256, 128, (128, 192), (32, 96), 64),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),\n Inception(512, 160, (112, 224), (24, 64), 64),\n Inception(512, 128, (128, 256), (24, 64), 64),\n Inception(512, 112, (144, 288), (32, 64), 64),\n Inception(528, 256, (160, 320), (32, 128), 128),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nb5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),\n Inception(832, 384, (192, 384), (48, 128), 128),\n nn.AdaptiveAvgPool2d((1,1)),\n nn.Flatten())\nnet = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))\nX = torch.rand(size=(1, 1, 96, 96))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 506, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps):\n inv = tf.cast(tf.math.rsqrt(moving_var + eps), X.dtype)\n inv *= gamma\n Y = X * inv + (beta - moving_mean * inv)\n return Y\nclass BatchNorm(tf.keras.layers.Layer):\n def __init__(self, **kwargs):\n super(BatchNorm, self).__init__(**kwargs)\n def build(self, input_shape):\n weight_shape = [input_shape[-1], ]\n self.gamma = self.add_weight(name='gamma', shape=weight_shape, initializer=tf.initializers.ones, trainable=True)\n self.beta = self.add_weight(name='beta', shape=weight_shape, initializer=tf.initializers.zeros, trainable=True)\n self.moving_mean = self.add_weight(name='moving_mean', shape=weight_shape, initializer=tf.initializers.zeros, trainable=False)\n self.moving_variance = self.add_weight(name='moving_variance', shape=weight_shape, initializer=tf.initializers.ones, trainable=False)\n super(BatchNorm, self).build(input_shape)\n def assign_moving_average(self, variable, value):\n momentum = 0.9\n delta = variable * momentum + value * (1 - momentum)\n return variable.assign(delta)\n @tf.function\n def call(self, inputs, training):\n if training:\n axes = list(range(len(inputs.shape) - 1))\n batch_mean = tf.reduce_mean(inputs, axes, keepdims=True)\n batch_variance = tf.reduce_mean(tf.math.squared_difference(inputs, tf.stop_gradient(batch_mean)), axes, keepdims=True)\n batch_mean = tf.squeeze(batch_mean, axes)\n batch_variance = tf.squeeze(batch_variance, axes)\n mean_update = self.assign_moving_average(self.moving_mean, batch_mean)\n variance_update = self.assign_moving_average(self.moving_variance, batch_variance)\n self.add_update(mean_update)\n self.add_update(variance_update)\n mean, variance = batch_mean, batch_variance\n else:\n mean, variance = self.moving_mean, self.moving_variance\n output = batch_norm(inputs, moving_mean=mean, moving_var=variance, beta=self.beta, gamma=self.gamma, eps=1e-5)\n return output\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n 
tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(84),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(10)]\n )\nlr, num_epochs, batch_size = 1.0, 10, 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())\ntf.reshape(net.layers[1].gamma, (-1,)), tf.reshape(net.layers[1].beta, (-1,))\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(84),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(10),\n ])", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n if not torch.is_grad_enabled():\n X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps)\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = X.mean(dim=0)\n var = ((X - mean) ** 2).mean(dim=0)\n else:\n mean = X.mean(dim=(0, 2, 3), keepdim=True)\n var = ((X - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True)\n X_hat = (X - mean) / torch.sqrt(var + eps)\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean.data, moving_var.data\nclass BatchNorm(nn.Module):\n def __init__(self, num_features, num_dims):\n super().__init__()\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = nn.Parameter(torch.ones(shape))\n self.beta = nn.Parameter(torch.zeros(shape))\n self.moving_mean = torch.zeros(shape)\n self.moving_var = torch.ones(shape)\n def forward(self, X):\n if self.moving_mean.device != X.device:\n self.moving_mean = self.moving_mean.to(X.device)\n self.moving_var = self.moving_var.to(X.device)\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma, self.beta, self.moving_mean,\n self.moving_var, eps=1e-5, momentum=0.9)\n return Y\nnet = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),\n nn.Linear(16*4*4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),\n nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),\n nn.Linear(84, 10))\nlr, num_epochs, batch_size = 1.0, 10, 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())\nnet[1].gamma.reshape((-1,)), net[1].beta.reshape((-1,))\nnet = nn.Sequential(\n nn.Conv2d(1, 6, 
kernel_size=5), nn.BatchNorm2d(6), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.Sigmoid(),\n nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),\n nn.Linear(256, 120), nn.BatchNorm1d(120), nn.Sigmoid(),\n nn.Linear(120, 84), nn.BatchNorm1d(84), nn.Sigmoid(),\n nn.Linear(84, 10))"} +{"id": 507, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass Residual(tf.keras.Model):\n def __init__(self, num_channels, use_1x1conv=False, strides=1):\n super().__init__()\n self.conv1 = tf.keras.layers.Conv2D(\n num_channels, padding='same', kernel_size=3, strides=strides)\n self.conv2 = tf.keras.layers.Conv2D(\n num_channels, kernel_size=3, padding='same')\n self.conv3 = None\n if use_1x1conv:\n self.conv3 = tf.keras.layers.Conv2D(\n num_channels, kernel_size=1, strides=strides)\n self.bn1 = tf.keras.layers.BatchNormalization()\n self.bn2 = tf.keras.layers.BatchNormalization()\n def call(self, X):\n Y = tf.keras.activations.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3 is not None:\n X = self.conv3(X)\n Y += X\n return tf.keras.activations.relu(Y)\nblk = Residual(3)\nX = tf.random.uniform((4, 6, 6, 3))\nY = blk(X)\nY.shape\nblk = Residual(6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\nclass ResnetBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels, num_residuals, first_block=False, **kwargs):\n super(ResnetBlock, self).__init__(**kwargs)\n self.residual_layers = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n self.residual_layers.append(Residual(num_channels, use_1x1conv=True, strides=2))\n else:\n self.residual_layers.append(Residual(num_channels))\n def call(self, X):\n for layer in self.residual_layers.layers:\n X = layer(X)\n return X\nb2 = ResnetBlock(64, 2, first_block=True)\nb3 = ResnetBlock(128, 2)\nb4 = ResnetBlock(256, 2)\nb5 = ResnetBlock(512, 2)\ndef net():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same'),\n ResnetBlock(64, 2, first_block=True),\n ResnetBlock(128, 2),\n ResnetBlock(256, 2),\n ResnetBlock(512, 2),\n tf.keras.layers.GlobalAvgPool2D(),\n tf.keras.layers.Dense(units=10)])\nX = tf.random.uniform(shape=(1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nclass Residual(nn.Module):\n def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1):\n super().__init__()\n self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2d(input_channels, num_channels, kernel_size=1, stride=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2d(num_channels)\n self.bn2 = nn.BatchNorm2d(num_channels)\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = 
self.conv3(X)\n Y += X\n return F.relu(Y)\nblk = Residual(3,3)\nX = torch.rand(4, 3, 6, 6)\nY = blk(X)\nY.shape\nblk = Residual(3,6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\ndef resnet_block(input_channels, num_channels, num_residuals, first_block=False):\n blk = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))\n else:\n blk.append(Residual(num_channels, num_channels))\n return blk\nb2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))\nb3 = nn.Sequential(*resnet_block(64, 128, 2))\nb4 = nn.Sequential(*resnet_block(128, 256, 2))\nb5 = nn.Sequential(*resnet_block(256, 512, 2))\nnet = nn.Sequential(b1, b2, b3, b4, b5,\n nn.AdaptiveAvgPool2d((1,1)),\n nn.Flatten(), nn.Linear(512, 10))\nX = torch.rand(size=(1, 1, 224, 224))\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 508, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass ConvBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels):\n super(ConvBlock, self).__init__()\n self.bn = tf.keras.layers.BatchNormalization()\n self.relu = tf.keras.layers.ReLU()\n self.conv = tf.keras.layers.Conv2D(filters=num_channels, kernel_size=(3, 3), padding='same')\n self.listLayers = [self.bn, self.relu, self.conv]\n def call(self, x):\n y = x\n for layer in self.listLayers.layers:\n y = layer(y)\n y = tf.keras.layers.concatenate([x,y], axis=-1)\n return y\nclass DenseBlock(tf.keras.layers.Layer):\n def __init__(self, num_convs, num_channels):\n super(DenseBlock, self).__init__()\n self.listLayers = []\n for _ in range(num_convs):\n self.listLayers.append(ConvBlock(num_channels))\n def call(self, x):\n for layer in self.listLayers.layers:\n x = layer(x)\n return x\nblk = DenseBlock(2, 10)\nX = tf.random.uniform((4, 8, 8, 3))\nY = blk(X)\nY.shape\nclass TransitionBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels, **kwargs):\n super(TransitionBlock, self).__init__(**kwargs)\n self.batch_norm = tf.keras.layers.BatchNormalization()\n self.relu = tf.keras.layers.ReLU()\n self.conv = tf.keras.layers.Conv2D(num_channels, kernel_size=1)\n self.avg_pool = tf.keras.layers.AvgPool2D(pool_size=2, strides=2)\n def call(self, x):\n x = self.batch_norm(x)\n x = self.relu(x)\n x = self.conv(x)\n return self.avg_pool(x)\nblk = TransitionBlock(10)\nblk(Y).shape\ndef block_1():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.ReLU(),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef block_2():\n net = block_1()\n num_channels, growth_rate = 64, 32\n num_convs_in_dense_blocks = [4, 4, 4, 4]\n for i, num_convs in enumerate(num_convs_in_dense_blocks):\n net.add(DenseBlock(num_convs, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n num_channels //= 2\n net.add(TransitionBlock(num_channels))\n return net\ndef net():\n net = block_2()\n net.add(tf.keras.layers.BatchNormalization())\n net.add(tf.keras.layers.ReLU())\n net.add(tf.keras.layers.GlobalAvgPool2D())\n net.add(tf.keras.layers.Flatten())\n net.add(tf.keras.layers.Dense(10))\n return net", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as 
d2l\ndef conv_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2d(input_channels), nn.ReLU(),\n nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1))\nclass DenseBlock(nn.Module):\n def __init__(self, num_convs, input_channels, num_channels):\n super(DenseBlock, self).__init__()\n layer = []\n for i in range(num_convs):\n layer.append(conv_block(num_channels * i + input_channels, num_channels))\n self.net = nn.Sequential(*layer)\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = torch.cat((X, Y), dim=1)\n return X\nblk = DenseBlock(2, 3, 10)\nX = torch.randn(4, 3, 8, 8)\nY = blk(X)\nY.shape\ndef transition_block(input_channels, num_channels):\n return nn.Sequential(\n nn.BatchNorm2d(input_channels), nn.ReLU(),\n nn.Conv2d(input_channels, num_channels, kernel_size=1),\n nn.AvgPool2d(kernel_size=2, stride=2))\nblk = transition_block(23, 10)\nblk(Y).shape\nb1 = nn.Sequential(\n nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nblks = []\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n blks.append(DenseBlock(num_convs, num_channels, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n blks.append(transition_block(num_channels, num_channels // 2))\n num_channels = num_channels // 2\nnet = nn.Sequential(\n b1, *blks,\n nn.BatchNorm2d(num_channels), nn.ReLU(),\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten(),\n nn.Linear(num_channels, 10))"} +{"id": 509, "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nT = 1000\ntime = tf.range(1, T + 1, dtype=tf.float32)\nx = tf.sin(0.01 * time) + tf.random.normal([T], 0, 0.2)\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = tf.Variable(tf.zeros((T - tau, tau)))\nfor i in range(tau):\n features[:, i].assign(x[i: T - tau + i])\nlabels = tf.reshape(x[tau:], (-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef get_net():\n net = tf.keras.Sequential([tf.keras.layers.Dense(10, activation='relu'),\n tf.keras.layers.Dense(1)])\n return net\nloss = tf.keras.losses.MeanSquaredError()\ndef train(net, train_iter, loss, epochs, lr):\n trainer = tf.keras.optimizers.Adam()\n for epoch in range(epochs):\n for X, y in train_iter:\n with tf.GradientTape() as g:\n out = net(X)\n l = loss(y, out)\n params = net.trainable_variables\n grads = g.gradient(l, params)\n trainer.apply_gradients(zip(grads, params))\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.numpy(), onestep_preds.numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = tf.Variable(tf.zeros(T))\nmultistep_preds[:n_train + tau].assign(x[:n_train + tau])\nfor i in range(n_train + tau, T):\n multistep_preds[i].assign(tf.reshape(net(tf.reshape(multistep_preds[i - tau: i], (1, -1))), ()))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.numpy(), onestep_preds.numpy(),\n multistep_preds[n_train + tau:].numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = tf.Variable(tf.zeros((T - tau - max_steps + 1, tau + max_steps)))\nfor i in range(tau):\n features[:, 
i].assign(x[i: i + T - tau - max_steps + 1].numpy())\nfor i in range(tau, tau + max_steps):\n features[:, i].assign(tf.reshape(net((features[:, i - tau: i])), -1))\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))", "pytorch": "%matplotlib inline\nimport torch\nfrom torch import nn\nfrom d2l import torch as d2l\nT = 1000\ntime = torch.arange(1, T + 1, dtype=torch.float32)\nx = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = torch.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef init_weights(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\ndef get_net():\n net = nn.Sequential(nn.Linear(4, 10),\n nn.ReLU(),\n nn.Linear(10, 1))\n net.apply(init_weights)\n return net\nloss = nn.MSELoss(reduction='none')\ndef train(net, train_iter, loss, epochs, lr):\n trainer = torch.optim.Adam(net.parameters(), lr)\n for epoch in range(epochs):\n for X, y in train_iter:\n trainer.zero_grad()\n l = loss(net(X), y)\n l.sum().backward()\n trainer.step()\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = torch.zeros(T)\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.detach().numpy(), onestep_preds.detach().numpy(),\n multistep_preds[n_train + tau:].detach().numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = torch.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape(-1)\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"} +{"id": 510, "tensorflow": "import collections\nimport re\nfrom d2l import tensorflow as d2l\ndef tokenize(lines, token='word'):\n if token == 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('Error: Unknown word element type:' + token)\ntokens = tokenize(lines)\nfor i in range(11):\n print(tokens[i])\ndef load_corpus_time_machine(max_tokens=-1):\n lines = read_time_machine()\n tokens = tokenize(lines, 'char')\n vocab = Vocab(tokens)\n corpus = [vocab[token] for line in tokens for token in line]\n if max_tokens > 0:\n corpus = corpus[:max_tokens]\n return corpus, vocab\ncorpus, vocab = load_corpus_time_machine()\nlen(corpus), len(vocab)", "pytorch": "import collections\nimport re\nfrom d2l import torch as d2l\ndef tokenize(lines, token='word'):\n if token == 'word':\n return [line.split() for line in lines]\n 
elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('Error: Unknown word element type:' + token)\ntokens = tokenize(lines)\nfor i in range(11):\n print(tokens[i])\ndef load_corpus_time_machine(max_tokens=-1):\n lines = read_time_machine()\n tokens = tokenize(lines, 'char')\n vocab = Vocab(tokens)\n corpus = [vocab[token] for line in tokens for token in line]\n if max_tokens > 0:\n corpus = corpus[:max_tokens]\n return corpus, vocab\ncorpus, vocab = load_corpus_time_machine()\nlen(corpus), len(vocab)"} +{"id": 511, "tensorflow": "import random\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield tf.constant(X), tf.constant(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = tf.constant(corpus[offset: offset + num_tokens])\n Ys = tf.constant(corpus[offset + 1: offset + 1 + num_tokens])\n Xs = tf.reshape(Xs, (batch_size, -1))\n Ys = tf.reshape(Ys, (batch_size, -1))\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_batches * num_steps, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y", "pytorch": "import random\nimport torch\nfrom d2l import torch as d2l\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield torch.tensor(X), torch.tensor(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = torch.tensor(corpus[offset: offset + num_tokens])\n Ys = torch.tensor(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"} +{"id": 512, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nX, W_xh = tf.random.normal((3, 1), 0, 1), tf.random.normal((1, 4), 0, 1)\nH, W_hh = tf.random.normal((3, 4), 0, 
1), tf.random.normal((4, 4), 0, 1)\ntf.matmul(X, W_xh) + tf.matmul(H, W_hh)\ntf.matmul(tf.concat((X, H), 1), tf.concat((W_xh, W_hh), 0))", "pytorch": "import torch\nfrom d2l import torch as d2l\nX, W_xh = torch.normal(0, 1, (3, 1)), torch.normal(0, 1, (1, 4))\nH, W_hh = torch.normal(0, 1, (3, 4)), torch.normal(0, 1, (4, 4))\ntorch.matmul(X, W_xh) + torch.matmul(H, W_hh)\ntorch.matmul(torch.cat((X, H), 1), torch.cat((W_xh, W_hh), 0))"} +{"id": 513, "tensorflow": "%matplotlib inline\nimport math\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ntrain_random_iter, vocab_random_iter = d2l.load_data_time_machine(batch_size, num_steps, use_random_iter=True)\ntf.one_hot(tf.constant([0, 2]), len(vocab))\nX = tf.reshape(tf.range(10), (2, 5))\ntf.one_hot(tf.transpose(X), 28).shape\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32)\n W_xh = tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32)\n W_hh = tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32)\n b_h = tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32)\n W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32)\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n return params\ndef init_rnn_state(batch_size, num_hiddens):\n return (tf.zeros((batch_size, num_hiddens)), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n X = tf.reshape(X,[-1,W_xh.shape[0]])\n H = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(H, W_hh) + b_h)\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, init_state, forward_fn, get_params):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.init_state, self.forward_fn = init_state, forward_fn\n self.trainable_variables = get_params(vocab_size, num_hiddens)\n def __call__(self, X, state):\n X = tf.one_hot(tf.transpose(X), self.vocab_size)\n X = tf.cast(X, tf.float32)\n return self.forward_fn(X, state, self.trainable_variables)\n def begin_state(self, batch_size, *args, **kwargs):\n return self.init_state(batch_size, self.num_hiddens)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nnum_hiddens = 512\nwith strategy.scope():\n net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params)\nstate = net.begin_state(X.shape[0])\nY, new_state = net(X, state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab):\n state = net.begin_state(batch_size=1, dtype=tf.float32)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: tf.reshape(tf.constant([outputs[-1]]),\n (1, 1)).numpy()\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.numpy().argmax(axis=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\npredict_ch8('time traveller ', 10, net, vocab)\ndef grad_clipping(grads, theta):\n theta = tf.constant(theta, dtype=tf.float32)\n new_grad = []\n for grad in grads:\n if isinstance(grad, tf.IndexedSlices):\n new_grad.append(tf.convert_to_tensor(grad))\n else:\n 
new_grad.append(grad)\n norm = tf.math.sqrt(sum((tf.reduce_sum(grad ** 2)).numpy()\n for grad in new_grad))\n norm = tf.cast(norm, tf.float32)\n if tf.greater(norm, theta):\n for i, grad in enumerate(new_grad):\n new_grad[i] = grad * theta / norm\n else:\n new_grad = new_grad\n return new_grad\ndef train_epoch_ch8(net, train_iter, loss, updater, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], dtype=tf.float32)\n with tf.GradientTape(persistent=True) as g:\n y_hat, state = net(X, state)\n y = tf.reshape(tf.transpose(Y), (-1))\n l = loss(y, y_hat)\n params = net.trainable_variables\n grads = g.gradient(l, params)\n grads = grad_clipping(grads, 1)\n updater.apply_gradients(zip(grads, params))\n metric.add(l * d2l.size(y), d2l.size(y))\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, strategy, use_random_iter=False):\n with strategy.scope():\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n updater = tf.keras.optimizers.SGD(lr)\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\n device = d2l.try_gpu()._device_name\nnum_epochs, lr = 500, 1\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, strategy)\nwith strategy.scope():\n net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params)\ntrain_ch8(net, train_iter, vocab_random_iter, lr, num_epochs, strategy, use_random_iter=True)", "pytorch": "%matplotlib inline\nimport math\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nF.one_hot(torch.tensor([0, 2]), len(vocab))\nX = torch.arange(10).reshape((2, 5))\nF.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device) * 0.01\n W_xh = normal((num_inputs, num_hiddens))\n W_hh = normal((num_hiddens, num_hiddens))\n b_h = torch.zeros(num_hiddens, device=device)\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_rnn_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = torch.tanh(torch.mm(X, W_xh) + torch.mm(H, W_hh) + b_h)\n Y = torch.mm(H, W_hq) + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens, device)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = F.one_hot(X.T, self.vocab_size).type(torch.float32)\n return self.forward_fn(X, state, self.params)\n def 
begin_state(self, batch_size, device):\n return self.init_state(batch_size, self.num_hiddens, device)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0], d2l.try_gpu())\nY, new_state = net(X.to(d2l.try_gpu()), state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1, device=device)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.argmax(dim=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\npredict_ch8('time traveller ', 10, net, vocab, d2l.try_gpu())\ndef grad_clipping(net, theta):\n if isinstance(net, nn.Module):\n params = [p for p in net.parameters() if p.requires_grad]\n else:\n params = net.params\n norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))\n if norm > theta:\n for param in params:\n param.grad[:] *= theta / norm\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], device=device)\n else:\n if isinstance(net, nn.Module) and not isinstance(state, tuple):\n state.detach_()\n else:\n for s in state:\n s.detach_()\n y = Y.T.reshape(-1)\n X, y = X.to(device), y.to(device)\n y_hat, state = net(X, state)\n l = loss(y_hat, y.long()).mean()\n if isinstance(updater, torch.optim.Optimizer):\n updater.zero_grad()\n l.backward()\n grad_clipping(net, 1)\n updater.step()\n else:\n l.backward()\n grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * y.numel(), y.numel())\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, nn.Module):\n updater = torch.optim.SGD(net.parameters(), lr)\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\nnum_epochs, lr = 500, 1\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu())\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)"} +{"id": 514, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_cell = tf.keras.layers.SimpleRNNCell(num_hiddens, kernel_initializer='glorot_uniform')\nrnn_layer = tf.keras.layers.RNN(rnn_cell, time_major=True, return_sequences=True, return_state=True)\nstate = rnn_cell.get_initial_state(batch_size=batch_size, dtype=tf.float32)\nstate.shape\nX = tf.random.uniform((num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, 
len(state_new), state_new[0].shape\nclass RNNModel(tf.keras.layers.Layer):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.dense = tf.keras.layers.Dense(vocab_size)\n def call(self, inputs, state):\n X = tf.one_hot(tf.transpose(inputs), self.vocab_size)\n Y, *state = self.rnn(X, state)\n output = self.dense(tf.reshape(Y, (-1, Y.shape[-1])))\n return output, state\n def begin_state(self, *args, **kwargs):\n return self.rnn.cell.get_initial_state(*args, **kwargs)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n net = RNNModel(rnn_layer, vocab_size=len(vocab))\nd2l.predict_ch8('time traveller', 10, net, vocab)\nnum_epochs, lr = 500, 1\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, strategy)", "pytorch": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = nn.RNN(len(vocab), num_hiddens)\nstate = torch.zeros((1, batch_size, num_hiddens))\nstate.shape\nX = torch.rand(size=(num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, state_new.shape\nclass RNNModel(nn.Module):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.num_hiddens = self.rnn.hidden_size\n if not self.rnn.bidirectional:\n self.num_directions = 1\n self.linear = nn.Linear(self.num_hiddens, self.vocab_size)\n else:\n self.num_directions = 2\n self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)\n def forward(self, inputs, state):\n X = F.one_hot(inputs.T.long(), self.vocab_size)\n X = X.to(torch.float32)\n Y, state = self.rnn(X, state)\n output = self.linear(Y.reshape((-1, Y.shape[-1])))\n return output, state\n def begin_state(self, device, batch_size=1):\n if not isinstance(self.rnn, nn.LSTM):\n return torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device)\n else:\n return (torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device),\n torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device))\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, vocab_size=len(vocab))\nnet = net.to(device)\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)"} +{"id": 515, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32)\n def three():\n return (tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32), tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32)\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, 
W_hq, b_q]\n return params\ndef init_gru_state(batch_size, num_hiddens):\n return (tf.zeros((batch_size, num_hiddens)), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n X = tf.reshape(X,[-1,W_xh.shape[0]])\n Z = tf.sigmoid(tf.matmul(X, W_xz) + tf.matmul(H, W_hz) + b_z)\n R = tf.sigmoid(tf.matmul(X, W_xr) + tf.matmul(H, W_hr) + b_r)\n H_tilda = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(R * H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,)\nvocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nnum_epochs, lr = 500, 1\nwith strategy.scope():\n model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_gru_state, gru, get_params)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)\ngru_cell = tf.keras.layers.GRUCell(num_hiddens, kernel_initializer='glorot_uniform')\ngru_layer = tf.keras.layers.RNN(gru_cell, time_major=True, return_sequences=True, return_state=True)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModel(gru_layer, vocab_size=len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_gru_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = torch.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)\n R = torch.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)\n H_tilda = torch.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = H @ W_hq + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H,)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\ngru_layer = nn.GRU(num_inputs, num_hiddens)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nmodel = model.to(device)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"} +{"id": 516, "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return 
tf.Variable(tf.random.normal(shape=shape, stddev=0.01, mean=0, dtype=tf.float32))\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n return params\ndef init_lstm_state(batch_size, num_hiddens):\n return (tf.zeros(shape=(batch_size, num_hiddens)), tf.zeros(shape=(batch_size, num_hiddens)))\ndef lstm(inputs, state, params):\n W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q = params\n (H, C) = state\n outputs = []\n for X in inputs:\n X=tf.reshape(X,[-1,W_xi.shape[0]])\n I = tf.sigmoid(tf.matmul(X, W_xi) + tf.matmul(H, W_hi) + b_i)\n F = tf.sigmoid(tf.matmul(X, W_xf) + tf.matmul(H, W_hf) + b_f)\n O = tf.sigmoid(tf.matmul(X, W_xo) + tf.matmul(H, W_ho) + b_o)\n C_tilda = tf.tanh(tf.matmul(X, W_xc) + tf.matmul(H, W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * tf.tanh(C)\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,C)\nvocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name\nnum_epochs, lr = 500, 1\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_lstm_state, lstm, get_lstm_params)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)\nlstm_cell = tf.keras.layers.LSTMCell(num_hiddens, kernel_initializer='glorot_uniform')\nlstm_layer = tf.keras.layers.RNN(lstm_cell, time_major=True, return_sequences=True, return_state=True)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModel(lstm_layer, vocab_size=len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)", "pytorch": "import torch\nfrom torch import nn\nfrom d2l import torch as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return torch.randn(size=shape, device=device)*0.01\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = torch.zeros(num_outputs, device=device)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.requires_grad_(True)\n return params\ndef init_lstm_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), torch.zeros((batch_size, num_hiddens), device=device))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = torch.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)\n F = torch.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)\n O = torch.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)\n C_tilda = torch.tanh((X @ W_xc) + (H @ W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * 
torch.tanh(C)\n Y = (H @ W_hq) + b_q\n outputs.append(Y)\n return torch.cat(outputs, dim=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nnum_inputs = vocab_size\nlstm_layer = nn.LSTM(num_inputs, num_hiddens)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nmodel = model.to(device)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)"} +{"id": 517, "tensorflow": "import os\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = tf.constant([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = tf.reduce_sum(\n tf.cast(array != vocab['<pad>'], tf.int32), 1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', tf.cast(X, tf.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', tf.cast(Y, tf.int32))\n print('Valid length of Y:', Y_valid_len)\n break", "pytorch": "import os\nimport torch\nfrom d2l import torch as d2l\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = torch.tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = (array != vocab['<pad>']).type(torch.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.type(torch.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.type(torch.int32))\n print('Valid length of Y:', Y_valid_len)\n break"} +{"id": 518, "mxnet": "x = np.arange(12)\nx.size\nX = x.reshape(3, 4)\nnp.zeros((2, 3, 4))\nnp.ones((2, 3, 4))\nnp.random.normal(0, 1, size=(3, 4))\nnp.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = np.array([1, 2, 4, 8])\ny = np.array([2, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\nnp.exp(x)\nX = np.arange(12).reshape(3, 4)\nY = np.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nnp.concatenate([X, Y], axis=0), np.concatenate([X, Y], axis=1)\nX.sum()\na = np.arange(3).reshape(3, 1)\nb = np.arange(2).reshape(1, 2)\nX[1, 2] = 9\nX[0:2, :] = 12\nZ = np.zeros_like(Y)\nZ[:] = X + Y\nbefore = id(X)\nX += Y\nid(X) == before\nA = X.asnumpy()\nB = np.array(A)\na = np.array([3.5])\nprint(a, a.item(), float(a), int(a))", "tensorflow": "x = tf.range(12)\ntf.size(x)\nX = tf.reshape(x, (3, 4))\ntf.zeros((2, 3, 4))\ntf.ones((2, 3, 4))\ntf.random.normal(shape=[3, 4])\ntf.constant([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\nx = tf.constant([1.0, 2, 4, 8])\ny = tf.constant([2.0, 2, 2, 2])\nx + y, x - y, x * y, x / y, x ** y\ntf.exp(x)\nX = tf.reshape(tf.range(12, dtype=tf.float32), (3, 4))\nY = tf.constant([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])\ntf.concat([X, Y], axis=0), tf.concat([X, Y], axis=1)\ntf.reduce_sum(X)\na = tf.reshape(tf.range(3), (3, 1))\nb = tf.reshape(tf.range(2), (1, 2))\nX_var = tf.Variable(X)\nX_var[1, 2].assign(9)\nX_var = tf.Variable(X)\nX_var[0:2, :].assign(tf.ones(X_var[0:2,:].shape, dtype = tf.float32) * 12)\nZ = tf.Variable(tf.zeros_like(Y))\nZ.assign(X + Y)\n@tf.function\ndef computation(X, Y):\n Z = tf.zeros_like(Y)\n A = X + Y\n B = A + Y\n C = B + Y\n return C + 
Y\ncomputation(X, Y)\nA = X.numpy()\nB = tf.constant(A)\na = tf.constant([3.5]).numpy()\nprint(a, a.item(), float(a), int(a))"} +{"id": 519, "mxnet": "from mxnet import np\nX, y = np.array(inputs.values), np.array(outputs.values)", "tensorflow": "import tensorflow as tf\nX, y = tf.constant(inputs.values), tf.constant(outputs.values)"} +{"id": 520, "mxnet": "from mxnet import np, npx\nnpx.set_np()\nx = np.array(3.0)\ny = np.array(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = np.arange(4)\nA = np.arange(20).reshape(5, 4)\nA.T\nB = np.array([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == B.T\nX = np.arange(24).reshape(2, 3, 4)\nA = np.arange(20).reshape(5, 4)\nB = A.copy()\nprint(A, A + B)\na = 2\nX = np.arange(24).reshape(2, 3, 4)\nprint(a + X, (a * X).shape)\nx = np.arange(4)\nprint(x, x.sum())\na = A.sum()\nA_sum_axis0 = A.sum(axis=0)\nA_sum_axis1 = A.sum(axis=1)\nA.sum(axis=[0, 1])\nA.mean()\nA.sum() / A.size\nA.mean(axis=0)\nA.sum(axis=0) / A.shape[0]\nsum_A = A.sum(axis=1, keepdims=True)\nA.cumsum(axis=0)\ny = np.ones(4)\nprint(np.dot(x, y))\nnp.sum(x * y)\nA.shape, x.shape, np.dot(A, x)\nB = np.ones(shape=(4, 3))\nnp.dot(A, B)\nu = np.array([3, -4])\nnp.linalg.norm(u)\nnp.abs(u).sum()\nnp.linalg.norm(np.ones((4, 9)))", "tensorflow": "import tensorflow as tf\nx = tf.constant(3.0)\ny = tf.constant(2.0)\nprint(x + y, x * y, x / y, x**y)\nx = tf.range(4)\nA = tf.reshape(tf.range(20), (5, 4))\ntf.transpose(A)\nB = tf.constant([[1, 2, 3], [2, 0, 4], [3, 4, 5]])\nB == tf.transpose(B)\nX = tf.reshape(tf.range(24), (2, 3, 4))\nA = tf.reshape(tf.range(20, dtype=tf.float32), (5, 4))\nB = A\nprint(A, A + B)\na = 2\nX = tf.reshape(tf.range(24), (2, 3, 4))\nprint(a + X, (a * X).shape)\nx = tf.range(4, dtype=tf.float32)\nprint(x, tf.reduce_sum(x))\na = tf.reduce_sum(A)\nA_sum_axis0 = tf.reduce_sum(A, axis=0)\nA_sum_axis1 = tf.reduce_sum(A, axis=1)\ntf.reduce_sum(A, axis=[0, 1])\ntf.reduce_mean(A)\ntf.reduce_sum(A) / tf.size(A).numpy()\ntf.reduce_mean(A, axis=0)\ntf.reduce_sum(A, axis=0) / A.shape[0]\nsum_A = tf.reduce_sum(A, axis=1, keepdims=True)\ntf.cumsum(A, axis=0)\ny = tf.ones(4, dtype=tf.float32)\nprint(tf.tensordot(x, y, axes=1))\ntf.reduce_sum(x * y)\nA.shape, x.shape, tf.linalg.matvec(A, x)\nB = tf.ones((4, 3), tf.float32)\ntf.matmul(A, B)\nu = tf.constant([3.0, -4.0])\ntf.norm(u)\ntf.reduce_sum(tf.abs(u))\ntf.norm(tf.ones((4, 9)))"} +{"id": 521, "mxnet": "%matplotlib inline\nfrom matplotlib_inline import backend_inline\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef f(x):\n return 3 * x ** 2 - 4 * x", "tensorflow": "%matplotlib inline\nimport numpy as np\nfrom matplotlib_inline import backend_inline\nfrom d2l import tensorflow as d2l\ndef f(x):\n return 3 * x ** 2 - 4 * x"} +{"id": 522, "mxnet": "from mxnet import autograd, np, npx\nnpx.set_np()\nx = np.arange(4.0)\nx.attach_grad()\nx.grad\nwith autograd.record():\n y = 2 * np.dot(x, x)\ny.backward()\nx.grad\nx.grad == 4 * x\nwith autograd.record():\n y = x.sum()\ny.backward()\nx.grad\nwith autograd.record():\n y = x * x\ny.backward()\nx.grad\nwith autograd.record():\n y = x * x\n u = y.detach()\n z = u * x\nz.backward()\nx.grad == u\ny.backward()\nx.grad == 2 * x\ndef f(a):\n b = a * 2\n while np.linalg.norm(b) < 1000:\n b = b * 2\n if b.sum() > 0:\n c = b\n else:\n c = 100 * b\n return c\na = np.random.normal()\na.attach_grad()\nwith autograd.record():\n d = f(a)\nd.backward()\na.grad == d / a", "tensorflow": "import tensorflow as tf\nx = tf.range(4, dtype=tf.float32)\nx = tf.Variable(x)\nwith tf.GradientTape() as t:\n y 
= 2 * tf.tensordot(x, x, axes=1)\nx_grad = t.gradient(y, x)\nx_grad\nx_grad == 4 * x\nwith tf.GradientTape() as t:\n y = tf.reduce_sum(x)\nt.gradient(y, x)\nwith tf.GradientTape() as t:\n y = x * x\nt.gradient(y, x)\nwith tf.GradientTape(persistent=True) as t:\n y = x * x\n u = tf.stop_gradient(y)\n z = u * x\nx_grad = t.gradient(z, x)\nx_grad == u\nt.gradient(y, x) == 2 * x\ndef f(a):\n b = a * 2\n while tf.norm(b) < 1000:\n b = b * 2\n if tf.reduce_sum(b) > 0:\n c = b\n else:\n c = 100 * b\n return c\na = tf.Variable(tf.random.normal(shape=()))\nwith tf.GradientTape() as t:\n d = f(a)\nd_grad = t.gradient(d, a)\nd_grad\nd_grad == d / a"} +{"id": 523, "mxnet": "%matplotlib inline\nimport random\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nfair_probs = [1.0 / 6] * 6\nnp.random.multinomial(1, fair_probs)\nnp.random.multinomial(10, fair_probs)\ncounts = np.random.multinomial(1000, fair_probs).astype(np.float32)", "tensorflow": "%matplotlib inline\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom d2l import tensorflow as d2l\nfair_probs = tf.ones(6) / 6\ntfp.distributions.Multinomial(1, fair_probs).sample()\ntfp.distributions.Multinomial(10, fair_probs).sample()\ncounts = tfp.distributions.Multinomial(1000, fair_probs).sample()"} +{"id": 524, "mxnet": "counts = np.random.multinomial(10, fair_probs, size=500)\ncum_counts = counts.astype(np.float32).cumsum(axis=0)\nestimates = cum_counts / cum_counts.sum(axis=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].asnumpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nfrom mxnet import np\na = dir(np.random)\nhelp(np.ones)\nnp.ones(4)", "tensorflow": "counts = tfp.distributions.Multinomial(10, fair_probs).sample(500)\ncum_counts = tf.cumsum(counts, axis=0)\nestimates = cum_counts / tf.reduce_sum(cum_counts, axis=1, keepdims=True)\nd2l.set_figsize((6, 4.5))\nfor i in range(6):\n d2l.plt.plot(estimates[:, i].numpy(), label=(\"P(die=\" + str(i + 1) + \")\"))\nd2l.plt.axhline(y=0.167, color='black', linestyle='dashed')\nd2l.plt.gca().set_xlabel('Groups of experiments')\nd2l.plt.gca().set_ylabel('Estimated probability')\nd2l.plt.legend();\nimport tensorflow as tf\na = dir(tf.random)\nhelp(tf.ones)\ntf.ones(4)"} +{"id": 525, "mxnet": "%matplotlib inline\nimport math\nimport time\nfrom mxnet import np\nfrom d2l import mxnet as d2l\nn = 10000\na = np.ones(n)\nb = np.ones(n)\nc = np.zeros(n)\ntimer = Timer()\nfor i in range(n):\n c[i] = a[i] + b[i]\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x.asnumpy(), [normal(x, mu, sigma).asnumpy() for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])", "tensorflow": "%matplotlib inline\nimport math\nimport time\nimport numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn = 10000\na = tf.ones(n)\nb = tf.ones(n)\nc = tf.Variable(tf.zeros(n))\ntimer = Timer()\nfor i in range(n):\n c[i].assign(a[i] + b[i])\nx = np.arange(-7, 7, 0.01)\nparams = [(0, 1), (0, 2), (3, 1)]\nd2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])"} +{"id": 526, "mxnet": "%matplotlib inline\nimport random\nfrom mxnet import 
autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef synthetic_data(w, b, num_examples):\n X = np.random.normal(0, 1, (num_examples, len(w)))\n y = np.dot(X, w) + b\n y += np.random.normal(0, 0.01, y.shape)\n return X, y.reshape((-1, 1))\ntrue_w = np.array([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].asnumpy(), labels.asnumpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n batch_indices = np.array(indices[i: min(i + batch_size, num_examples)])\n yield features[batch_indices], labels[batch_indices]\nw = np.random.normal(0, 0.01, (2, 1))\nb = np.zeros(1)\nw.attach_grad()\nb.attach_grad()\ndef linreg(X, w, b):\n return np.dot(X, w) + b\ndef squared_loss(y_hat, y):\n return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2\ndef sgd(params, lr, batch_size):\n for param in params:\n param[:] = param - lr * param.grad / batch_size\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n with autograd.record():\n l = loss(net(X, w, b), y)\n l.backward()\n sgd([w, b], lr, batch_size)\n train_l = loss(net(features, w, b), labels)", "tensorflow": "%matplotlib inline\nimport random\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef synthetic_data(w, b, num_examples):\n X = tf.zeros((num_examples, w.shape[0]))\n X += tf.random.normal(shape=X.shape)\n y = tf.matmul(X, tf.reshape(w, (-1, 1))) + b\n y += tf.random.normal(shape=y.shape, stddev=0.01)\n y = tf.reshape(y, (-1, 1))\n return X, y\ntrue_w = tf.constant([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = synthetic_data(true_w, true_b, 1000)\nd2l.set_figsize()\nd2l.plt.scatter(features[:, (1)].numpy(), labels.numpy(), 1);\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices)\n for i in range(0, num_examples, batch_size):\n j = tf.constant(indices[i: min(i + batch_size, num_examples)])\n yield tf.gather(features, j), tf.gather(labels, j)\nw = tf.Variable(tf.random.normal(shape=(2, 1), mean=0, stddev=0.01), trainable=True)\nb = tf.Variable(tf.zeros(1), trainable=True)\ndef linreg(X, w, b):\n return tf.matmul(X, w) + b\ndef squared_loss(y_hat, y):\n return (y_hat - tf.reshape(y, y_hat.shape)) ** 2 / 2\ndef sgd(params, grads, lr, batch_size):\n for param, grad in zip(params, grads):\n param.assign_sub(lr*grad/batch_size)\nlr = 0.03\nnum_epochs = 3\nnet = linreg\nloss = squared_loss\nfor epoch in range(num_epochs):\n for X, y in data_iter(batch_size, features, labels):\n with tf.GradientTape() as g:\n l = loss(net(X, w, b), y)\n dw, db = g.gradient(l, [w, b])\n sgd([w, b], [dw, db], lr, batch_size)\n train_l = loss(net(features, w, b), labels)"} +{"id": 527, "mxnet": "from mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ntrue_w = np.array([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = gluon.data.ArrayDataset(*data_arrays)\n return gluon.data.DataLoader(dataset, batch_size, shuffle=is_train)\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nfrom mxnet.gluon import nn\nnet = nn.Sequential()\nnet.add(nn.Dense(1))\nfrom mxnet import 
init\nnet.initialize(init.Normal(sigma=0.01))\nloss = gluon.loss.L2Loss()\nfrom mxnet import gluon\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03})\nw = net[0].weight.data()\nb = net[0].bias.data()", "tensorflow": "import numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntrue_w = tf.constant([2, -3.4])\ntrue_b = 4.2\nfeatures, labels = d2l.synthetic_data(true_w, true_b, 1000)\ndef load_array(data_arrays, batch_size, is_train=True):\n dataset = tf.data.Dataset.from_tensor_slices(data_arrays)\n if is_train:\n dataset = dataset.shuffle(buffer_size=1000)\n dataset = dataset.batch(batch_size)\n return dataset\nbatch_size = 10\ndata_iter = load_array((features, labels), batch_size)\nnet = tf.keras.Sequential()\nnet.add(tf.keras.layers.Dense(1))\ninitializer = tf.initializers.RandomNormal(stddev=0.01)\nnet = tf.keras.Sequential()\nnet.add(tf.keras.layers.Dense(1, kernel_initializer=initializer))\nloss = tf.keras.losses.MeanSquaredError()\ntrainer = tf.keras.optimizers.SGD(learning_rate=0.03)\nw = net.get_weights()[0]\nb = net.get_weights()[1]"} +{"id": 528, "mxnet": "%matplotlib inline\nimport sys\nfrom mxnet import gluon\nfrom d2l import mxnet as d2l\nd2l.use_svg_display()\nmnist_train = gluon.data.vision.FashionMNIST(train=True)\nmnist_test = gluon.data.vision.FashionMNIST(train=False)\nlen(mnist_train), len(mnist_test)\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(img.asnumpy())\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX, y = mnist_train[:18]\nshow_images(X.squeeze(axis=-1), 2, 9, titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ndef get_dataloader_workers():\n return 0 if sys.platform.startswith('win') else 4\ntransformer = gluon.data.vision.transforms.ToTensor()\ntrain_iter = gluon.data.DataLoader(mnist_train.transform_first(transformer), batch_size, shuffle=True, num_workers=get_dataloader_workers())\ndef load_data_fashion_mnist(batch_size, resize=None):\n dataset = gluon.data.vision\n trans = [dataset.transforms.ToTensor()]\n if resize:\n trans.insert(0, dataset.transforms.Resize(resize))\n trans = dataset.transforms.Compose(trans)\n mnist_train = dataset.FashionMNIST(train=True).transform_first(trans)\n mnist_test = dataset.FashionMNIST(train=False).transform_first(trans)\n return (gluon.data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),\n gluon.data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))", "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nd2l.use_svg_display()\nmnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()\nlen(mnist_train[0]), len(mnist_test[0])\ndef show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)\n axes = axes.flatten()\n for i, (ax, img) in enumerate(zip(axes, imgs)):\n ax.imshow(img.numpy())\n ax.axes.get_xaxis().set_visible(False)\n ax.axes.get_yaxis().set_visible(False)\n if titles:\n ax.set_title(titles[i])\n return axes\nX = tf.constant(mnist_train[0][:18])\ny = tf.constant(mnist_train[1][:18])\nshow_images(X, 2, 9, 
titles=get_fashion_mnist_labels(y));\nbatch_size = 256\ntrain_iter = tf.data.Dataset.from_tensor_slices(mnist_train).batch(batch_size).shuffle(len(mnist_train[0]))\ndef load_data_fashion_mnist(batch_size, resize=None):\n mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()\n process = lambda X, y: (tf.expand_dims(X, axis=3) / 255, tf.cast(y, dtype='int32'))\n resize_fn = lambda X, y: (tf.image.resize_with_pad(X, resize, resize) if resize else X, y)\n return (tf.data.Dataset.from_tensor_slices(process(*mnist_train)).batch(batch_size).shuffle(len(mnist_train[0])).map(resize_fn),\n tf.data.Dataset.from_tensor_slices(process(*mnist_test)).batch(batch_size).map(resize_fn))"} +{"id": 529, "mxnet": "from IPython import display\nfrom mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = np.random.normal(0, 0.01, (num_inputs, num_outputs))\nb = np.zeros(num_outputs)\nW.attach_grad()\nb.attach_grad()\nX = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\nX.sum(0, keepdims=True), X.sum(1, keepdims=True)\ndef softmax(X):\n X_exp = np.exp(X)\n partition = X_exp.sum(1, keepdims=True)\n return X_exp / partition\nX = np.random.normal(0, 1, (2, 5))\nX_prob = softmax(X)\nX_prob, X_prob.sum(1)\ndef net(X):\n return softmax(np.dot(X.reshape((-1, W.shape[0])), W) + b)\ny = np.array([0, 2])\ny_hat = np.array([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny_hat[[0, 1], y]\ndef cross_entropy(y_hat, y):\n return - np.log(y_hat[range(len(y_hat)), y])\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = y_hat.argmax(axis=1)\n cmp = y_hat.astype(y.dtype) == y\n return float(cmp.astype(y.dtype).sum())\ndef evaluate_accuracy(net, data_iter):\n metric = Accumulator(2)\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), d2l.size(y))\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n metric = Accumulator(3)\n if isinstance(updater, gluon.Trainer):\n updater = updater.step\n for X, y in train_iter:\n with autograd.record():\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.size)\n return metric[0] / metric[2], metric[1] / metric[2]\nlr = 0.1\ndef updater(batch_size):\n return d2l.sgd([W, b], lr, batch_size)\ndef predict_ch3(net, test_iter, n=6):\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))\n titles = [true +'\\n' + pred for true, pred in zip(trues, preds)]\n d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])\npredict_ch3(net, test_iter)", "tensorflow": "import tensorflow as tf\nfrom IPython import display\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs = 784\nnum_outputs = 10\nW = tf.Variable(tf.random.normal(shape=(num_inputs, num_outputs), mean=0, stddev=0.01))\nb = tf.Variable(tf.zeros(num_outputs))\nX = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\ntf.reduce_sum(X, 0, keepdims=True), tf.reduce_sum(X, 1, keepdims=True)\ndef softmax(X):\n X_exp = tf.exp(X)\n partition = tf.reduce_sum(X_exp, 1, keepdims=True)\n return X_exp / partition\nX = tf.random.normal((2, 5), 0, 1)\nX_prob = softmax(X)\nX_prob, tf.reduce_sum(X_prob, 1)\ndef net(X):\n return softmax(tf.matmul(tf.reshape(X, (-1, W.shape[0])), W) + 
b)\ny_hat = tf.constant([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])\ny = tf.constant([0, 2])\ntf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1]))\ndef cross_entropy(y_hat, y):\n return -tf.math.log(tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1])))\ncross_entropy(y_hat, y)\ndef accuracy(y_hat, y):\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = tf.argmax(y_hat, axis=1)\n cmp = tf.cast(y_hat, y.dtype) == y\n return float(tf.reduce_sum(tf.cast(cmp, y.dtype)))\ndef evaluate_accuracy(net, data_iter):\n metric = Accumulator(2)\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), d2l.size(y))\n return metric[0] / metric[1]\ndef train_epoch_ch3(net, train_iter, loss, updater):\n metric = Accumulator(3)\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n y_hat = net(X)\n if isinstance(loss, tf.keras.losses.Loss):\n l = loss(y, y_hat)\n else:\n l = loss(y_hat, y)\n if isinstance(updater, tf.keras.optimizers.Optimizer):\n params = net.trainable_variables\n grads = tape.gradient(l, params)\n updater.apply_gradients(zip(grads, params))\n else:\n updater(X.shape[0], tape.gradient(l, updater.params))\n l_sum = l * float(tf.size(y)) if isinstance(loss, tf.keras.losses.Loss) else tf.reduce_sum(l)\n metric.add(l_sum, accuracy(y_hat, y), tf.size(y))\n return metric[0] / metric[2], metric[1] / metric[2]\nclass Updater():\n def __init__(self, params, lr):\n self.params = params\n self.lr = lr\n def __call__(self, batch_size, grads):\n d2l.sgd(self.params, grads, self.lr, batch_size)\nupdater = Updater([W, b], lr=0.1)\ndef predict_ch3(net, test_iter, n=6):\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(tf.argmax(net(X), axis=1))\n titles = [true +'\\n' + pred for true, pred in zip(trues, preds)]\n d2l.show_images(tf.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n])\npredict_ch3(net, test_iter)"} +{"id": 530, "mxnet": "from mxnet import gluon, init, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = nn.Sequential()\nnet.add(nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = tf.keras.models.Sequential()\nnet.add(tf.keras.layers.Flatten(input_shape=(28, 28)))\nweight_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.01)\nnet.add(tf.keras.layers.Dense(10, kernel_initializer=weight_initializer))\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrainer = tf.keras.optimizers.SGD(learning_rate=.1)"} +{"id": 531, "mxnet": "%matplotlib inline\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nx = np.arange(-8.0, 8.0, 0.1)\nx.attach_grad()\nwith autograd.record():\n y = npx.relu(x)\nd2l.plot(x, y, 'x', 'relu(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of relu', figsize=(5, 2.5))\nwith autograd.record():\n y = npx.sigmoid(x)\nd2l.plot(x, y, 'x', 'sigmoid(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))\nwith autograd.record():\n y = np.tanh(x)\nd2l.plot(x, y, 'x', 'tanh(x)', figsize=(5, 2.5))\ny.backward()\nd2l.plot(x, x.grad, 'x', 'grad of tanh', figsize=(5, 
2.5))", "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nx = tf.Variable(tf.range(-8.0, 8.0, 0.1), dtype=tf.float32)\ny = tf.nn.relu(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'relu(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.relu(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of relu', figsize=(5, 2.5))\ny = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of sigmoid',\n figsize=(5, 2.5))\ny = tf.nn.tanh(x)\nd2l.plot(x.numpy(), y.numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))\nwith tf.GradientTape() as t:\n y = tf.nn.tanh(x)\nd2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))"} +{"id": 532, "mxnet": "from mxnet import gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens))\nb1 = np.zeros(num_hiddens)\nW2 = np.random.normal(scale=0.01, size=(num_hiddens, num_outputs))\nb2 = np.zeros(num_outputs)\nparams = [W1, b1, W2, b2]\nfor param in params:\n param.attach_grad()\ndef relu(X):\n return np.maximum(X, 0)\ndef net(X):\n X = X.reshape((-1, num_inputs))\n H = relu(np.dot(X, W1) + b1)\n return np.dot(H, W2) + b2\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\nnum_epochs, lr = 10, 0.1\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnum_inputs, num_outputs, num_hiddens = 784, 10, 256\nW1 = tf.Variable(tf.random.normal(shape=(num_inputs, num_hiddens), mean=0, stddev=0.01))\nb1 = tf.Variable(tf.zeros(num_hiddens))\nW2 = tf.Variable(tf.random.normal(shape=(num_hiddens, num_outputs), mean=0, stddev=0.01))\nb2 = tf.Variable(tf.zeros(num_outputs))\nparams = [W1, b1, W2, b2]\ndef relu(X):\n return tf.math.maximum(X, 0)\ndef net(X):\n X = tf.reshape(X, (-1, num_inputs))\n H = relu(tf.matmul(X, W1) + b1)\n return tf.matmul(H, W2) + b2\ndef loss(y_hat, y):\n return tf.losses.sparse_categorical_crossentropy(y, y_hat, from_logits=True)\nnum_epochs, lr = 10, 0.1\nupdater = d2l.Updater([W1, W2, b1, b2], lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)"} +{"id": 533, "mxnet": "from mxnet import gluon, init, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation='relu'), nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nnet = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(10)])\nbatch_size, lr, num_epochs = 256, 0.1, 10\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\ntrain_iter, test_iter = 
d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 534, "mxnet": "import math\nfrom mxnet import gluon, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(l.sum(), d2l.size(l))\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = gluon.loss.L2Loss()\n net = nn.Sequential()\n net.add(nn.Dense(1, use_bias=False))\n net.initialize()\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False)\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01})\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))", "tensorflow": "import math\nimport numpy as np\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntrue_w, features, poly_features, labels = [tf.constant(x, dtype=tf.float32) for x in [true_w, features, poly_features, labels]]\nfeatures[:2], poly_features[:2, :], labels[:2]\ndef evaluate_loss(net, data_iter, loss):\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(tf.reduce_sum(l), d2l.size(l))\n return metric[0] / metric[1]\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400):\n loss = tf.losses.MeanSquaredError()\n input_shape = train_features.shape[-1]\n net = tf.keras.Sequential()\n net.add(tf.keras.layers.Dense(1, use_bias=False))\n batch_size = min(10, train_labels.shape[0])\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False)\n trainer = tf.keras.optimizers.SGD(learning_rate=.01)\n animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])\n for epoch in range(num_epochs):\n d2l.train_epoch_ch3(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))"} +{"id": 535, "mxnet": "%matplotlib inline\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = np.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = np.random.normal(scale=1, size=(num_inputs, 1))\n b = np.zeros(1)\n w.attach_grad()\n b.attach_grad()\n return [w, b]\ndef l2_penalty(w):\n return (w**2).sum() / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = 
d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y) + lambd * l2_penalty(w)\n l.backward()\n d2l.sgd([w, b], lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = nn.Sequential()\n net.add(nn.Dense(1))\n net.initialize(init.Normal(sigma=1))\n loss = gluon.loss.L2Loss()\n num_epochs, lr = 100, 0.003\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr, 'wd': wd})\n net.collect_params('.*bias').setattr('wd_mult', 0)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))", "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn_train, n_test, num_inputs, batch_size = 20, 100, 200, 5\ntrue_w, true_b = tf.ones((num_inputs, 1)) * 0.01, 0.05\ntrain_data = d2l.synthetic_data(true_w, true_b, n_train)\ntrain_iter = d2l.load_array(train_data, batch_size)\ntest_data = d2l.synthetic_data(true_w, true_b, n_test)\ntest_iter = d2l.load_array(test_data, batch_size, is_train=False)\ndef init_params():\n w = tf.Variable(tf.random.normal(mean=1, shape=(num_inputs, 1)))\n b = tf.Variable(tf.zeros(shape=(1, )))\n return [w, b]\ndef l2_penalty(w):\n return tf.reduce_sum(tf.pow(w, 2)) / 2\ndef train(lambd):\n w, b = init_params()\n net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss\n num_epochs, lr = 100, 0.003\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n l = loss(net(X), y) + lambd * l2_penalty(w)\n grads = tape.gradient(l, [w, b])\n d2l.sgd([w, b], grads, lr, batch_size)\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))\ndef train_concise(wd):\n net = tf.keras.models.Sequential()\n net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(wd)))\n net.build(input_shape=(1, num_inputs))\n w, b = net.trainable_variables\n loss = tf.keras.losses.MeanSquaredError()\n num_epochs, lr = 100, 0.003\n trainer = tf.keras.optimizers.SGD(learning_rate=lr)\n animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n l = loss(net(X), y) + net.losses\n grads = tape.gradient(l, net.trainable_variables)\n trainer.apply_gradients(zip(grads, net.trainable_variables))\n if (epoch + 1) % 5 == 0:\n animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))"} +{"id": 536, "mxnet": "from mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return np.zeros_like(X)\n if dropout == 0:\n return X\n mask = 
np.random.uniform(0, 1, X.shape) > dropout\n return mask.astype(np.float32) * X / (1.0 - dropout)\nX = np.arange(16).reshape(2, 8)\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\nW1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens1))\nb1 = np.zeros(num_hiddens1)\nW2 = np.random.normal(scale=0.01, size=(num_hiddens1, num_hiddens2))\nb2 = np.zeros(num_hiddens2)\nW3 = np.random.normal(scale=0.01, size=(num_hiddens2, num_outputs))\nb3 = np.zeros(num_outputs)\nparams = [W1, b1, W2, b2, W3, b3]\nfor param in params:\n param.attach_grad()\ndropout1, dropout2 = 0.2, 0.5\ndef net(X):\n X = X.reshape(-1, num_inputs)\n H1 = npx.relu(np.dot(X, W1) + b1)\n if autograd.is_training():\n H1 = dropout_layer(H1, dropout1)\n H2 = npx.relu(np.dot(H1, W2) + b2)\n if autograd.is_training():\n H2 = dropout_layer(H2, dropout2)\n return np.dot(H2, W3) + b3\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = gluon.loss.SoftmaxCrossEntropyLoss()\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation=\"relu\"),\n nn.Dropout(dropout1),\n nn.Dense(256, activation=\"relu\"),\n nn.Dropout(dropout2),\n nn.Dense(10))\nnet.initialize(init.Normal(sigma=0.01))\ntrainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef dropout_layer(X, dropout):\n assert 0 <= dropout <= 1\n if dropout == 1:\n return tf.zeros_like(X)\n if dropout == 0:\n return X\n mask = tf.random.uniform(shape=tf.shape(X), minval=0, maxval=1) < 1 - dropout\n return tf.cast(mask, dtype=tf.float32) * X / (1.0 - dropout)\nX = tf.reshape(tf.range(16, dtype=tf.float32), (2, 8))\nnum_outputs, num_hiddens1, num_hiddens2 = 10, 256, 256\ndropout1, dropout2 = 0.2, 0.5\nclass Net(tf.keras.Model):\n def __init__(self, num_outputs, num_hiddens1, num_hiddens2):\n super().__init__()\n self.input_layer = tf.keras.layers.Flatten()\n self.hidden1 = tf.keras.layers.Dense(num_hiddens1, activation='relu')\n self.hidden2 = tf.keras.layers.Dense(num_hiddens2, activation='relu')\n self.output_layer = tf.keras.layers.Dense(num_outputs)\n def call(self, inputs, training=None):\n x = self.input_layer(inputs)\n x = self.hidden1(x)\n if training:\n x = dropout_layer(x, dropout1)\n x = self.hidden2(x)\n if training:\n x = dropout_layer(x, dropout2)\n x = self.output_layer(x)\n return x\nnet = Net(num_outputs, num_hiddens1, num_hiddens2)\nnum_epochs, lr, batch_size = 10, 0.5, 256\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\nnet = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dropout(dropout1),\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dropout(dropout2),\n tf.keras.layers.Dense(10),\n])\ntrainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)"} +{"id": 537, "mxnet": "trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib 
inline\nfrom mxnet import autograd, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nx = np.arange(-8.0, 8.0, 0.1)\nx.attach_grad()\nwith autograd.record():\n y = npx.sigmoid(x)\ny.backward()\nd2l.plot(x, [y, x.grad], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = np.random.normal(size=(4, 4))\nfor i in range(100):\n M = np.dot(M, np.random.normal(size=(4, 4)))", "tensorflow": "trainer = tf.keras.optimizers.SGD(learning_rate=lr)\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)\n%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nx = tf.Variable(tf.range(-8.0, 8.0, 0.1))\nwith tf.GradientTape() as t:\n y = tf.nn.sigmoid(x)\nd2l.plot(x.numpy(), [y.numpy(), t.gradient(y, x).numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))\nM = tf.random.normal((4, 4))\nfor i in range(100):\n M = tf.matmul(M, tf.random.normal((4, 4)))"} +{"id": 538, "mxnet": "%matplotlib inline\nimport pandas as pd\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nn_train = train_data.shape[0]\ntrain_features = np.array(all_features[:n_train].values, dtype=np.float32)\ntest_features = np.array(all_features[n_train:].values, dtype=np.float32)\ntrain_labels = np.array(train_data.SalePrice.values.reshape(-1, 1), dtype=np.float32)\nloss = gluon.loss.L2Loss()\ndef get_net():\n net = nn.Sequential()\n net.add(nn.Dense(1))\n net.initialize()\n return net\ndef log_rmse(net, features, labels):\n clipped_preds = np.clip(net(features), 1, float('inf'))\n return np.sqrt(2 * loss(np.log(clipped_preds), np.log(labels)).mean())\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': learning_rate, 'wd': weight_decay})\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\n train_ls.append(log_rmse(net, train_features, train_labels))\n if test_labels is not None:\n test_ls.append(log_rmse(net, test_features, test_labels))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = np.concatenate([X_train, X_part], 0)\n y_train = np.concatenate([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).asnumpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)", "tensorflow": "%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nn_train = 
train_data.shape[0]\ntrain_features = tf.constant(all_features[:n_train].values, dtype=tf.float32)\ntest_features = tf.constant(all_features[n_train:].values, dtype=tf.float32)\ntrain_labels = tf.constant(train_data.SalePrice.values.reshape(-1, 1), dtype=tf.float32)\nloss = tf.keras.losses.MeanSquaredError()\ndef get_net():\n net = tf.keras.models.Sequential()\n net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(weight_decay)))\n return net\ndef log_rmse(y_true, y_pred):\n clipped_preds = tf.clip_by_value(y_pred, 1, float('inf'))\n return tf.sqrt(tf.reduce_mean(loss(tf.math.log(y_true), tf.math.log(clipped_preds))))\ndef train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):\n train_ls, test_ls = [], []\n train_iter = d2l.load_array((train_features, train_labels), batch_size)\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n net.compile(loss=loss, optimizer=optimizer)\n for epoch in range(num_epochs):\n for X, y in train_iter:\n with tf.GradientTape() as tape:\n y_hat = net(X)\n l = loss(y, y_hat)\n params = net.trainable_variables\n grads = tape.gradient(l, params)\n optimizer.apply_gradients(zip(grads, params))\n train_ls.append(log_rmse(train_labels, net(train_features)))\n if test_labels is not None:\n test_ls.append(log_rmse(test_labels, net(test_features)))\n return train_ls, test_ls\ndef get_k_fold_data(k, i, X, y):\n assert k > 1\n fold_size = X.shape[0] // k\n X_train, y_train = None, None\n for j in range(k):\n idx = slice(j * fold_size, (j + 1) * fold_size)\n X_part, y_part = X[idx, :], y[idx]\n if j == i:\n X_valid, y_valid = X_part, y_part\n elif X_train is None:\n X_train, y_train = X_part, y_part\n else:\n X_train = tf.concat([X_train, X_part], 0)\n y_train = tf.concat([y_train, y_part], 0)\n return X_train, y_train, X_valid, y_valid\ndef train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):\n net = get_net()\n train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)\n d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')\n preds = net(test_features).numpy()\n test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)"} +{"id": 539, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(256, activation='relu'))\nnet.add(nn.Dense(10))\nnet.initialize()\nX = np.random.uniform(size=(2, 20))\nnet(X)\nclass MLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.hidden = nn.Dense(256, activation='relu')\n self.out = nn.Dense(10)\n def forward(self, X):\n return self.out(self.hidden(X))\nnet = MLP()\nnet.initialize()\nnet(X)\nclass MySequential(nn.Block):\n def add(self, block):\n\n self._children[block.name] = block\n def forward(self, X):\n for block in self._children.values():\n X = block(X)\n return X\nnet = MySequential()\nnet.add(nn.Dense(256, activation='relu'))\nnet.add(nn.Dense(10))\nnet.initialize()\nnet(X)\nclass FixedHiddenMLP(nn.Block):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.rand_weight = self.params.get_constant('rand_weight', np.random.uniform(size=(20, 20)))\n self.dense = nn.Dense(20, activation='relu')\n def forward(self, X):\n X = 
self.dense(X)\n        X = npx.relu(np.dot(X, self.rand_weight.data()) + 1)\n        X = self.dense(X)\n        while np.abs(X).sum() > 1:\n            X /= 2\n        return X.sum()\nnet = FixedHiddenMLP()\nnet.initialize()\nnet(X)\nclass NestMLP(nn.Block):\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.net = nn.Sequential()\n        self.net.add(nn.Dense(64, activation='relu'), nn.Dense(32, activation='relu'))\n        self.dense = nn.Dense(16, activation='relu')\n    def forward(self, X):\n        return self.dense(self.net(X))\nchimera = nn.Sequential()\nchimera.add(NestMLP(), nn.Dense(20), FixedHiddenMLP())\nchimera.initialize()\nchimera(X)", "tensorflow": "import tensorflow as tf\nnet = tf.keras.models.Sequential([\n    tf.keras.layers.Dense(256, activation=tf.nn.relu),\n    tf.keras.layers.Dense(10)])\nX = tf.random.uniform((2, 20))\nnet(X)\nclass MLP(tf.keras.Model):\n    def __init__(self):\n        super().__init__()\n        self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)\n        self.out = tf.keras.layers.Dense(units=10)\n    def call(self, X):\n        return self.out(self.hidden((X)))\nnet = MLP()\nnet(X)\nclass MySequential(tf.keras.Model):\n    def __init__(self, *args):\n        super().__init__()\n        self.modules = []\n        for block in args:\n            self.modules.append(block)\n    def call(self, X):\n        for module in self.modules:\n            X = module(X)\n        return X\nnet = MySequential(\n    tf.keras.layers.Dense(units=256, activation=tf.nn.relu),\n    tf.keras.layers.Dense(10))\nnet(X)\nclass FixedHiddenMLP(tf.keras.Model):\n    def __init__(self):\n        super().__init__()\n        self.flatten = tf.keras.layers.Flatten()\n        self.rand_weight = tf.constant(tf.random.uniform((20, 20)))\n        self.dense = tf.keras.layers.Dense(20, activation=tf.nn.relu)\n    def call(self, inputs):\n        X = self.flatten(inputs)\n        X = tf.nn.relu(tf.matmul(X, self.rand_weight) + 1)\n        X = self.dense(X)\n        while tf.reduce_sum(tf.math.abs(X)) > 1:\n            X /= 2\n        return tf.reduce_sum(X)\nnet = FixedHiddenMLP()\nnet(X)\nclass NestMLP(tf.keras.Model):\n    def __init__(self):\n        super().__init__()\n        self.net = tf.keras.Sequential()\n        self.net.add(tf.keras.layers.Dense(64, activation=tf.nn.relu))\n        self.net.add(tf.keras.layers.Dense(32, activation=tf.nn.relu))\n        self.dense = tf.keras.layers.Dense(16, activation=tf.nn.relu)\n    def call(self, inputs):\n        return self.dense(self.net(inputs))\nchimera = tf.keras.Sequential()\nchimera.add(NestMLP())\nchimera.add(tf.keras.layers.Dense(20))\nchimera.add(FixedHiddenMLP())\nchimera(X)"} +{"id": 540, "mxnet": "from mxnet import init, np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Dense(8, activation='relu'))\nnet.add(nn.Dense(1))\nnet.initialize()\nX = np.random.uniform(size=(2, 4))\nnet(X)\nnet.collect_params()['dense1_bias'].data()\ndef block1():\n    net = nn.Sequential()\n    net.add(nn.Dense(32, activation='relu'))\n    net.add(nn.Dense(16, activation='relu'))\n    return net\ndef block2():\n    net = nn.Sequential()\n    for _ in range(4):\n        net.add(block1())\n    return net\nrgnet = nn.Sequential()\nrgnet.add(block2())\nrgnet.add(nn.Dense(10))\nrgnet.initialize()\nrgnet(X)\nnet.initialize(init=init.Normal(sigma=0.01), force_reinit=True)\nnet[0].weight.data()[0]\nnet.initialize(init=init.Constant(1), force_reinit=True)\nnet[0].weight.data()[0]\nnet[0].weight.initialize(init=init.Xavier(), force_reinit=True)\nnet[1].initialize(init=init.Constant(42), force_reinit=True)\nclass MyInit(init.Initializer):\n    def _init_weight(self, name, data):\n        data[:] = np.random.uniform(-10, 10, data.shape)\n        data *= np.abs(data) >= 5\nnet.initialize(MyInit(), 
force_reinit=True)\nnet[0].weight.data()[:2]\nnet[0].weight.data()[:] += 1\nnet[0].weight.data()[0, 0] = 42\nnet[0].weight.data()[0]\nlayer = CenteredLayer()\nlayer(np.array([1, 2, 3, 4, 5]))\nnet = nn.Sequential()\nnet.add(nn.Dense(128), CenteredLayer())\nnet.initialize()", "tensorflow": "import tensorflow as tf\nnet = tf.keras.models.Sequential([\n    tf.keras.layers.Flatten(),\n    tf.keras.layers.Dense(4, activation=tf.nn.relu),\n    tf.keras.layers.Dense(1),\n])\nX = tf.random.uniform((2, 4))\nnet(X)\nnet.get_weights()[1]\ndef block1(name):\n    return tf.keras.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu)], name=name)\ndef block2():\n    net = tf.keras.Sequential()\n    for i in range(4):\n        net.add(block1(name=f'block-{i}'))\n    return net\nrgnet = tf.keras.Sequential()\nrgnet.add(block2())\nrgnet.add(tf.keras.layers.Dense(1))\nrgnet(X)\nnet = tf.keras.models.Sequential([\n    tf.keras.layers.Flatten(),\n    tf.keras.layers.Dense(4, activation=tf.nn.relu,\n        kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.01), bias_initializer=tf.zeros_initializer()),\n    tf.keras.layers.Dense(1)])\nnet(X)\nnet.weights[0], net.weights[1]\nnet = tf.keras.models.Sequential([\n    tf.keras.layers.Flatten(),\n    tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.Constant(1), bias_initializer=tf.zeros_initializer()),\n    tf.keras.layers.Dense(1),\n])\nnet(X)\nnet.weights[0], net.weights[1]\nnet = tf.keras.models.Sequential([\n    tf.keras.layers.Flatten(),\n    tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.GlorotUniform()),\n    tf.keras.layers.Dense(1, kernel_initializer=tf.keras.initializers.Constant(1)),\n])\nnet(X)\nclass MyInit(tf.keras.initializers.Initializer):\n    def __call__(self, shape, dtype=None):\n        data=tf.random.uniform(shape, -10, 10, dtype=dtype)\n        factor=(tf.abs(data) >= 5)\n        factor=tf.cast(factor, tf.float32)\n        return data * factor\nnet = tf.keras.models.Sequential([\n    tf.keras.layers.Flatten(),\n    tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=MyInit()),\n    tf.keras.layers.Dense(1)])\nnet(X)\nnet.layers[1].weights[0][:].assign(net.layers[1].weights[0] + 1)\nnet.layers[1].weights[0][0, 0].assign(42)\nnet.layers[1].weights[0]\nlayer = CenteredLayer()\nlayer(tf.constant([1, 2, 3, 4, 5]))\nnet = tf.keras.Sequential([tf.keras.layers.Dense(128), CenteredLayer()])"} +{"id": 541, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nclass CenteredLayer(nn.Block):\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n    def forward(self, X):\n        return X - X.mean()\nY = net(np.random.uniform(size=(4, 8)))\nY.mean()\nclass MyDense(nn.Block):\n    def __init__(self, units, in_units, **kwargs):\n        super().__init__(**kwargs)\n        self.weight = self.params.get('weight', shape=(in_units, units))\n        self.bias = self.params.get('bias', shape=(units,))\n    def forward(self, x):\n        linear = np.dot(x, self.weight.data(ctx=x.ctx)) + self.bias.data(\n            ctx=x.ctx)\n        return npx.relu(linear)\ndense = MyDense(units=3, in_units=5)\ndense.params\ndense.initialize()\ndense(np.random.uniform(size=(2, 5)))\nnet = nn.Sequential()\nnet.add(MyDense(8, in_units=64), MyDense(1, in_units=8))\nnet.initialize()\nnet(np.random.uniform(size=(2, 64)))", "tensorflow": "import tensorflow as tf\nclass CenteredLayer(tf.keras.Model):\n    def __init__(self):\n        super().__init__()\n    def call(self, inputs):\n        return inputs - tf.reduce_mean(inputs)\nY = net(tf.random.uniform((4, 8)))\ntf.reduce_mean(Y)\nclass 
MyDense(tf.keras.Model):\n    def __init__(self, units):\n        super().__init__()\n        self.units = units\n    def build(self, X_shape):\n        self.weight = self.add_weight(name='weight',\n            shape=[X_shape[-1], self.units],\n            initializer=tf.random_normal_initializer())\n        self.bias = self.add_weight(\n            name='bias', shape=[self.units],\n            initializer=tf.zeros_initializer())\n    def call(self, X):\n        linear = tf.matmul(X, self.weight) + self.bias\n        return tf.nn.relu(linear)\ndense = MyDense(3)\ndense(tf.random.uniform((2, 5)))\ndense.get_weights()\ndense(tf.random.uniform((2, 5)))\nnet = tf.keras.models.Sequential([MyDense(8), MyDense(1)])\nnet(tf.random.uniform((2, 64)))"} +{"id": 542, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nx = np.arange(4)\nnpx.save('x-file', x)\nx2 = npx.load('x-file')\ny = np.zeros(4)\nnpx.save('x-files', [x, y])\nx2, y2 = npx.load('x-files')\nmydict = {'x': x, 'y': y}\nnpx.save('mydict', mydict)\nmydict2 = npx.load('mydict')\nclass MLP(nn.Block):\n    def __init__(self, **kwargs):\n        super(MLP, self).__init__(**kwargs)\n        self.hidden = nn.Dense(256, activation='relu')\n        self.output = nn.Dense(10)\n    def forward(self, x):\n        return self.output(self.hidden(x))\nnet = MLP()\nnet.initialize()\nX = np.random.uniform(size=(2, 20))\nY = net(X)\nnet.save_parameters('mlp.params')\nclone = MLP()\nclone.load_parameters('mlp.params')", "tensorflow": "import numpy as np\nimport tensorflow as tf\nx = tf.range(4)\nnp.save('x-file.npy', x)\nx2 = np.load('x-file.npy', allow_pickle=True)\ny = tf.zeros(4)\nnp.save('xy-files.npy', [x, y])\nx2, y2 = np.load('xy-files.npy', allow_pickle=True)\nmydict = {'x': x, 'y': y}\nnp.save('mydict.npy', mydict)\nmydict2 = np.load('mydict.npy', allow_pickle=True)\nclass MLP(tf.keras.Model):\n    def __init__(self):\n        super().__init__()\n        self.flatten = tf.keras.layers.Flatten()\n        self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)\n        self.out = tf.keras.layers.Dense(units=10)\n    def call(self, inputs):\n        x = self.flatten(inputs)\n        x = self.hidden(x)\n        return self.out(x)\nnet = MLP()\nX = tf.random.uniform((2, 20))\nY = net(X)\nnet.save_weights('mlp.params')\nclone = MLP()\nclone.load_weights('mlp.params')"} +{"id": 543, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\nnpx.cpu(), npx.gpu(), npx.gpu(1)\nnpx.num_gpus()\ndef try_gpu(i=0):\n    return npx.gpu(i) if npx.num_gpus() >= i + 1 else npx.cpu()\ndef try_all_gpus():\n    devices = [npx.gpu(i) for i in range(npx.num_gpus())]\n    return devices if devices else [npx.cpu()]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = np.array([1, 2, 3])\nx.ctx\nX = np.ones((2, 3), ctx=try_gpu())\nY = np.random.uniform(size=(2, 3), ctx=try_gpu(1))\nZ = X.copyto(try_gpu(1))\nZ.as_in_ctx(try_gpu(1)) is Z\nnet = nn.Sequential()\nnet.add(nn.Dense(1))\nnet.initialize(ctx=try_gpu())\nnet[0].weight.data().ctx", "tensorflow": "import tensorflow as tf\ntf.device('/CPU:0'), tf.device('/GPU:0'), tf.device('/GPU:1')\nlen(tf.config.experimental.list_physical_devices('GPU'))\ndef try_gpu(i=0):\n    if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1:\n        return tf.device(f'/GPU:{i}')\n    return tf.device('/CPU:0')\ndef try_all_gpus():\n    num_gpus = len(tf.config.experimental.list_physical_devices('GPU'))\n    devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)]\n    return devices if devices else [tf.device('/CPU:0')]\ntry_gpu(), try_gpu(10), try_all_gpus()\nx = tf.constant([1, 2, 3])\nx.device\nwith try_gpu():\n    X = tf.ones((2, 3))\nwith try_gpu(1):\n    Y = tf.random.uniform((2, 
3))\nwith try_gpu(1):\n    Z = X\nwith try_gpu(1):\n    Z2 = Z\nZ2 is Z\nstrategy = tf.distribute.MirroredStrategy()\nwith strategy.scope():\n    net = tf.keras.models.Sequential([\n    tf.keras.layers.Dense(1)])\nnet.layers[0].weights[0].device, net.layers[0].weights[1].device"} +{"id": 544, "mxnet": "from mxnet import autograd, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef corr2d(X, K):\n    h, w = K.shape\n    Y = np.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n    for i in range(Y.shape[0]):\n        for j in range(Y.shape[1]):\n            Y[i, j] = (X[i:i + h, j:j + w] * K).sum()\n    return Y\nX = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = np.array([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(nn.Block):\n    def __init__(self, kernel_size, **kwargs):\n        super().__init__(**kwargs)\n        self.weight = self.params.get('weight', shape=kernel_size)\n        self.bias = self.params.get('bias', shape=(1,))\n    def forward(self, x):\n        return corr2d(x, self.weight.data()) + self.bias.data()\nX = np.ones((6, 8))\nX[:, 2:6] = 0\nK = np.array([[1.0, -1.0]])\ncorr2d(d2l.transpose(X), K)\nconv2d = nn.Conv2D(1, kernel_size=(1, 2), use_bias=False)\nconv2d.initialize()\n\nX = X.reshape(1, 1, 6, 8)\nY = Y.reshape(1, 1, 6, 7)\nlr = 3e-2\nfor i in range(10):\n    with autograd.record():\n        Y_hat = conv2d(X)\n        l = (Y_hat - Y) ** 2\n    l.backward()\n    conv2d.weight.data()[:] -= lr * conv2d.weight.grad()\nconv2d.weight.data().reshape((1, 2))", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef corr2d(X, K):\n    h, w = K.shape\n    Y = tf.Variable(tf.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)))\n    for i in range(Y.shape[0]):\n        for j in range(Y.shape[1]):\n            Y[i, j].assign(tf.reduce_sum(\n                X[i: i + h, j: j + w] * K))\n    return Y\nX = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\nK = tf.constant([[0.0, 1.0], [2.0, 3.0]])\ncorr2d(X, K)\nclass Conv2D(tf.keras.layers.Layer):\n    def __init__(self):\n        super().__init__()\n    def build(self, kernel_size):\n        initializer = tf.random_normal_initializer()\n        self.weight = self.add_weight(name='w', shape=kernel_size, initializer=initializer)\n        self.bias = self.add_weight(name='b', shape=(1, ), initializer=initializer)\n    def call(self, inputs):\n        return corr2d(inputs, self.weight) + self.bias\nX = tf.Variable(tf.ones((6, 8)))\nX[:, 2:6].assign(tf.zeros(X[:, 2:6].shape))\nK = tf.constant([[1.0, -1.0]])\ncorr2d(tf.transpose(X), K)\nconv2d = tf.keras.layers.Conv2D(1, (1, 2), use_bias=False)\nX = tf.reshape(X, (1, 6, 8, 1))\nY = tf.reshape(Y, (1, 6, 7, 1))\nlr = 3e-2\nY_hat = conv2d(X)\nfor i in range(10):\n    with tf.GradientTape(watch_accessed_variables=False) as g:\n        g.watch(conv2d.weights[0])\n        Y_hat = conv2d(X)\n        l = (abs(Y_hat - Y)) ** 2\n    update = tf.multiply(lr, g.gradient(l, conv2d.weights[0]))\n    weights = conv2d.get_weights()\n    weights[0] = conv2d.weights[0] - update\n    conv2d.set_weights(weights)\ntf.reshape(conv2d.get_weights()[0], (1, 2))"} +{"id": 545, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nnpx.set_np()\ndef comp_conv2d(conv2d, X):\n    conv2d.initialize()\n    X = X.reshape((1, 1) + X.shape)\n    Y = conv2d(X)\n    return Y.reshape(Y.shape[2:])\nconv2d = nn.Conv2D(1, kernel_size=3, padding=1)\nX = np.random.uniform(size=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=(5, 3), padding=(2, 1))\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=3, padding=1, strides=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = nn.Conv2D(1, kernel_size=(3, 5), padding=(0, 1), 
strides=(3, 4))\ncomp_conv2d(conv2d, X).shape", "tensorflow": "import tensorflow as tf\n\ndef comp_conv2d(conv2d, X):\n X = tf.reshape(X, (1, ) + X.shape + (1, ))\n Y = conv2d(X)\n return tf.reshape(Y, Y.shape[1:3])\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same')\nX = tf.random.uniform(shape=(8, 8))\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=(5, 3), padding='same')\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same', strides=2)\ncomp_conv2d(conv2d, X).shape\nconv2d = tf.keras.layers.Conv2D(1, kernel_size=(3,5), padding='valid', strides=(3, 4))\ncomp_conv2d(conv2d, X).shape"} +{"id": 546, "mxnet": "from mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef corr2d_multi_in(X, K):\n return sum(d2l.corr2d(x, k) for x, k in zip(X, K))\nX = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = np.array([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return np.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = np.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = X.reshape((c_i, h * w))\n K = K.reshape((c_o, c_i))\n Y = np.dot(K, X)\n return Y.reshape((c_o, h, w))\nX = np.random.normal(0, 1, (3, 3, 3))\nK = np.random.normal(0, 1, (2, 3, 1, 1))\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(np.abs(Y1 - Y2).sum()) < 1e-6", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef corr2d_multi_in(X, K):\n return tf.reduce_sum([d2l.corr2d(x, k) for x, k in zip(X, K)], axis=0)\nX = tf.constant([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])\nK = tf.constant([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])\ncorr2d_multi_in(X, K)\ndef corr2d_multi_in_out(X, K):\n return tf.stack([corr2d_multi_in(X, k) for k in K], 0)\nK = tf.stack((K, K + 1, K + 2), 0)\nK.shape\ndef corr2d_multi_in_out_1x1(X, K):\n c_i, h, w = X.shape\n c_o = K.shape[0]\n X = tf.reshape(X, (c_i, h * w))\n K = tf.reshape(K, (c_o, c_i))\n Y = tf.matmul(K, X)\n return tf.reshape(Y, (c_o, h, w))\nX = tf.random.normal((3, 3, 3), 0, 1)\nK = tf.random.normal((2, 3, 1, 1), 0, 1)\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\nassert float(tf.reduce_sum(tf.abs(Y1 - Y2))) < 1e-6"} +{"id": 547, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = np.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j] = X[i: i + p_h, j: j + p_w].max()\n elif mode == 'avg':\n Y[i, j] = X[i: i + p_h, j: j + p_w].mean()\n return Y\nX = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = np.arange(16, dtype=np.float32).reshape((1, 1, 4, 4))\npool2d = nn.MaxPool2D(3)\npool2d(X)\npool2d = nn.MaxPool2D(3, padding=1, strides=2)\npool2d(X)\npool2d = nn.MaxPool2D((2, 3), padding=(0, 1), strides=(2, 3))\npool2d(X)\nX = np.concatenate((X, X + 1), 1)\npool2d = nn.MaxPool2D(3, padding=1, strides=2)\npool2d(X)", "tensorflow": "import tensorflow as tf\ndef pool2d(X, pool_size, mode='max'):\n p_h, p_w = pool_size\n Y = tf.Variable(tf.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w +1)))\n for i 
in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n if mode == 'max':\n Y[i, j].assign(tf.reduce_max(X[i: i + p_h, j: j + p_w]))\n elif mode =='avg':\n Y[i, j].assign(tf.reduce_mean(X[i: i + p_h, j: j + p_w]))\n return Y\nX = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])\npool2d(X, (2, 2))\nX = tf.reshape(tf.range(16, dtype=tf.float32), (1, 4, 4, 1))\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3])\npool2d(X)\npaddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid',\n strides=2)\npool2d(X_padded)\npaddings = tf.constant([[0, 0], [0, 0], [1, 1], [0, 0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[2, 3], padding='valid',\n strides=(2, 3))\npool2d(X_padded)\nX = tf.concat([X, X + 1], 3)\npaddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]])\nX_padded = tf.pad(X, paddings, \"CONSTANT\")\npool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid',\n strides=2)\npool2d(X_padded)"} +{"id": 548, "mxnet": "from mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(nn.Conv2D(channels=6, kernel_size=5, padding=2, activation='sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120, activation='sigmoid'),\n nn.Dense(84, activation='sigmoid'),\n nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 28, 28))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)\ndef train_ch6(net, train_iter, test_iter, num_epochs, lr, device):\n net.initialize(force_reinit=True, ctx=device, init=init.Xavier())\n loss = gluon.loss.SoftmaxCrossEntropyLoss()\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n timer, num_batches = d2l.Timer(), len(train_iter)\n for epoch in range(num_epochs):\n metric = d2l.Accumulator(3)\n for i, (X, y) in enumerate(train_iter):\n timer.start()\n X, y = X.as_in_ctx(device), y.as_in_ctx(device)\n with autograd.record():\n y_hat = net(X)\n l = loss(y_hat, y)\n l.backward()\n trainer.step(X.shape[0])\n metric.add(l.sum(), d2l.accuracy(y_hat, y), X.shape[0])\n timer.stop()\n train_l = metric[0] / metric[2]\n train_acc = metric[1] / metric[2]\n if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:\n animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))\n test_acc = evaluate_accuracy_gpu(net, test_iter)\n animator.add(epoch + 1, (None, None, test_acc))", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, activation='sigmoid', padding='same'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5, activation='sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120, activation='sigmoid'),\n tf.keras.layers.Dense(84, activation='sigmoid'),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((1, 28, 28, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape: \t', X.shape)\nclass 
TrainCallback(tf.keras.callbacks.Callback):\n def __init__(self, net, train_iter, test_iter, num_epochs, device_name):\n self.timer = d2l.Timer()\n self.animator = d2l.Animator( xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])\n self.net = net\n self.train_iter = train_iter\n self.test_iter = test_iter\n self.num_epochs = num_epochs\n self.device_name = device_name\n def on_epoch_begin(self, epoch, logs=None):\n self.timer.start()\n def on_epoch_end(self, epoch, logs):\n self.timer.stop()\n test_acc = self.net.evaluate(self.test_iter, verbose=0, return_dict=True)['accuracy']\n metrics = (logs['loss'], logs['accuracy'], test_acc)\n self.animator.add(epoch + 1, metrics)\n if epoch == self.num_epochs - 1:\n batch_size = next(iter(self.train_iter))[0].shape[0]\n num_examples = batch_size * tf.data.experimental.cardinality(self.train_iter).numpy()\ndef train_ch6(net_fn, train_iter, test_iter, num_epochs, lr, device):\n device_name = device._device_name\n strategy = tf.distribute.OneDeviceStrategy(device_name)\n with strategy.scope():\n optimizer = tf.keras.optimizers.SGD(learning_rate=lr)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n net = net_fn()\n net.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])\n callback = TrainCallback(net, train_iter, test_iter, num_epochs, device_name)\n net.fit(train_iter, epochs=num_epochs, verbose=0, callbacks=[callback])\n return net"} +{"id": 549, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nnet = nn.Sequential()\nnet.add(\n nn.Conv2D(96, kernel_size=11, strides=4, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Conv2D(256, kernel_size=5, padding=2, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),\n nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),\n nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=96, kernel_size=11, strides=4, activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Conv2D(filters=256, kernel_size=5, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform((1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__, 'output shape:\t', X.shape)"} +{"id": 550, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef 
vgg_block(num_convs, num_channels):\n blk = nn.Sequential()\n for _ in range(num_convs):\n blk.add(nn.Conv2D(num_channels, kernel_size=3, padding=1, activation='relu'))\n blk.add(nn.MaxPool2D(pool_size=2, strides=2))\n return blk\ndef vgg(conv_arch):\n net = nn.Sequential()\n for (num_convs, num_channels) in conv_arch:\n net.add(vgg_block(num_convs, num_channels))\n net.add(nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(4096, activation='relu'), nn.Dropout(0.5),\n nn.Dense(10))\n return net\nnet = vgg(conv_arch)\nnet.initialize()\nX = np.random.uniform(size=(1, 1, 224, 224))\nfor blk in net:\n X = blk(X)\n print(blk.name, 'output shape:\t', X.shape)\nratio = 4\nsmall_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]\nnet = vgg(small_conv_arch)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef vgg_block(num_convs, num_channels):\n blk = tf.keras.models.Sequential()\n for _ in range(num_convs):\n blk.add(tf.keras.layers.Conv2D(num_channels,kernel_size=3, padding='same',activation='relu'))\n blk.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))\n return blk\ndef vgg(conv_arch):\n net = tf.keras.models.Sequential()\n for (num_convs, num_channels) in conv_arch:\n net.add(vgg_block(num_convs, num_channels))\n net.add(tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(4096, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10)]))\n return net\nnet = vgg(conv_arch)\nX = tf.random.uniform((1, 224, 224, 1))\nfor blk in net.layers:\n X = blk(X)\n print(blk.__class__.__name__,'output shape:\t', X.shape)\nratio = 4\nsmall_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]\nnet = lambda: vgg(small_conv_arch)"} +{"id": 551, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef nin_block(num_channels, kernel_size, strides, padding):\n blk = nn.Sequential()\n blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding, activation='relu'),\n nn.Conv2D(num_channels, kernel_size=1, activation='relu'),\n nn.Conv2D(num_channels, kernel_size=1, activation='relu'))\n return blk\nnet = nn.Sequential()\nnet.add(nin_block(96, kernel_size=11, strides=4, padding=0),\n nn.MaxPool2D(pool_size=3, strides=2),\n nin_block(256, kernel_size=5, strides=1, padding=2),\n nn.MaxPool2D(pool_size=3, strides=2),\n nin_block(384, kernel_size=3, strides=1, padding=1),\n nn.MaxPool2D(pool_size=3, strides=2),\n nn.Dropout(0.5),\n nin_block(10, kernel_size=3, strides=1, padding=1),\n nn.GlobalAvgPool2D(),\n nn.Flatten())\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.name, 'output shape:\t', X.shape)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef nin_block(num_channels, kernel_size, strides, padding):\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(num_channels, kernel_size, strides=strides, padding=padding, activation='relu'),\n tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu'),\n tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu')])\ndef net():\n return tf.keras.models.Sequential([\n nin_block(96, kernel_size=11, strides=4, padding='valid'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n nin_block(256, kernel_size=5, strides=1, padding='same'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n 
nin_block(384, kernel_size=3, strides=1, padding='same'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2),\n tf.keras.layers.Dropout(0.5),\n nin_block(10, kernel_size=3, strides=1, padding='same'),\n tf.keras.layers.GlobalAveragePooling2D(),\n tf.keras.layers.Reshape((1, 1, 10)),\n tf.keras.layers.Flatten(),\n ])\nX = tf.random.uniform((1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 552, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nclass Inception(nn.Block):\n def __init__(self, c1, c2, c3, c4, **kwargs):\n super(Inception, self).__init__(**kwargs)\n self.p1_1 = nn.Conv2D(c1, kernel_size=1, activation='relu')\n self.p2_1 = nn.Conv2D(c2[0], kernel_size=1, activation='relu')\n self.p2_2 = nn.Conv2D(c2[1], kernel_size=3, padding=1, activation='relu')\n self.p3_1 = nn.Conv2D(c3[0], kernel_size=1, activation='relu')\n self.p3_2 = nn.Conv2D(c3[1], kernel_size=5, padding=2, activation='relu')\n self.p4_1 = nn.MaxPool2D(pool_size=3, strides=1, padding=1)\n self.p4_2 = nn.Conv2D(c4, kernel_size=1, activation='relu')\n def forward(self, x):\n p1 = self.p1_1(x)\n p2 = self.p2_2(self.p2_1(x))\n p3 = self.p3_2(self.p3_1(x))\n p4 = self.p4_2(self.p4_1(x))\n return np.concatenate((p1, p2, p3, p4), axis=1)\nb1 = nn.Sequential()\nb1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb2 = nn.Sequential()\nb2.add(nn.Conv2D(64, kernel_size=1, activation='relu'),\n nn.Conv2D(192, kernel_size=3, padding=1, activation='relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb3 = nn.Sequential()\nb3.add(Inception(64, (96, 128), (16, 32), 32),\n Inception(128, (128, 192), (32, 96), 64),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb4 = nn.Sequential()\nb4.add(Inception(192, (96, 208), (16, 48), 64),\n Inception(160, (112, 224), (24, 64), 64),\n Inception(128, (128, 256), (24, 64), 64),\n Inception(112, (144, 288), (32, 64), 64),\n Inception(256, (160, 320), (32, 128), 128),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nb5 = nn.Sequential()\nb5.add(Inception(256, (160, 320), (32, 128), 128),\n Inception(384, (192, 384), (48, 128), 128),\n nn.GlobalAvgPool2D())\nnet = nn.Sequential()\nnet.add(b1, b2, b3, b4, b5, nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 96, 96))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass Inception(tf.keras.Model):\n def __init__(self, c1, c2, c3, c4):\n super().__init__()\n self.p1_1 = tf.keras.layers.Conv2D(c1, 1, activation='relu')\n self.p2_1 = tf.keras.layers.Conv2D(c2[0], 1, activation='relu')\n self.p2_2 = tf.keras.layers.Conv2D(c2[1], 3, padding='same', activation='relu')\n self.p3_1 = tf.keras.layers.Conv2D(c3[0], 1, activation='relu')\n self.p3_2 = tf.keras.layers.Conv2D(c3[1], 5, padding='same', activation='relu')\n self.p4_1 = tf.keras.layers.MaxPool2D(3, 1, padding='same')\n self.p4_2 = tf.keras.layers.Conv2D(c4, 1, activation='relu')\n def call(self, x):\n p1 = self.p1_1(x)\n p2 = self.p2_2(self.p2_1(x))\n p3 = self.p3_2(self.p3_1(x))\n p4 = self.p4_2(self.p4_1(x))\n return tf.keras.layers.Concatenate()([p1, p2, p3, p4])\ndef b1():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, 7, strides=2, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, 
strides=2, padding='same')])\ndef b2():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, 1, activation='relu'),\n tf.keras.layers.Conv2D(192, 3, padding='same', activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b3():\n return tf.keras.models.Sequential([\n Inception(64, (96, 128), (16, 32), 32),\n Inception(128, (128, 192), (32, 96), 64),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b4():\n return tf.keras.Sequential([\n Inception(192, (96, 208), (16, 48), 64),\n Inception(160, (112, 224), (24, 64), 64),\n Inception(128, (128, 256), (24, 64), 64),\n Inception(112, (144, 288), (32, 64), 64),\n Inception(256, (160, 320), (32, 128), 128),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef b5():\n return tf.keras.Sequential([\n Inception(256, (160, 320), (32, 128), 128),\n Inception(384, (192, 384), (48, 128), 128),\n tf.keras.layers.GlobalAvgPool2D(),\n tf.keras.layers.Flatten()\n ])\ndef net():\n return tf.keras.Sequential([b1(), b2(), b3(), b4(), b5(),\n tf.keras.layers.Dense(10)])\nX = tf.random.uniform(shape=(1, 96, 96, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 553, "mxnet": "from mxnet import autograd, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n if not autograd.is_training():\n X_hat = (X - moving_mean) / np.sqrt(moving_var + eps)\n else:\n assert len(X.shape) in (2, 4)\n if len(X.shape) == 2:\n mean = X.mean(axis=0)\n var = ((X - mean) ** 2).mean(axis=0)\n else:\n mean = X.mean(axis=(0, 2, 3), keepdims=True)\n var = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True)\n X_hat = (X - mean) / np.sqrt(var + eps)\n moving_mean = momentum * moving_mean + (1.0 - momentum) * mean\n moving_var = momentum * moving_var + (1.0 - momentum) * var\n Y = gamma * X_hat + beta\n return Y, moving_mean, moving_var\nclass BatchNorm(nn.Block):\n def __init__(self, num_features, num_dims, **kwargs):\n super().__init__(**kwargs)\n if num_dims == 2:\n shape = (1, num_features)\n else:\n shape = (1, num_features, 1, 1)\n self.gamma = self.params.get('gamma', shape=shape, init=init.One())\n self.beta = self.params.get('beta', shape=shape, init=init.Zero())\n self.moving_mean = np.zeros(shape)\n self.moving_var = np.ones(shape)\n def forward(self, X):\n if self.moving_mean.ctx != X.ctx:\n self.moving_mean = self.moving_mean.copyto(X.ctx)\n self.moving_var = self.moving_var.copyto(X.ctx)\n Y, self.moving_mean, self.moving_var = batch_norm(\n X, self.gamma.data(), self.beta.data(), self.moving_mean,\n self.moving_var, eps=1e-12, momentum=0.9)\n return Y\nnet = nn.Sequential()\nnet.add(nn.Conv2D(6, kernel_size=5),\n BatchNorm(6, num_dims=4),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(16, kernel_size=5),\n BatchNorm(16, num_dims=4),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120),\n BatchNorm(120, num_dims=2),\n nn.Activation('sigmoid'),\n nn.Dense(84),\n BatchNorm(84, num_dims=2),\n nn.Activation('sigmoid'),\n nn.Dense(10))\nlr, num_epochs, batch_size = 1.0, 10, 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nd2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())\nnet[1].gamma.data().reshape(-1,), net[1].beta.data().reshape(-1,)\nnet = nn.Sequential()\nnet.add(nn.Conv2D(6, kernel_size=5),\n 
nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Conv2D(16, kernel_size=5),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.AvgPool2D(pool_size=2, strides=2),\n nn.Dense(120),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.Dense(84),\n nn.BatchNorm(),\n nn.Activation('sigmoid'),\n nn.Dense(10))", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef batch_norm(X, gamma, beta, moving_mean, moving_var, eps):\n inv = tf.cast(tf.math.rsqrt(moving_var + eps), X.dtype)\n inv *= gamma\n Y = X * inv + (beta - moving_mean * inv)\n return Y\nclass BatchNorm(tf.keras.layers.Layer):\n def __init__(self, **kwargs):\n super(BatchNorm, self).__init__(**kwargs)\n def build(self, input_shape):\n weight_shape = [input_shape[-1], ]\n self.gamma = self.add_weight(name='gamma', shape=weight_shape, initializer=tf.initializers.ones, trainable=True)\n self.beta = self.add_weight(name='beta', shape=weight_shape, initializer=tf.initializers.zeros, trainable=True)\n self.moving_mean = self.add_weight(name='moving_mean', shape=weight_shape, initializer=tf.initializers.zeros, trainable=False)\n self.moving_variance = self.add_weight(name='moving_variance', shape=weight_shape, initializer=tf.initializers.ones, trainable=False)\n super(BatchNorm, self).build(input_shape)\n def assign_moving_average(self, variable, value):\n momentum = 0.9\n delta = variable * momentum + value * (1 - momentum)\n return variable.assign(delta)\n @tf.function\n def call(self, inputs, training):\n if training:\n axes = list(range(len(inputs.shape) - 1))\n batch_mean = tf.reduce_mean(inputs, axes, keepdims=True)\n batch_variance = tf.reduce_mean(tf.math.squared_difference(inputs, tf.stop_gradient(batch_mean)), axes, keepdims=True)\n batch_mean = tf.squeeze(batch_mean, axes)\n batch_variance = tf.squeeze(batch_variance, axes)\n mean_update = self.assign_moving_average(self.moving_mean, batch_mean)\n variance_update = self.assign_moving_average(self.moving_variance, batch_variance)\n self.add_update(mean_update)\n self.add_update(variance_update)\n mean, variance = batch_mean, batch_variance\n else:\n mean, variance = self.moving_mean, self.moving_variance\n output = batch_norm(inputs, moving_mean=mean, moving_var=variance, beta=self.beta, gamma=self.gamma, eps=1e-5)\n return output\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(84),\n BatchNorm(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(10)]\n )\nlr, num_epochs, batch_size = 1.0, 10, 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nnet = d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())\ntf.reshape(net.layers[1].gamma, (-1,)), tf.reshape(net.layers[1].beta, (-1,))\ndef net():\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Conv2D(filters=16, kernel_size=5),\n 
tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.AvgPool2D(pool_size=2, strides=2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(84),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('sigmoid'),\n tf.keras.layers.Dense(10),\n ])"} +{"id": 554, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nclass Residual(nn.Block):\n def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):\n super().__init__(**kwargs)\n self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1, strides=strides)\n self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2D(num_channels, kernel_size=1, strides=strides)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm()\n self.bn2 = nn.BatchNorm()\n def forward(self, X):\n Y = npx.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n return npx.relu(Y + X)\nblk = Residual(3)\nblk.initialize()\nX = np.random.uniform(size=(4, 3, 6, 6))\nblk(X).shape\nblk = Residual(6, use_1x1conv=True, strides=2)\nblk.initialize()\nblk(X).shape\nnet = nn.Sequential()\nnet.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\ndef resnet_block(num_channels, num_residuals, first_block=False):\n blk = nn.Sequential()\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.add(Residual(num_channels, use_1x1conv=True, strides=2))\n else:\n blk.add(Residual(num_channels))\n return blk\nnet.add(resnet_block(64, 2, first_block=True),\n resnet_block(128, 2),\n resnet_block(256, 2),\n resnet_block(512, 2))\nnet.add(nn.GlobalAvgPool2D(), nn.Dense(10))\nX = np.random.uniform(size=(1, 1, 224, 224))\nnet.initialize()\nfor layer in net:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass Residual(tf.keras.Model):\n def __init__(self, num_channels, use_1x1conv=False, strides=1):\n super().__init__()\n self.conv1 = tf.keras.layers.Conv2D(\n num_channels, padding='same', kernel_size=3, strides=strides)\n self.conv2 = tf.keras.layers.Conv2D(\n num_channels, kernel_size=3, padding='same')\n self.conv3 = None\n if use_1x1conv:\n self.conv3 = tf.keras.layers.Conv2D(\n num_channels, kernel_size=1, strides=strides)\n self.bn1 = tf.keras.layers.BatchNormalization()\n self.bn2 = tf.keras.layers.BatchNormalization()\n def call(self, X):\n Y = tf.keras.activations.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3 is not None:\n X = self.conv3(X)\n Y += X\n return tf.keras.activations.relu(Y)\nblk = Residual(3)\nX = tf.random.uniform((4, 6, 6, 3))\nY = blk(X)\nY.shape\nblk = Residual(6, use_1x1conv=True, strides=2)\nblk(X).shape\nb1 = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\nclass ResnetBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels, num_residuals, first_block=False, **kwargs):\n super(ResnetBlock, self).__init__(**kwargs)\n self.residual_layers = []\n for i in range(num_residuals):\n if i == 0 and not 
first_block:\n self.residual_layers.append(Residual(num_channels, use_1x1conv=True, strides=2))\n else:\n self.residual_layers.append(Residual(num_channels))\n def call(self, X):\n for layer in self.residual_layers.layers:\n X = layer(X)\n return X\nb2 = ResnetBlock(64, 2, first_block=True)\nb3 = ResnetBlock(128, 2)\nb4 = ResnetBlock(256, 2)\nb5 = ResnetBlock(512, 2)\ndef net():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same'),\n ResnetBlock(64, 2, first_block=True),\n ResnetBlock(128, 2),\n ResnetBlock(256, 2),\n ResnetBlock(512, 2),\n tf.keras.layers.GlobalAvgPool2D(),\n tf.keras.layers.Dense(units=10)])\nX = tf.random.uniform(shape=(1, 224, 224, 1))\nfor layer in net().layers:\n X = layer(X)\n print(layer.__class__.__name__,'output shape:\t', X.shape)"} +{"id": 555, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef conv_block(num_channels):\n blk = nn.Sequential()\n blk.add(nn.BatchNorm(),\n nn.Activation('relu'),\n nn.Conv2D(num_channels, kernel_size=3, padding=1))\n return blk\nclass DenseBlock(nn.Block):\n def __init__(self, num_convs, num_channels, **kwargs):\n super().__init__(**kwargs)\n self.net = nn.Sequential()\n for _ in range(num_convs):\n self.net.add(conv_block(num_channels))\n def forward(self, X):\n for blk in self.net:\n Y = blk(X)\n X = np.concatenate((X, Y), axis=1)\n return X\nblk = DenseBlock(2, 10)\nblk.initialize()\nX = np.random.uniform(size=(4, 3, 8, 8))\nY = blk(X)\nY.shape\ndef transition_block(num_channels):\n blk = nn.Sequential()\n blk.add(nn.BatchNorm(), nn.Activation('relu'),\n nn.Conv2D(num_channels, kernel_size=1),\n nn.AvgPool2D(pool_size=2, strides=2))\n return blk\nblk = transition_block(10)\nblk.initialize()\nblk(Y).shape\nnet = nn.Sequential()\nnet.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.MaxPool2D(pool_size=3, strides=2, padding=1))\nnum_channels, growth_rate = 64, 32\nnum_convs_in_dense_blocks = [4, 4, 4, 4]\nfor i, num_convs in enumerate(num_convs_in_dense_blocks):\n net.add(DenseBlock(num_convs, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n num_channels //= 2\n net.add(transition_block(num_channels))\nnet.add(nn.BatchNorm(),\n nn.Activation('relu'),\n nn.GlobalAvgPool2D(),\n nn.Dense(10))", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nclass ConvBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels):\n super(ConvBlock, self).__init__()\n self.bn = tf.keras.layers.BatchNormalization()\n self.relu = tf.keras.layers.ReLU()\n self.conv = tf.keras.layers.Conv2D(filters=num_channels, kernel_size=(3, 3), padding='same')\n self.listLayers = [self.bn, self.relu, self.conv]\n def call(self, x):\n y = x\n for layer in self.listLayers.layers:\n y = layer(y)\n y = tf.keras.layers.concatenate([x,y], axis=-1)\n return y\nclass DenseBlock(tf.keras.layers.Layer):\n def __init__(self, num_convs, num_channels):\n super(DenseBlock, self).__init__()\n self.listLayers = []\n for _ in range(num_convs):\n self.listLayers.append(ConvBlock(num_channels))\n def call(self, x):\n for layer in self.listLayers.layers:\n x = layer(x)\n return x\nblk = DenseBlock(2, 10)\nX = tf.random.uniform((4, 8, 8, 3))\nY = blk(X)\nY.shape\nclass 
TransitionBlock(tf.keras.layers.Layer):\n def __init__(self, num_channels, **kwargs):\n super(TransitionBlock, self).__init__(**kwargs)\n self.batch_norm = tf.keras.layers.BatchNormalization()\n self.relu = tf.keras.layers.ReLU()\n self.conv = tf.keras.layers.Conv2D(num_channels, kernel_size=1)\n self.avg_pool = tf.keras.layers.AvgPool2D(pool_size=2, strides=2)\n def call(self, x):\n x = self.batch_norm(x)\n x = self.relu(x)\n x = self.conv(x)\n return self.avg_pool(x)\nblk = TransitionBlock(10)\nblk(Y).shape\ndef block_1():\n return tf.keras.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.ReLU(),\n tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])\ndef block_2():\n net = block_1()\n num_channels, growth_rate = 64, 32\n num_convs_in_dense_blocks = [4, 4, 4, 4]\n for i, num_convs in enumerate(num_convs_in_dense_blocks):\n net.add(DenseBlock(num_convs, growth_rate))\n num_channels += num_convs * growth_rate\n if i != len(num_convs_in_dense_blocks) - 1:\n num_channels //= 2\n net.add(TransitionBlock(num_channels))\n return net\ndef net():\n net = block_2()\n net.add(tf.keras.layers.BatchNormalization())\n net.add(tf.keras.layers.ReLU())\n net.add(tf.keras.layers.GlobalAvgPool2D())\n net.add(tf.keras.layers.Flatten())\n net.add(tf.keras.layers.Dense(10))\n return net"} +{"id": 556, "mxnet": "%matplotlib inline\nfrom mxnet import autograd, gluon, init, np, npx\nfrom mxnet.gluon import nn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nT = 1000\ntime = np.arange(1, T + 1, dtype=np.float32)\nx = np.sin(0.01 * time) + np.random.normal(0, 0.2, (T,))\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = np.zeros((T - tau, tau))\nfor i in range(tau):\n features[:, i] = x[i: T - tau + i]\nlabels = x[tau:].reshape((-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef get_net():\n net = nn.Sequential()\n net.add(nn.Dense(10, activation='relu'),\n nn.Dense(1))\n net.initialize(init.Xavier())\n return net\nloss = gluon.loss.L2Loss()\ndef train(net, train_iter, loss, epochs, lr):\n trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr})\n for epoch in range(epochs):\n for X, y in train_iter:\n with autograd.record():\n l = loss(net(X), y)\n l.backward()\n trainer.step(batch_size)\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.asnumpy(), onestep_preds.asnumpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = np.zeros(T)\nmultistep_preds[: n_train + tau] = x[: n_train + tau]\nfor i in range(n_train + tau, T):\n multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.asnumpy(), onestep_preds.asnumpy(),\n multistep_preds[n_train + tau:].asnumpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = np.zeros((T - tau - max_steps + 1, tau + max_steps))\nfor i in range(tau):\n features[:, i] = x[i: i + T - tau - max_steps + 1]\nfor i in range(tau, tau + max_steps):\n features[:, i] = net(features[:, i - tau:i]).reshape(-1)\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].asnumpy() for i in steps], 'time', 'x',\n 
legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))", "tensorflow": "%matplotlib inline\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nT = 1000\ntime = tf.range(1, T + 1, dtype=tf.float32)\nx = tf.sin(0.01 * time) + tf.random.normal([T], 0, 0.2)\nd2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))\ntau = 4\nfeatures = tf.Variable(tf.zeros((T - tau, tau)))\nfor i in range(tau):\n features[:, i].assign(x[i: T - tau + i])\nlabels = tf.reshape(x[tau:], (-1, 1))\nbatch_size, n_train = 16, 600\ntrain_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)\ndef get_net():\n net = tf.keras.Sequential([tf.keras.layers.Dense(10, activation='relu'),\n tf.keras.layers.Dense(1)])\n return net\nloss = tf.keras.losses.MeanSquaredError()\ndef train(net, train_iter, loss, epochs, lr):\n trainer = tf.keras.optimizers.Adam()\n for epoch in range(epochs):\n for X, y in train_iter:\n with tf.GradientTape() as g:\n out = net(X)\n l = loss(y, out)\n params = net.trainable_variables\n grads = g.gradient(l, params)\n trainer.apply_gradients(zip(grads, params))\nnet = get_net()\ntrain(net, train_iter, loss, 5, 0.01)\nonestep_preds = net(features)\nd2l.plot([time, time[tau:]],\n [x.numpy(), onestep_preds.numpy()], 'time',\n 'x', legend=['data', '1-step preds'], xlim=[1, 1000],\n figsize=(6, 3))\nmultistep_preds = tf.Variable(tf.zeros(T))\nmultistep_preds[:n_train + tau].assign(x[:n_train + tau])\nfor i in range(n_train + tau, T):\n multistep_preds[i].assign(tf.reshape(net(tf.reshape(multistep_preds[i - tau: i], (1, -1))), ()))\nd2l.plot([time, time[tau:], time[n_train + tau:]],\n [x.numpy(), onestep_preds.numpy(),\n multistep_preds[n_train + tau:].numpy()], 'time',\n 'x', legend=['data', '1-step preds', 'multistep preds'],\n xlim=[1, 1000], figsize=(6, 3))\nmax_steps = 64\nfeatures = tf.Variable(tf.zeros((T - tau - max_steps + 1, tau + max_steps)))\nfor i in range(tau):\n features[:, i].assign(x[i: i + T - tau - max_steps + 1].numpy())\nfor i in range(tau, tau + max_steps):\n features[:, i].assign(tf.reshape(net((features[:, i - tau: i])), -1))\nsteps = (1, 4, 16, 64)\nd2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],\n [features[:, tau + i - 1].numpy() for i in steps], 'time', 'x',\n legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],\n figsize=(6, 3))"} +{"id": 557, "mxnet": "import collections\nimport re\nfrom d2l import mxnet as d2l\ndef tokenize(lines, token='word'):\n if token == 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('Error: Unknown word element type:' + token)\ntokens = tokenize(lines)\nfor i in range(11):\n print(tokens[i])\ndef load_corpus_time_machine(max_tokens=-1):\n lines = read_time_machine()\n tokens = tokenize(lines, 'char')\n vocab = Vocab(tokens)\n corpus = [vocab[token] for line in tokens for token in line]\n if max_tokens > 0:\n corpus = corpus[:max_tokens]\n return corpus, vocab\ncorpus, vocab = load_corpus_time_machine()\nlen(corpus), len(vocab)", "tensorflow": "import collections\nimport re\nfrom d2l import tensorflow as d2l\ndef tokenize(lines, token='word'):\n if token == 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('Error: Unknown word element type:' + token)\ntokens = tokenize(lines)\nfor i in range(11):\n print(tokens[i])\ndef load_corpus_time_machine(max_tokens=-1):\n lines = read_time_machine()\n tokens = 
tokenize(lines, 'char')\n vocab = Vocab(tokens)\n corpus = [vocab[token] for line in tokens for token in line]\n if max_tokens > 0:\n corpus = corpus[:max_tokens]\n return corpus, vocab\ncorpus, vocab = load_corpus_time_machine()\nlen(corpus), len(vocab)"} +{"id": 558, "mxnet": "import random\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield np.array(X), np.array(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = np.array(corpus[offset: offset + num_tokens])\n Ys = np.array(corpus[offset + 1: offset + 1 + num_tokens])\n Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_steps * num_batches, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y", "tensorflow": "import random\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ntokens = d2l.tokenize(d2l.read_time_machine())\ncorpus = [token for line in tokens for token in line]\nvocab = d2l.Vocab(corpus)\nvocab.token_freqs[:10]\ndef seq_data_iter_random(corpus, batch_size, num_steps):\n corpus = corpus[random.randint(0, num_steps - 1):]\n num_subseqs = (len(corpus) - 1) // num_steps\n initial_indices = list(range(0, num_subseqs * num_steps, num_steps))\n random.shuffle(initial_indices)\n def data(pos):\n return corpus[pos: pos + num_steps]\n num_batches = num_subseqs // batch_size\n for i in range(0, batch_size * num_batches, batch_size):\n initial_indices_per_batch = initial_indices[i: i + batch_size]\n X = [data(j) for j in initial_indices_per_batch]\n Y = [data(j + 1) for j in initial_indices_per_batch]\n yield tf.constant(X), tf.constant(Y)\ndef seq_data_iter_sequential(corpus, batch_size, num_steps):\n offset = random.randint(0, num_steps)\n num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size\n Xs = tf.constant(corpus[offset: offset + num_tokens])\n Ys = tf.constant(corpus[offset + 1: offset + 1 + num_tokens])\n Xs = tf.reshape(Xs, (batch_size, -1))\n Ys = tf.reshape(Ys, (batch_size, -1))\n num_batches = Xs.shape[1] // num_steps\n for i in range(0, num_batches * num_steps, num_steps):\n X = Xs[:, i: i + num_steps]\n Y = Ys[:, i: i + num_steps]\n yield X, Y"} +{"id": 559, "mxnet": "from mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nX, W_xh = np.random.normal(0, 1, (3, 1)), np.random.normal(0, 1, (1, 4))\nH, W_hh = np.random.normal(0, 1, (3, 4)), np.random.normal(0, 1, (4, 4))\nnp.dot(X, W_xh) + np.dot(H, W_hh)\nnp.dot(np.concatenate((X, H), 1), np.concatenate((W_xh, W_hh), 0))", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nX, W_xh = tf.random.normal((3, 1), 0, 1), 
tf.random.normal((1, 4), 0, 1)\nH, W_hh = tf.random.normal((3, 4), 0, 1), tf.random.normal((4, 4), 0, 1)\ntf.matmul(X, W_xh) + tf.matmul(H, W_hh)\ntf.matmul(tf.concat((X, H), 1), tf.concat((W_xh, W_hh), 0))"} +{"id": 560, "mxnet": "%matplotlib inline\nimport math\nfrom mxnet import autograd, gluon, np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnpx.one_hot(np.array([0, 2]), len(vocab))\nX = np.arange(10).reshape((2, 5))\nnpx.one_hot(X.T, 28).shape\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n W_xh = normal((num_inputs, num_hiddens))\n W_hh = normal((num_hiddens, num_hiddens))\n b_h = np.zeros(num_hiddens, ctx=device)\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_rnn_state(batch_size, num_hiddens, device):\n return (np.zeros((batch_size, num_hiddens), ctx=device), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n H = np.tanh(np.dot(X, W_xh) + np.dot(H, W_hh) + b_h)\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.params = get_params(vocab_size, num_hiddens, device)\n self.init_state, self.forward_fn = init_state, forward_fn\n def __call__(self, X, state):\n X = npx.one_hot(X.T, self.vocab_size)\n return self.forward_fn(X, state, self.params)\n def begin_state(self, batch_size, ctx):\n return self.init_state(batch_size, self.num_hiddens, ctx)\nnum_hiddens = 512\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\nstate = net.begin_state(X.shape[0], d2l.try_gpu())\nY, new_state = net(X.as_in_context(d2l.try_gpu()), state)\nY.shape, len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab, device):\n state = net.begin_state(batch_size=1, ctx=device)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: np.array([outputs[-1]], ctx=device).reshape((1, 1))\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.argmax(axis=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\npredict_ch8('time traveller ', 10, net, vocab, d2l.try_gpu())\ndef grad_clipping(net, theta):\n if isinstance(net, gluon.Block):\n params = [p.data() for p in net.collect_params().values()]\n else:\n params = net.params\n norm = math.sqrt(sum((p.grad ** 2).sum() for p in params))\n if norm > theta:\n for param in params:\n param.grad[:] *= theta / norm\ndef train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], ctx=device)\n else:\n for s in state:\n s.detach()\n y = Y.T.reshape(-1)\n X, y = X.as_in_ctx(device), y.as_in_ctx(device)\n with autograd.record():\n y_hat, state = net(X, state)\n l = loss(y_hat, y).mean()\n l.backward()\n 
grad_clipping(net, 1)\n updater(batch_size=1)\n metric.add(l * d2l.size(y), d2l.size(y))\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n loss = gluon.loss.SoftmaxCrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n if isinstance(net, gluon.Block):\n net.initialize(ctx=device, force_reinit=True, init=init.Normal(0.01))\n trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n updater = lambda batch_size: trainer.step(batch_size)\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\nnum_epochs, lr = 500, 1\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu())\nnet = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn)\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)", "tensorflow": "%matplotlib inline\nimport math\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ntrain_random_iter, vocab_random_iter = d2l.load_data_time_machine(batch_size, num_steps, use_random_iter=True)\ntf.one_hot(tf.constant([0, 2]), len(vocab))\nX = tf.reshape(tf.range(10), (2, 5))\ntf.one_hot(tf.transpose(X), 28).shape\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32)\n W_xh = tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32)\n W_hh = tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32)\n b_h = tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32)\n W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32)\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xh, W_hh, b_h, W_hq, b_q]\n return params\ndef init_rnn_state(batch_size, num_hiddens):\n return (tf.zeros((batch_size, num_hiddens)), )\ndef rnn(inputs, state, params):\n W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n X = tf.reshape(X,[-1,W_xh.shape[0]])\n H = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(H, W_hh) + b_h)\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,)\nclass RNNModelScratch:\n def __init__(self, vocab_size, num_hiddens, init_state, forward_fn, get_params):\n self.vocab_size, self.num_hiddens = vocab_size, num_hiddens\n self.init_state, self.forward_fn = init_state, forward_fn\n self.trainable_variables = get_params(vocab_size, num_hiddens)\n def __call__(self, X, state):\n X = tf.one_hot(tf.transpose(X), self.vocab_size)\n X = tf.cast(X, tf.float32)\n return self.forward_fn(X, state, self.trainable_variables)\n def begin_state(self, batch_size, *args, **kwargs):\n return self.init_state(batch_size, self.num_hiddens)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nnum_hiddens = 512\nwith strategy.scope():\n net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params)\nstate = net.begin_state(X.shape[0])\nY, new_state = net(X, state)\nY.shape, 
len(new_state), new_state[0].shape\ndef predict_ch8(prefix, num_preds, net, vocab):\n state = net.begin_state(batch_size=1, dtype=tf.float32)\n outputs = [vocab[prefix[0]]]\n get_input = lambda: tf.reshape(tf.constant([outputs[-1]]),\n (1, 1)).numpy()\n for y in prefix[1:]:\n _, state = net(get_input(), state)\n outputs.append(vocab[y])\n for _ in range(num_preds):\n y, state = net(get_input(), state)\n outputs.append(int(y.numpy().argmax(axis=1).reshape(1)))\n return ''.join([vocab.idx_to_token[i] for i in outputs])\npredict_ch8('time traveller ', 10, net, vocab)\ndef grad_clipping(grads, theta):\n theta = tf.constant(theta, dtype=tf.float32)\n new_grad = []\n for grad in grads:\n if isinstance(grad, tf.IndexedSlices):\n new_grad.append(tf.convert_to_tensor(grad))\n else:\n new_grad.append(grad)\n norm = tf.math.sqrt(sum((tf.reduce_sum(grad ** 2)).numpy()\n for grad in new_grad))\n norm = tf.cast(norm, tf.float32)\n if tf.greater(norm, theta):\n for i, grad in enumerate(new_grad):\n new_grad[i] = grad * theta / norm\n else:\n new_grad = new_grad\n return new_grad\ndef train_epoch_ch8(net, train_iter, loss, updater, use_random_iter):\n state, timer = None, d2l.Timer()\n metric = d2l.Accumulator(2)\n for X, Y in train_iter:\n if state is None or use_random_iter:\n state = net.begin_state(batch_size=X.shape[0], dtype=tf.float32)\n with tf.GradientTape(persistent=True) as g:\n y_hat, state = net(X, state)\n y = tf.reshape(tf.transpose(Y), (-1))\n l = loss(y, y_hat)\n params = net.trainable_variables\n grads = g.gradient(l, params)\n grads = grad_clipping(grads, 1)\n updater.apply_gradients(zip(grads, params))\n metric.add(l * d2l.size(y), d2l.size(y))\n return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()\ndef train_ch8(net, train_iter, vocab, lr, num_epochs, strategy, use_random_iter=False):\n with strategy.scope():\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n updater = tf.keras.optimizers.SGD(lr)\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab)\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, use_random_iter)\n if (epoch + 1) % 10 == 0:\n animator.add(epoch + 1, [ppl])\n device = d2l.try_gpu()._device_name\nnum_epochs, lr = 500, 1\ntrain_ch8(net, train_iter, vocab, lr, num_epochs, strategy)\nwith strategy.scope():\n net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params)\ntrain_ch8(net, train_iter, vocab_random_iter, lr, num_epochs, strategy, use_random_iter=True)"} +{"id": 561, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import nn, rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_layer = rnn.RNN(num_hiddens)\nrnn_layer.initialize()\nstate = rnn_layer.begin_state(batch_size=batch_size)\nlen(state), state[0].shape\nX = np.random.uniform(size=(num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, len(state_new), state_new[0].shape\nclass RNNModel(nn.Block):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.dense = nn.Dense(vocab_size)\n def forward(self, inputs, state):\n X = npx.one_hot(inputs.T, self.vocab_size)\n Y, state = self.rnn(X, state)\n output = self.dense(Y.reshape(-1, Y.shape[-1]))\n 
return output, state\n def begin_state(self, *args, **kwargs):\n return self.rnn.begin_state(*args, **kwargs)\ndevice = d2l.try_gpu()\nnet = RNNModel(rnn_layer, len(vocab))\nnet.initialize(force_reinit=True, ctx=device)\nd2l.predict_ch8('time traveller', 10, net, vocab, device)\nnum_epochs, lr = 500, 1\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\nnum_hiddens = 256\nrnn_cell = tf.keras.layers.SimpleRNNCell(num_hiddens, kernel_initializer='glorot_uniform')\nrnn_layer = tf.keras.layers.RNN(rnn_cell, time_major=True, return_sequences=True, return_state=True)\nstate = rnn_cell.get_initial_state(batch_size=batch_size, dtype=tf.float32)\nstate.shape\nX = tf.random.uniform((num_steps, batch_size, len(vocab)))\nY, state_new = rnn_layer(X, state)\nY.shape, len(state_new), state_new[0].shape\nclass RNNModel(tf.keras.layers.Layer):\n def __init__(self, rnn_layer, vocab_size, **kwargs):\n super(RNNModel, self).__init__(**kwargs)\n self.rnn = rnn_layer\n self.vocab_size = vocab_size\n self.dense = tf.keras.layers.Dense(vocab_size)\n def call(self, inputs, state):\n X = tf.one_hot(tf.transpose(inputs), self.vocab_size)\n Y, *state = self.rnn(X, state)\n output = self.dense(tf.reshape(Y, (-1, Y.shape[-1])))\n return output, state\n def begin_state(self, *args, **kwargs):\n return self.rnn.cell.get_initial_state(*args, **kwargs)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n net = RNNModel(rnn_layer, vocab_size=len(vocab))\nd2l.predict_ch8('time traveller', 10, net, vocab)\nnum_epochs, lr = 500, 1\nd2l.train_ch8(net, train_iter, vocab, lr, num_epochs, strategy)"} +{"id": 562, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_gru_state(batch_size, num_hiddens, device):\n return (np.zeros(shape=(batch_size, num_hiddens), ctx=device), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = npx.sigmoid(np.dot(X, W_xz) + np.dot(H, W_hz) + b_z)\n R = npx.sigmoid(np.dot(X, W_xr) + np.dot(H, W_hr) + b_r)\n H_tilda = np.tanh(np.dot(X, W_xh) + np.dot(R * H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H,)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params, init_gru_state, gru)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\ngru_layer = 
rnn.GRU(num_hiddens)\nmodel = d2l.RNNModel(gru_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32)\n def three():\n return (tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32), tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32))\n W_xz, W_hz, b_z = three()\n W_xr, W_hr, b_r = three()\n W_xh, W_hh, b_h = three()\n W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32)\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n return params\ndef init_gru_state(batch_size, num_hiddens):\n return (tf.zeros((batch_size, num_hiddens)), )\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n X = tf.reshape(X,[-1,W_xh.shape[0]])\n Z = tf.sigmoid(tf.matmul(X, W_xz) + tf.matmul(H, W_hz) + b_z)\n R = tf.sigmoid(tf.matmul(X, W_xr) + tf.matmul(H, W_hr) + b_r)\n H_tilda = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(R * H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,)\nvocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nnum_epochs, lr = 500, 1\nwith strategy.scope():\n model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_gru_state, gru, get_params)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)\ngru_cell = tf.keras.layers.GRUCell(num_hiddens, kernel_initializer='glorot_uniform')\ngru_layer = tf.keras.layers.RNN(gru_cell, time_major=True, return_sequences=True, return_state=True)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModel(gru_layer, vocab_size=len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)"} +{"id": 563, "mxnet": "from mxnet import np, npx\nfrom mxnet.gluon import rnn\nfrom d2l import mxnet as d2l\nnpx.set_np()\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens, device):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return np.random.normal(scale=0.01, size=shape, ctx=device)\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = np.zeros(num_outputs, ctx=device)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n for param in params:\n param.attach_grad()\n return params\ndef init_lstm_state(batch_size, num_hiddens, device):\n return (np.zeros((batch_size, num_hiddens), ctx=device), np.zeros((batch_size, num_hiddens), ctx=device))\ndef lstm(inputs, state, params):\n [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, 
b_c,\n W_hq, b_q] = params\n (H, C) = state\n outputs = []\n for X in inputs:\n I = npx.sigmoid(np.dot(X, W_xi) + np.dot(H, W_hi) + b_i)\n F = npx.sigmoid(np.dot(X, W_xf) + np.dot(H, W_hf) + b_f)\n O = npx.sigmoid(np.dot(X, W_xo) + np.dot(H, W_ho) + b_o)\n C_tilda = np.tanh(np.dot(X, W_xc) + np.dot(H, W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * np.tanh(C)\n Y = np.dot(H, W_hq) + b_q\n outputs.append(Y)\n return np.concatenate(outputs, axis=0), (H, C)\nvocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()\nnum_epochs, lr = 500, 1\nmodel = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_lstm_params, init_lstm_state, lstm)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)\nlstm_layer = rnn.LSTM(num_hiddens)\nmodel = d2l.RNNModel(lstm_layer, len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)", "tensorflow": "import tensorflow as tf\nfrom d2l import tensorflow as d2l\nbatch_size, num_steps = 32, 35\ntrain_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)\ndef get_lstm_params(vocab_size, num_hiddens):\n num_inputs = num_outputs = vocab_size\n def normal(shape):\n return tf.Variable(tf.random.normal(shape=shape, stddev=0.01, mean=0, dtype=tf.float32))\n def three():\n return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32))\n W_xi, W_hi, b_i = three()\n W_xf, W_hf, b_f = three()\n W_xo, W_ho, b_o = three()\n W_xc, W_hc, b_c = three()\n W_hq = normal((num_hiddens, num_outputs))\n b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)\n params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n return params\ndef init_lstm_state(batch_size, num_hiddens):\n return (tf.zeros(shape=(batch_size, num_hiddens)), tf.zeros(shape=(batch_size, num_hiddens)))\ndef lstm(inputs, state, params):\n W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q = params\n (H, C) = state\n outputs = []\n for X in inputs:\n X=tf.reshape(X,[-1,W_xi.shape[0]])\n I = tf.sigmoid(tf.matmul(X, W_xi) + tf.matmul(H, W_hi) + b_i)\n F = tf.sigmoid(tf.matmul(X, W_xf) + tf.matmul(H, W_hf) + b_f)\n O = tf.sigmoid(tf.matmul(X, W_xo) + tf.matmul(H, W_ho) + b_o)\n C_tilda = tf.tanh(tf.matmul(X, W_xc) + tf.matmul(H, W_hc) + b_c)\n C = F * C + I * C_tilda\n H = O * tf.tanh(C)\n Y = tf.matmul(H, W_hq) + b_q\n outputs.append(Y)\n return tf.concat(outputs, axis=0), (H,C)\nvocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name\nnum_epochs, lr = 500, 1\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_lstm_state, lstm, get_lstm_params)\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)\nlstm_cell = tf.keras.layers.LSTMCell(num_hiddens, kernel_initializer='glorot_uniform')\nlstm_layer = tf.keras.layers.RNN(lstm_cell, time_major=True, return_sequences=True, return_state=True)\ndevice_name = d2l.try_gpu()._device_name\nstrategy = tf.distribute.OneDeviceStrategy(device_name)\nwith strategy.scope():\n model = d2l.RNNModel(lstm_layer, vocab_size=len(vocab))\nd2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)"} +{"id": 564, "mxnet": "import os\nfrom mxnet import np, npx\nfrom d2l import mxnet as d2l\nnpx.set_np()\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = np.array([truncate_pad(l, num_steps, vocab['<pad>']) 
for l in lines])\n valid_len = (array != vocab['<pad>']).astype(np.int32).sum(1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', X.astype(np.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', Y.astype(np.int32))\n print('Valid length of Y:', Y_valid_len)\n break", "tensorflow": "import os\nimport tensorflow as tf\nfrom d2l import tensorflow as d2l\ndef build_array_nmt(lines, vocab, num_steps):\n lines = [vocab[l] for l in lines]\n lines = [l + [vocab['<eos>']] for l in lines]\n array = tf.constant([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])\n valid_len = tf.reduce_sum(\n tf.cast(array != vocab['<pad>'], tf.int32), 1)\n return array, valid_len\ntrain_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)\nfor X, X_valid_len, Y, Y_valid_len in train_iter:\n print('X:', tf.cast(X, tf.int32))\n print('Valid length of X:', X_valid_len)\n print('Y:', tf.cast(Y, tf.int32))\n print('Valid length of Y:', Y_valid_len)\n break"}