Dataset columns: `markdown` (string, 0 to 1.02M chars), `code` (string, 0 to 832k chars), `output` (string, 0 to 1.02M chars), `license` (string, 3 to 36 chars), `path` (string, 6 to 265 chars), `repo_name` (string, 6 to 127 chars).
**Chapter 10 – Introduction to Artificial Neural Networks**

_This notebook contains all the sample code and solutions to the exercises in chapter 10._

Setup

First, let's make sure this notebook works well in both Python 2 and 3, import a few common modules, ensure that Matplotlib plots figures inline, and prepare a function to save the figures:
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals

# Common imports
import numpy as np
import os

# to make this notebook's output stable across runs
# (note: reset_graph() uses TensorFlow, which is imported later in this notebook)
def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)

# To plot pretty figures
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12

# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "ann"

def save_fig(fig_id, tight_layout=True):
    path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)
_____no_output_____
Apache-2.0
10_introduction_to_artificial_neural_networks.ipynb
leoluyi/handson-ml
Perceptrons
import numpy as np from sklearn.datasets import load_iris from sklearn.linear_model import Perceptron iris = load_iris() X = iris.data[:, (2, 3)] # petal length, petal width y = (iris.target == 0).astype(np.int) per_clf = Perceptron(max_iter=100, random_state=42) per_clf.fit(X, y) y_pred = per_clf.predict([[2, 0.5]]) y_pred a = -per_clf.coef_[0][0] / per_clf.coef_[0][1] b = -per_clf.intercept_ / per_clf.coef_[0][1] axes = [0, 5, 0, 2] x0, x1 = np.meshgrid( np.linspace(axes[0], axes[1], 500).reshape(-1, 1), np.linspace(axes[2], axes[3], 200).reshape(-1, 1), ) X_new = np.c_[x0.ravel(), x1.ravel()] y_predict = per_clf.predict(X_new) zz = y_predict.reshape(x0.shape) plt.figure(figsize=(10, 4)) plt.plot(X[y==0, 0], X[y==0, 1], "bs", label="Not Iris-Setosa") plt.plot(X[y==1, 0], X[y==1, 1], "yo", label="Iris-Setosa") plt.plot([axes[0], axes[1]], [a * axes[0] + b, a * axes[1] + b], "k-", linewidth=3) from matplotlib.colors import ListedColormap custom_cmap = ListedColormap(['#9898ff', '#fafab0']) plt.contourf(x0, x1, zz, cmap=custom_cmap) plt.xlabel("Petal length", fontsize=14) plt.ylabel("Petal width", fontsize=14) plt.legend(loc="lower right", fontsize=14) plt.axis(axes) save_fig("perceptron_iris_plot") plt.show()
Saving figure perceptron_iris_plot
Apache-2.0
10_introduction_to_artificial_neural_networks.ipynb
leoluyi/handson-ml
Activation functions
def logit(z): return 1 / (1 + np.exp(-z)) def relu(z): return np.maximum(0, z) def derivative(f, z, eps=0.000001): return (f(z + eps) - f(z - eps))/(2 * eps) z = np.linspace(-5, 5, 200) plt.figure(figsize=(11,4)) plt.subplot(121) plt.plot(z, np.sign(z), "r-", linewidth=2, label="Step") plt.plot(z, logit(z), "g--", linewidth=2, label="Logit") plt.plot(z, np.tanh(z), "b-", linewidth=2, label="Tanh") plt.plot(z, relu(z), "m-.", linewidth=2, label="ReLU") plt.grid(True) plt.legend(loc="center right", fontsize=14) plt.title("Activation functions", fontsize=14) plt.axis([-5, 5, -1.2, 1.2]) plt.subplot(122) plt.plot(z, derivative(np.sign, z), "r-", linewidth=2, label="Step") plt.plot(0, 0, "ro", markersize=5) plt.plot(0, 0, "rx", markersize=10) plt.plot(z, derivative(logit, z), "g--", linewidth=2, label="Logit") plt.plot(z, derivative(np.tanh, z), "b-", linewidth=2, label="Tanh") plt.plot(z, derivative(relu, z), "m-.", linewidth=2, label="ReLU") plt.grid(True) #plt.legend(loc="center right", fontsize=14) plt.title("Derivatives", fontsize=14) plt.axis([-5, 5, -0.2, 1.2]) save_fig("activation_functions_plot") plt.show() def heaviside(z): return (z >= 0).astype(z.dtype) def sigmoid(z): return 1/(1+np.exp(-z)) def mlp_xor(x1, x2, activation=heaviside): return activation(-activation(x1 + x2 - 1.5) + activation(x1 + x2 - 0.5) - 0.5) x1s = np.linspace(-0.2, 1.2, 100) x2s = np.linspace(-0.2, 1.2, 100) x1, x2 = np.meshgrid(x1s, x2s) z1 = mlp_xor(x1, x2, activation=heaviside) z2 = mlp_xor(x1, x2, activation=sigmoid) plt.figure(figsize=(10,4)) plt.subplot(121) plt.contourf(x1, x2, z1) plt.plot([0, 1], [0, 1], "gs", markersize=20) plt.plot([0, 1], [1, 0], "y^", markersize=20) plt.title("Activation function: heaviside", fontsize=14) plt.grid(True) plt.subplot(122) plt.contourf(x1, x2, z2) plt.plot([0, 1], [0, 1], "gs", markersize=20) plt.plot([0, 1], [1, 0], "y^", markersize=20) plt.title("Activation function: sigmoid", fontsize=14) plt.grid(True)
_____no_output_____
Apache-2.0
10_introduction_to_artificial_neural_networks.ipynb
leoluyi/handson-ml
FNN for MNIST Using the Estimator API (formerly `tf.contrib.learn`)
import tensorflow as tf
_____no_output_____
Apache-2.0
10_introduction_to_artificial_neural_networks.ipynb
leoluyi/handson-ml
**Warning**: `tf.examples.tutorials.mnist` is deprecated. We will use `tf.keras.datasets.mnist` instead. Moreover, the `tf.contrib.learn` API was promoted to `tf.estimator` and `tf.feature_column`, and it has changed considerably. In particular, there is no `infer_real_valued_columns_from_input()` function or `SKCompat` class.
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data() X_train = X_train.astype(np.float32).reshape(-1, 28*28) / 255.0 X_test = X_test.astype(np.float32).reshape(-1, 28*28) / 255.0 y_train = y_train.astype(np.int32) y_test = y_test.astype(np.int32) X_valid, X_train = X_train[:5000], X_train[5000:] y_valid, y_train = y_train[:5000], y_train[5000:] feature_cols = [tf.feature_column.numeric_column("X", shape=[28 * 28])] dnn_clf = tf.estimator.DNNClassifier(hidden_units=[300,100], n_classes=10, feature_columns=feature_cols) input_fn = tf.estimator.inputs.numpy_input_fn( x={"X": X_train}, y=y_train, num_epochs=40, batch_size=50, shuffle=True) dnn_clf.train(input_fn=input_fn) test_input_fn = tf.estimator.inputs.numpy_input_fn( x={"X": X_test}, y=y_test, shuffle=False) eval_results = dnn_clf.evaluate(input_fn=test_input_fn) eval_results y_pred_iter = dnn_clf.predict(input_fn=test_input_fn) y_pred = list(y_pred_iter) y_pred[0]
INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from /tmp/tmpuflzeb_h/model.ckpt-44000 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op.
Apache-2.0
10_introduction_to_artificial_neural_networks.ipynb
leoluyi/handson-ml
Using plain TensorFlow
import tensorflow as tf n_inputs = 28*28 # MNIST n_hidden1 = 300 n_hidden2 = 100 n_outputs = 10 reset_graph() X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X") y = tf.placeholder(tf.int32, shape=(None), name="y") def neuron_layer(X, n_neurons, name, activation=None): with tf.name_scope(name): n_inputs = int(X.get_shape()[1]) stddev = 2 / np.sqrt(n_inputs) init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev) W = tf.Variable(init, name="kernel") b = tf.Variable(tf.zeros([n_neurons]), name="bias") Z = tf.matmul(X, W) + b if activation is not None: return activation(Z) else: return Z with tf.name_scope("dnn"): hidden1 = neuron_layer(X, n_hidden1, name="hidden1", activation=tf.nn.relu) hidden2 = neuron_layer(hidden1, n_hidden2, name="hidden2", activation=tf.nn.relu) logits = neuron_layer(hidden2, n_outputs, name="outputs") with tf.name_scope("loss"): xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits) loss = tf.reduce_mean(xentropy, name="loss") learning_rate = 0.01 with tf.name_scope("train"): optimizer = tf.train.GradientDescentOptimizer(learning_rate) training_op = optimizer.minimize(loss) with tf.name_scope("eval"): correct = tf.nn.in_top_k(logits, y, 1) accuracy = tf.reduce_mean(tf.cast(correct, tf.float32)) init = tf.global_variables_initializer() saver = tf.train.Saver() n_epochs = 40 batch_size = 50 def shuffle_batch(X, y, batch_size): rnd_idx = np.random.permutation(len(X)) n_batches = len(X) // batch_size for batch_idx in np.array_split(rnd_idx, n_batches): X_batch, y_batch = X[batch_idx], y[batch_idx] yield X_batch, y_batch with tf.Session() as sess: init.run() for epoch in range(n_epochs): for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size): sess.run(training_op, feed_dict={X: X_batch, y: y_batch}) acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch}) acc_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid}) print(epoch, "Batch accuracy:", acc_batch, "Val accuracy:", acc_val) save_path = saver.save(sess, "./my_model_final.ckpt") with tf.Session() as sess: saver.restore(sess, "./my_model_final.ckpt") # or better, use save_path X_new_scaled = X_test[:20] Z = logits.eval(feed_dict={X: X_new_scaled}) y_pred = np.argmax(Z, axis=1) print("Predicted classes:", y_pred) print("Actual classes: ", y_test[:20]) from tensorflow_graph_in_jupyter import show_graph show_graph(tf.get_default_graph())
_____no_output_____
Apache-2.0
10_introduction_to_artificial_neural_networks.ipynb
leoluyi/handson-ml
Using `dense()` instead of `neuron_layer()`

Note: previous releases of the book used `tensorflow.contrib.layers.fully_connected()` rather than `tf.layers.dense()` (which did not exist when this chapter was written). It is now preferable to use `tf.layers.dense()`, because anything in the contrib module may change or be deleted without notice. The `dense()` function is almost identical to the `fully_connected()` function, except for a few minor differences:

* several parameters are renamed: `scope` becomes `name`, `activation_fn` becomes `activation` (and similarly the `_fn` suffix is removed from other parameters such as `normalizer_fn`), `weights_initializer` becomes `kernel_initializer`, etc. (sketched below)
* the default `activation` is now `None` rather than `tf.nn.relu`.
* a few more differences are presented in chapter 11.
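To make the renaming concrete, here is a rough side-by-side sketch (illustration only, assuming TF 1.x and an input tensor `X` like the placeholder above; the layer name "demo" is made up):

```python
# contrib API (older):
#   h = tf.contrib.layers.fully_connected(X, 300, scope="demo",
#                                         activation_fn=tf.nn.relu)
# tf.layers API (preferred) -- note name / activation / kernel_initializer:
h = tf.layers.dense(X, 300, name="demo", activation=tf.nn.relu,
                    kernel_initializer=tf.variance_scaling_initializer())
```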
n_inputs = 28*28 # MNIST n_hidden1 = 300 n_hidden2 = 100 n_outputs = 10 reset_graph() X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X") y = tf.placeholder(tf.int32, shape=(None), name="y") with tf.name_scope("dnn"): hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1", activation=tf.nn.relu) hidden2 = tf.layers.dense(hidden1, n_hidden2, name="hidden2", activation=tf.nn.relu) logits = tf.layers.dense(hidden2, n_outputs, name="outputs") y_proba = tf.nn.softmax(logits) with tf.name_scope("loss"): xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits) loss = tf.reduce_mean(xentropy, name="loss") learning_rate = 0.01 with tf.name_scope("train"): optimizer = tf.train.GradientDescentOptimizer(learning_rate) training_op = optimizer.minimize(loss) with tf.name_scope("eval"): correct = tf.nn.in_top_k(logits, y, 1) accuracy = tf.reduce_mean(tf.cast(correct, tf.float32)) init = tf.global_variables_initializer() saver = tf.train.Saver() n_epochs = 20 n_batches = 50 with tf.Session() as sess: init.run() for epoch in range(n_epochs): for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size): sess.run(training_op, feed_dict={X: X_batch, y: y_batch}) acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch}) acc_valid = accuracy.eval(feed_dict={X: X_valid, y: y_valid}) print(epoch, "Batch accuracy:", acc_batch, "Validation accuracy:", acc_valid) save_path = saver.save(sess, "./my_model_final.ckpt") show_graph(tf.get_default_graph())
_____no_output_____
Apache-2.0
10_introduction_to_artificial_neural_networks.ipynb
leoluyi/handson-ml
Exercise solutions

1. to 8. See appendix A.

9. _Train a deep MLP on the MNIST dataset and see if you can get over 98% precision. Just like in the last exercise of chapter 9, try adding all the bells and whistles (i.e., save checkpoints, restore the last checkpoint in case of an interruption, add summaries, plot learning curves using TensorBoard, and so on)._

First let's create the deep net. It's exactly the same as earlier, with just one addition: we add a `tf.summary.scalar()` to track the loss and the accuracy during training, so we can view nice learning curves using TensorBoard.
n_inputs = 28*28 # MNIST n_hidden1 = 300 n_hidden2 = 100 n_outputs = 10 reset_graph() X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X") y = tf.placeholder(tf.int32, shape=(None), name="y") with tf.name_scope("dnn"): hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1", activation=tf.nn.relu) hidden2 = tf.layers.dense(hidden1, n_hidden2, name="hidden2", activation=tf.nn.relu) logits = tf.layers.dense(hidden2, n_outputs, name="outputs") with tf.name_scope("loss"): xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits) loss = tf.reduce_mean(xentropy, name="loss") loss_summary = tf.summary.scalar('log_loss', loss) learning_rate = 0.01 with tf.name_scope("train"): optimizer = tf.train.GradientDescentOptimizer(learning_rate) training_op = optimizer.minimize(loss) with tf.name_scope("eval"): correct = tf.nn.in_top_k(logits, y, 1) accuracy = tf.reduce_mean(tf.cast(correct, tf.float32)) accuracy_summary = tf.summary.scalar('accuracy', accuracy) init = tf.global_variables_initializer() saver = tf.train.Saver()
_____no_output_____
Apache-2.0
10_introduction_to_artificial_neural_networks.ipynb
leoluyi/handson-ml
Now we need to define the directory to write the TensorBoard logs to:
from datetime import datetime def log_dir(prefix=""): now = datetime.utcnow().strftime("%Y%m%d%H%M%S") root_logdir = "tf_logs" if prefix: prefix += "-" name = prefix + "run-" + now return "{}/{}/".format(root_logdir, name) logdir = log_dir("mnist_dnn")
_____no_output_____
Apache-2.0
10_introduction_to_artificial_neural_networks.ipynb
leoluyi/handson-ml
Now we can create the `FileWriter` that we will use to write the TensorBoard logs:
file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
_____no_output_____
Apache-2.0
10_introduction_to_artificial_neural_networks.ipynb
leoluyi/handson-ml
Hey! Why don't we implement early stopping? For this, we are going to need to use the validation set.
m, n = X_train.shape n_epochs = 10001 batch_size = 50 n_batches = int(np.ceil(m / batch_size)) checkpoint_path = "/tmp/my_deep_mnist_model.ckpt" checkpoint_epoch_path = checkpoint_path + ".epoch" final_model_path = "./my_deep_mnist_model" best_loss = np.infty epochs_without_progress = 0 max_epochs_without_progress = 50 with tf.Session() as sess: if os.path.isfile(checkpoint_epoch_path): # if the checkpoint file exists, restore the model and load the epoch number with open(checkpoint_epoch_path, "rb") as f: start_epoch = int(f.read()) print("Training was interrupted. Continuing at epoch", start_epoch) saver.restore(sess, checkpoint_path) else: start_epoch = 0 sess.run(init) for epoch in range(start_epoch, n_epochs): for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size): sess.run(training_op, feed_dict={X: X_batch, y: y_batch}) accuracy_val, loss_val, accuracy_summary_str, loss_summary_str = sess.run([accuracy, loss, accuracy_summary, loss_summary], feed_dict={X: X_valid, y: y_valid}) file_writer.add_summary(accuracy_summary_str, epoch) file_writer.add_summary(loss_summary_str, epoch) if epoch % 5 == 0: print("Epoch:", epoch, "\tValidation accuracy: {:.3f}%".format(accuracy_val * 100), "\tLoss: {:.5f}".format(loss_val)) saver.save(sess, checkpoint_path) with open(checkpoint_epoch_path, "wb") as f: f.write(b"%d" % (epoch + 1)) if loss_val < best_loss: saver.save(sess, final_model_path) best_loss = loss_val else: epochs_without_progress += 5 if epochs_without_progress > max_epochs_without_progress: print("Early stopping") break os.remove(checkpoint_epoch_path) with tf.Session() as sess: saver.restore(sess, final_model_path) accuracy_val = accuracy.eval(feed_dict={X: X_test, y: y_test}) accuracy_val
_____no_output_____
Apache-2.0
10_introduction_to_artificial_neural_networks.ipynb
leoluyi/handson-ml
Define a function for which we'd like to find the roots
def function_for_roots(x): a = 1.01 b = -3.04 c = 2.07 return a*x**2 + b*x + c #get the roots of ax^2 + bx + c
_____no_output_____
MIT
astr-119-session-7/bisection_search_demo.ipynb
spaceghst007/astro-119
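As a quick cross-check on what the bisection search should converge to, here are the analytic roots of the same quadratic (a small sketch using NumPy's polynomial root finder):

```python
import numpy as np

# roots of 1.01*x**2 - 3.04*x + 2.07, for comparison with the bisection result
analytic_roots = np.roots([1.01, -3.04, 2.07])
print(analytic_roots)   # two roots, near 1.969 and 1.041 (the bracket [0, 1.5] contains the latter)
```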
We need a function to check whether our initial values are valid
def check_initial_values(f, x_min, x_max, tol): #check our initial guesses y_min = f(x_min) y_max = f(x_max) #check that x_min and x_max contain a zero crossing if(y_min*y_max>=0.0): print("No zero crossing found in the range = ",x_min,x_max) s = "f(%f) = %f, f(%f) = %f" % (x_min,y_min,x_max,y_max) print(s) return 0 #if x_min is a root, then return flag == 1 if(np.fabs(y_min)<tol): return 1 #if x_max is a root, then return flag == 2 if(np.fabs(y_max)<tol): return 2 #if we reach this point, the bracket is valid #and we will return 3 return 3
_____no_output_____
MIT
astr-119-session-7/bisection_search_demo.ipynb
spaceghst007/astro-119
Now we will define the main work function that actually performs the iterative search
def bisection_root_finding(f, x_min_start, x_max_start, tol): #this function uses bisection search to find a root x_min = x_min_start #minimum x in bracket x_max = x_max_start #maximum x in bracket x_mid = 0.0 #mid point y_min = f(x_min) #function value at x_min y_max = f(x_max) #function value at x_max y_mid = 0.0 #function value at mid point imax = 10000 #set a maximum number of iterations i = 0 #iteration counter #check the initial values flag = check_initial_values(f,x_min,x_max,tol) if(flag==0): print("Error in bisection_root_finding().") raise ValueError('Intial values invalid',x_min,x_max) elif(flag==1): #lucky guess return x_min elif(flag==2): #another lucky guess return x_max #if we reach here, then we need to conduct the search #set a flag flag = 1 #enter a while loop while(flag): x_mid = 0.5*(x_min+x_max) #mid point y_mid = f(x_mid) #function value at x_mid #check if x_mid is a root if(np.fabs(y_mid)<tol): flag = 0 else: #x_mid is not a root #if the product of the functio at the midpoint #and at one of the end points is greater than #zero, replace this end point if(f(x_min)*f(x_mid)>0): #replace x_min with x_mid x_min = x_mid else: #repalce x_max with x_mid x_max = x_mid #print out the iteration print(x_min,f(x_min),x_max,f(x_max)) #count the iteration i += 1 #if we have exceeded the max number #of iterations, exit if(i>=imax): print("Exceeded max number of iterations = ",i) s = "Min bracket f(%f) = %f" % (x_min,f(x_min)) print(s) s = "Max bracket f(%f) = %f" % (x_max,f(x_max)) print(s) s = "Mid bracket f(%f) = %f" % (x_mid,f(x_mid)) print(s) raise StopIteration('Stopping iterations after ',i) #we are done! return x_mid x_min = 0.0 x_max = 1.5 tolerance = 1.0e-6 #print the initial guess print(x_min,function_for_roots(x_min)) print(x_max,function_for_roots(x_max)) x_root = bisection_root_finding(function_for_roots,x_min,x_max,tolerance) y_root = function_for_roots(x_root) s = "Root found with y(%f) = %f" % (x_root,y_root) print(s)
_____no_output_____
MIT
astr-119-session-7/bisection_search_demo.ipynb
spaceghst007/astro-119
*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*

*The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!*

*No changes were made to the contents of this notebook from the original.*

Histograms, Binnings, and Density

A simple histogram can be a great first step in understanding a dataset. Earlier, we saw a preview of Matplotlib's histogram function (see [Comparisons, Masks, and Boolean Logic](02.06-Boolean-Arrays-and-Masks.ipynb)), which creates a basic histogram in one line, once the normal boilerplate imports are done:
%matplotlib inline import numpy as np import matplotlib.pyplot as plt plt.style.use('seaborn-white') data = np.random.randn(1000) plt.hist(data);
_____no_output_____
Apache-2.0
matplotlib/04.05-Histograms-and-Binnings.ipynb
purushothamgowthu/data-science-ipython-notebooks
The ``hist()`` function has many options to tune both the calculation and the display; here's an example of a more customized histogram:
plt.hist(data, bins=30, normed=True, alpha=0.5, histtype='stepfilled', color='steelblue', edgecolor='none');
_____no_output_____
Apache-2.0
matplotlib/04.05-Histograms-and-Binnings.ipynb
purushothamgowthu/data-science-ipython-notebooks
The ``plt.hist`` docstring has more information on other customization options available. I find this combination of ``histtype='stepfilled'`` along with some transparency ``alpha`` to be very useful when comparing histograms of several distributions:
x1 = np.random.normal(0, 0.8, 1000) x2 = np.random.normal(-2, 1, 1000) x3 = np.random.normal(3, 2, 1000) kwargs = dict(histtype='stepfilled', alpha=0.3, normed=True, bins=40) plt.hist(x1, **kwargs) plt.hist(x2, **kwargs) plt.hist(x3, **kwargs);
_____no_output_____
Apache-2.0
matplotlib/04.05-Histograms-and-Binnings.ipynb
purushothamgowthu/data-science-ipython-notebooks
If you would like to simply compute the histogram (that is, count the number of points in a given bin) and not display it, the ``np.histogram()`` function is available:
counts, bin_edges = np.histogram(data, bins=5) print(counts)
[ 12 190 468 301 29]
Apache-2.0
matplotlib/04.05-Histograms-and-Binnings.ipynb
purushothamgowthu/data-science-ipython-notebooks
Two-Dimensional Histograms and Binnings

Just as we create histograms in one dimension by dividing the number line into bins, we can also create histograms in two dimensions by dividing points among two-dimensional bins. We'll take a brief look at several ways to do this here. We'll start by defining some data: an ``x`` and ``y`` array drawn from a multivariate Gaussian distribution:
mean = [0, 0] cov = [[1, 1], [1, 2]] x, y = np.random.multivariate_normal(mean, cov, 10000).T
_____no_output_____
Apache-2.0
matplotlib/04.05-Histograms-and-Binnings.ipynb
purushothamgowthu/data-science-ipython-notebooks
``plt.hist2d``: Two-dimensional histogram

One straightforward way to plot a two-dimensional histogram is to use Matplotlib's ``plt.hist2d`` function:
plt.hist2d(x, y, bins=30, cmap='Blues') cb = plt.colorbar() cb.set_label('counts in bin')
_____no_output_____
Apache-2.0
matplotlib/04.05-Histograms-and-Binnings.ipynb
purushothamgowthu/data-science-ipython-notebooks
Just as with ``plt.hist``, ``plt.hist2d`` has a number of extra options to fine-tune the plot and the binning, which are nicely outlined in the function docstring. Further, just as ``plt.hist`` has a counterpart in ``np.histogram``, ``plt.hist2d`` has a counterpart in ``np.histogram2d``, which can be used as follows:
counts, xedges, yedges = np.histogram2d(x, y, bins=30)
_____no_output_____
Apache-2.0
matplotlib/04.05-Histograms-and-Binnings.ipynb
purushothamgowthu/data-science-ipython-notebooks
For the generalization of this histogram binning to dimensions higher than two, see the ``np.histogramdd`` function (a short sketch follows below).

``plt.hexbin``: Hexagonal binnings

The two-dimensional histogram creates a tessellation of squares across the axes. Another natural shape for such a tessellation is the regular hexagon. For this purpose, Matplotlib provides the ``plt.hexbin`` routine, which represents a two-dimensional dataset binned within a grid of hexagons:
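Before moving on to hexagonal bins, here is a minimal sketch of ``np.histogramdd`` on the same ``x``, ``y`` data, treated as a two-dimensional sample (the bin count is arbitrary):

```python
import numpy as np

# np.histogramdd generalizes np.histogram2d to N dimensions;
# for a 2D sample it produces the same counts as np.histogram2d
sample = np.vstack([x, y]).T              # shape (10000, 2)
counts_dd, edges = np.histogramdd(sample, bins=30)
print(counts_dd.shape)                    # (30, 30)
```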
plt.hexbin(x, y, gridsize=30, cmap='Blues') cb = plt.colorbar(label='count in bin')
_____no_output_____
Apache-2.0
matplotlib/04.05-Histograms-and-Binnings.ipynb
purushothamgowthu/data-science-ipython-notebooks
``plt.hexbin`` has a number of interesting options, including the ability to specify weights for each point, and to change the output in each bin to any NumPy aggregate (mean of weights, standard deviation of weights, etc.); these are illustrated in the short sketch below.

Kernel density estimation

Another common method of evaluating densities in multiple dimensions is *kernel density estimation* (KDE). This will be discussed more fully in [In-Depth: Kernel Density Estimation](05.13-Kernel-Density-Estimation.ipynb), but for now we'll simply mention that KDE can be thought of as a way to "smear out" the points in space and add up the result to obtain a smooth function. One extremely quick and simple KDE implementation exists in the ``scipy.stats`` package. Here is a quick example of using the KDE on this data:
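Before the KDE example, here is a quick illustration of those ``plt.hexbin`` options mentioned above (a sketch only; the ``weights`` array is made up for demonstration):

```python
import numpy as np
import matplotlib.pyplot as plt

# pass per-point values via C and aggregate them per hexagon with
# reduce_C_function (np.mean is the default; np.std is shown here)
weights = np.random.rand(len(x))          # hypothetical per-point weights
plt.hexbin(x, y, C=weights, reduce_C_function=np.std,
           gridsize=30, cmap='Blues')
plt.colorbar(label='std of weights in bin');
```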
from scipy.stats import gaussian_kde # fit an array of size [Ndim, Nsamples] data = np.vstack([x, y]) kde = gaussian_kde(data) # evaluate on a regular grid xgrid = np.linspace(-3.5, 3.5, 40) ygrid = np.linspace(-6, 6, 40) Xgrid, Ygrid = np.meshgrid(xgrid, ygrid) Z = kde.evaluate(np.vstack([Xgrid.ravel(), Ygrid.ravel()])) # Plot the result as an image plt.imshow(Z.reshape(Xgrid.shape), origin='lower', aspect='auto', extent=[-3.5, 3.5, -6, 6], cmap='Blues') cb = plt.colorbar() cb.set_label("density")
_____no_output_____
Apache-2.0
matplotlib/04.05-Histograms-and-Binnings.ipynb
purushothamgowthu/data-science-ipython-notebooks
The effect of a given mutation on antibody binding was represented by the apparent affinity (avidity) relative to that for wild-type (WT) gp120, calculated with the formula [(EC50_WT / EC50_mutant) / (EC50_WT for 2G12 / EC50_mutant for 2G12)] × 100.
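A small helper implementing that calculation might look like the sketch below (the EC50 variable names and example values are made up; the notebook's own data loading follows):

```python
def relative_avidity(ec50_wt, ec50_mut, ec50_wt_2g12, ec50_mut_2g12):
    """Apparent affinity (avidity) of a mutant relative to WT gp120,
    normalized by the corresponding 2G12 measurements."""
    return ((ec50_wt / ec50_mut) / (ec50_wt_2g12 / ec50_mut_2g12)) * 100

# hypothetical EC50 values, all in the same units
print(relative_avidity(0.10, 0.50, 0.20, 0.25))   # (0.2 / 0.8) * 100 = 25.0
```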
# Test data
VIH_final = pd.read_csv('../data/VIH_Test15.csv', index_col=0)

# original info data
vih_data = pd.read_csv("../data/HIV_escape_mutations.csv", sep="\t")
#vih_data["pred_ddg2EC50"] = vih_data["mCSM-AB_Pred"].apply(deltaG_to_Kd)*100

vih_original = vih_data.loc[vih_data["Mutation_type"]=="ORIGINAL"].copy()
vih_reverse = vih_data.loc[vih_data["Mutation_type"]=="REVERSE"]

# sort values to append to the prediction data table
vih_original.loc[:,"mut_code"] = (vih_reverse["Chain"]+vih_reverse["Mutation"].str[1:]).values
vih_original.sort_values(by='mut_code', inplace=True)
vih_original["Mutation_original"] = vih_original["Mutation"].str[-1]+vih_original["Mutation"].str[1:-1]+vih_original["Mutation"].str[0]

# label the experimental effect of each mutation
vih_original.loc[(vih_original['Exptal'] <= 33), "mutation-effect"] = "decreased"
vih_original.loc[(vih_original['Exptal'] > 300), "mutation-effect"] = "increased"
vih_original.loc[(vih_original['Exptal'] < 300) & (vih_original['Exptal'] > 33), "mutation-effect"] = "neutral"
vih_reverse.loc[(vih_reverse['Exptal'] <= 33), "mutation-effect"] = "decreased"
vih_reverse.loc[(vih_reverse['Exptal'] > 300), "mutation-effect"] = "increased"
vih_reverse.loc[(vih_reverse['Exptal'] < 300) & (vih_reverse['Exptal'] > 33), "mutation-effect"] = "neutral"

#xgbr = XGBRegressor()
#xgbr.load_model(fname='xgb_final_400F_smote_032019.sav')
#xgbr_borderline = XGBRegressor()
#xgbr_borderline.load_model(fname='xgb_final_400F_borderlinesmote_032019.sav')

# X and y data transformed to delta G
X = VIH_final.drop("Exptal", axis=1)
y_energy = (VIH_final["Exptal"]/1000).apply(Kd_2_dG)
y_binding = VIH_final["Exptal"].values

PreparePredictions(X).run()
X.ddg.sort_values().head(10)
vih_original.loc[vih_original["mutation-effect"]=="increased"]

#ridge_model = joblib.load('ridgeLinear_train15skempiAB_FINAL.pkl')
lasso_model = joblib.load('Lasso_train15skempiAB_FINAL.pkl')
elasticnet_model = joblib.load('elasticNet_train15skempiAB_FINAL.pkl')
svr_model = joblib.load('rbfSVRmodel_train15skempiAB_FINAL.pkl')
poly_model = joblib.load("poly2SVRmodel_train15skempiAB_FINAL.pkl")
#rf_model = joblib.load('RFmodel_train15skempiAB_FINAL.pkl')
gbt_model = joblib.load('GBTmodel_train15skempiAB_FINAL.overf.pkl')
#xgb_model = joblib.load('XGBmodel_train15skempiAB_FINAL.pkl')

#ridge_pred = ridge_model.predict(X)
lasso_pred = lasso_model.predict(X)
elasticnet_pred = elasticnet_model.predict(X)
svr_pred = svr_model.predict(X)
poly_pred = poly_model.predict(X)
#rf_pred = rf_model.predict(X)
gbt_pred = gbt_model.predict(X)
#xgb_pred = xgb_model.predict(X)

pred_stack = np.hstack([vih_original[["mutation-effect","mCSM-AB_Pred","Exptal"]].values,
                        lasso_pred.reshape((-1,1)), gbt_pred.reshape((-1,1)),
                        svr_pred.reshape((-1,1)), poly_pred.reshape((-1,1))])
pred_data = pd.DataFrame(pred_stack, columns=["mutation-effect","mCSM-AB_Pred","Exptal",
                                              "Lasso_pred","gbt_pred","svr_pred","poly_pred"])

# transform prediction score to a value relative to Kd, as referred to in the paper
#pred_data_binding = pred_data.applymap(deltaG_to_Kd)*100

pred_data["mean-pred"] = pred_data.loc[:,["Lasso_pred","gbt_pred","svr_pred"]].mean(axis=1)
pred_data
pred_data.loc[pred_data["mutation-effect"]=="increased"]
pred_data.loc[(pred_data["mean-pred"].abs() > 0.1)]

pred_data["True"] = y_energy.values
pred_data_binding["True"] = y_binding
#pred_data_converted.corr()
pred_data_binding.corr()
pred_data

average_pred_binding = pred_data_binding.drop("True", axis=1).loc[:,["gbt_pred","elasticnet_pred"]].mean(axis=1)
average_pred_energy = pred_data.drop("True", axis=1).loc[:,["gbt_pred","elasticnet_pred"]].mean(axis=1)

r2score = r2_score(y_energy, average_pred_energy)
rmse = mean_squared_error(y_energy, average_pred_energy)
print("R2 score:", r2score)
print("RMSE score:", np.sqrt(rmse))

np.corrcoef(y["Exptal"], average_pred)
# Corr mCSM-AB with converted mCSM AB data
np.corrcoef(y_binding, vih_reverse["pred_ddg2EC50"])
# Corr mCSM-AB with converted VIH paper data
np.corrcoef(y_energy, vih_reverse["mCSM-AB_Pred"])
# Corr FoldX feature alone
np.corrcoef(y["Exptal"], VIH_final["dg_change"].apply(deltaG_to_Kd)*100)

import seaborn as sns

#rmse_test = np.round(np.sqrt(mean_squared_error(y_test, y_pred_test)), 3)
df_pred = pd.DataFrame({"Predicted ddG(kcal/mol)": pred_data["gbt_pred"],
                        "Actual ddG(kcal/mol)": y_energy.values})
pearsonr_test = round(df_pred.corr().iloc[0,1], 3)

g = sns.regplot(x="Actual ddG(kcal/mol)", y="Predicted ddG(kcal/mol)", data=df_pred)
plt.title("Predicted vs Experimental ddG (Independent set: 123 complexes)")
plt.text(-2, 3, "pearsonr = %s" % pearsonr_test)
#plt.text(4.5,-0.5,"RMSE = %s" %rmse_test)
#plt.savefig("RFmodel_300_testfit.png",dpi=600)

PredictionError?
_____no_output_____
MIT
notebooks/benchmark_vih.ipynb
victorfica/Master-thesis
Example of extracting features from dataframes with Datetime indices

Assuming that time-varying measurements are taken at regular intervals can be sufficient in many situations. However, for a large number of tasks it is important to take into account **when** a measurement is made. An example is healthcare, where the interval between measurements of vital signs contains crucial information.

Tsfresh now supports calculator functions that use the index of the timeseries container in order to calculate the features. The only requirement for these functions is that the index of the input dataframe is of type `pd.DatetimeIndex`. These functions are contained in the new class `TimeBasedFCParameters`.

Note that the behaviour of all other functions is unaffected. The settings parameter of `extract_features()` can contain both index-dependent functions and 'regular' functions.
import pandas as pd from tsfresh.feature_extraction import extract_features # TimeBasedFCParameters contains all functions that use the Datetime index of the timeseries container from tsfresh.feature_extraction.settings import TimeBasedFCParameters
_____no_output_____
MIT
notebooks/feature_extraction_with_datetime_index.ipynb
hoesler/tsfresh
Build a time series container with Datetime indices

Let's build a dataframe with a datetime index. The format must include a `value` and a `kind` column, since each measurement has its own timestamp, i.e. measurements are not assumed to be simultaneous.
df = pd.DataFrame({"id": ["a", "a", "a", "a", "b", "b", "b", "b"], "value": [1, 2, 3, 1, 3, 1, 0, 8], "kind": ["temperature", "temperature", "pressure", "pressure", "temperature", "temperature", "pressure", "pressure"]}, index=pd.DatetimeIndex( ['2019-03-01 10:04:00', '2019-03-01 10:50:00', '2019-03-02 00:00:00', '2019-03-02 09:04:59', '2019-03-02 23:54:12', '2019-03-03 08:13:04', '2019-03-04 08:00:00', '2019-03-04 08:01:00'] )) df = df.sort_index() df
_____no_output_____
MIT
notebooks/feature_extraction_with_datetime_index.ipynb
hoesler/tsfresh
Right now `TimeBasedFCParameters` only contains `linear_trend_timewise`, which computes a linear trend, but uses the time difference in hours between measurements as the regressor. As always, you can add your own functions in `tsfresh/feature_extraction/feature_calculators.py` (a toy example of what such an index-aware calculation could look like is sketched below).
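For intuition, an index-aware calculation can be as simple as the toy sketch below (the function name is made up, and actually registering it as a tsfresh feature calculator requires following the conventions in `feature_calculators.py`, which we do not reproduce here):

```python
# a hypothetical index-aware feature: total time span of a series, in hours;
# x is expected to be a pd.Series with a pd.DatetimeIndex
def timespan_hours(x):
    return (x.index.max() - x.index.min()).total_seconds() / 3600.0

mask = (df["kind"] == "temperature") & (df["id"] == "a")
print(timespan_hours(df.loc[mask, "value"]))   # hours between the first and last measurement
```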
settings_time = TimeBasedFCParameters() settings_time
_____no_output_____
MIT
notebooks/feature_extraction_with_datetime_index.ipynb
hoesler/tsfresh
We extract the features as usual, specifying the column value, kind, and id.
X_tsfresh = extract_features(df, column_id="id", column_value='value', column_kind='kind', default_fc_parameters=settings_time) X_tsfresh.head()
Feature Extraction: 100%|██████████| 4/4 [00:00<00:00, 591.10it/s]
MIT
notebooks/feature_extraction_with_datetime_index.ipynb
hoesler/tsfresh
The output looks just like the usual output. If we compare it with the 'regular' `linear_trend` feature calculator, we can see that the intercept, p and R values are the same, as we'd expect; only the slope is now different (the sketch below shows why).
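To see why only the slope changes, here is a small sketch that fits the same trend by hand, once against the sample number and once against elapsed hours (using `scipy.stats.linregress`; the series picked here is arbitrary):

```python
import numpy as np
from scipy.stats import linregress

mask = (df["kind"] == "temperature") & (df["id"] == "a")
s = df.loc[mask, "value"]

hours = (s.index - s.index[0]).total_seconds() / 3600.0   # elapsed time in hours
print(linregress(np.arange(len(s)), s.values).slope)      # 'regular' slope (per sample)
print(linregress(hours, s.values).slope)                  # timewise slope (per hour)
```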
settings_regular = {'linear_trend': [ {'attr': 'pvalue'}, {'attr': 'rvalue'}, {'attr': 'intercept'}, {'attr': 'slope'}, {'attr': 'stderr'} ]} X_tsfresh = extract_features(df, column_id="id", column_value='value', column_kind='kind', default_fc_parameters=settings_regular) X_tsfresh.head()
Feature Extraction: 100%|██████████| 4/4 [00:00<00:00, 2517.59it/s]
MIT
notebooks/feature_extraction_with_datetime_index.ipynb
hoesler/tsfresh
Gymnasium
gym = pd.read_csv('../Results/Gym_Rating.csv') del gym['Unnamed: 0'] gym.replace('NAN', value=0, inplace=True) gym = gym.rename(columns={'gym Total Count':'Total Count', 'Facility gym':'Gymnasium Facility'}) gym['Rating']=gym['Rating'].astype(float) gym['Total Count']=gym['Total Count'].astype(int) gym.head() new_gym = gym.groupby(['City Name', 'Site Name']) gym_count_df = pd.DataFrame(new_gym['Site Name'].value_counts()) gym_count_df = gym_count_df.rename(columns={'Site Name': 'Total Count'}) gym_count_df = gym_count_df.reset_index(level=1) gym_count_df = gym_count_df.reset_index(level=0) gym_count_df = gym_count_df.reset_index(drop=True) gym_count_df.head() gym_count_final = gym_count_df.groupby(['City Name']) gym_count_final_df = pd.DataFrame(gym_count_final['Total Count'].median()) gym_count_final_df = gym_count_final_df.sort_values(['Total Count'])[::-1] gym_count_final_df = gym_count_final_df.reset_index() gym_count_final_df['Type']='Gymnasium' gym_count_final_df = gym_count_final_df.drop([6]) gym_count_final_df = gym_count_final_df.reset_index(drop=True) gym_count_final_df print("========================================") print("==================TEST====================") sns.factorplot(kind='bar',x='Type',y='Total Count',data=gym_count_final_df, hue='City Name', size=5, aspect=2.5) total_count = gym_count_final_df.groupby(['City Name'])['Total Count'].median().sort_values()[::-1].reset_index() total_count_df = pd.DataFrame(total_count) print(total_count_df) ranks_dict = {} y=1 for name in total_count_df['City Name']: ranks_dict[name] = y y=y+1 print(ranks_dict) plt.title('City Nearby Fitness Ranking', fontsize=20, fontweight='bold') plt.xlabel(' ', fontsize=15) plt.ylabel('Median Count', fontsize=15) plt.xticks(fontsize=12) plt.yticks(fontsize=12) new_labels = ['#1 New York', '#2 Chicago', '#3 Boston', '#4 Washington DC', '#5 Los Angeles', '#6 Austin', '#7 Raleigh', '#8 Atlanta'] plt.legend(new_labels, frameon=False, title='Rank', bbox_to_anchor=(.85, 1), loc=1, borderaxespad=0.) print("========================================") print("==================END====================") plt.savefig('Save_Figs/Fitness.png', bbox_inches='tight') plt.show()
======================================== ==================TEST==================== City Name Total Count 0 New York 20.0 1 Chicago 20.0 2 Boston 17.0 3 Washington DC 13.5 4 Los Angeles 13.0 5 Austin 7.5 6 Raleigh 5.0 7 Atlanta 5.0 {'New York': 1, 'Chicago': 2, 'Boston': 3, 'Washington DC': 4, 'Los Angeles': 5, 'Austin': 6, 'Raleigh': 7, 'Atlanta': 8} ======================================== ==================END====================
MIT
Amenities_Niyati/Plots/Amazon_nearby_Amenities_Fitness_Ranking.ipynb
gvo34/BC_Project1
TalkingData: Fraudulent Click Prediction

In this notebook, we will apply various boosting algorithms to solve an interesting classification problem from the domain of 'digital fraud'. The analysis is divided into the following sections:
- Understanding the business problem
- Understanding and exploring the data
- Feature engineering: creating new features
- Model building and evaluation: AdaBoost
- Model building and evaluation: Gradient Boosting
- Model building and evaluation: XGBoost

Understanding the Business Problem

TalkingData is a Chinese big data company, and one of their areas of expertise is mobile advertisements. In mobile advertisements, **click fraud** is a major source of losses. Click fraud is the practice of repeatedly clicking on an advertisement hosted on a website with the intention of generating revenue for the host website or draining revenue from the advertiser.

In this case, TalkingData happens to be serving the advertisers (their clients). TalkingData covers a whopping **approx. 70% of the active mobile devices in China**, and roughly 90% of the clicks they handle are potentially fraudulent (i.e. the user is actually not going to download the app after clicking). You can imagine the amount of money they can help clients save if they are able to predict whether a given click is fraudulent (or equivalently, whether a given click will result in a download).

Their current approach to this problem is a blacklist of IP addresses: IPs which produce lots of clicks but never install any apps. Now, they want to try some advanced techniques to predict the probability of a click being genuine or fraudulent. In this problem, we will use the features associated with clicks, such as IP address, operating system, device type, time of click, etc., to predict the probability of a click being fraudulent. They have released the problem on Kaggle.

Understanding and Exploring the Data

The data contains observations of about 240 million clicks, and whether a given click resulted in a download or not (1/0). On Kaggle, the data is split into train.csv and train_sample.csv (100,000 observations). We'll use the smaller train_sample.csv in this notebook for speed, though the full training data will obviously produce better results when training the model for Kaggle submissions.

The detailed data dictionary is mentioned here:
- ```ip```: IP address of the click
- ```app```: app id for marketing
- ```device```: device type id of the user's mobile phone (e.g. iPhone 6 Plus, iPhone 7, Huawei Mate 7, etc.)
- ```os```: OS version id of the user's mobile phone
- ```channel```: channel id of the mobile ad publisher
- ```click_time```: timestamp of the click (UTC)
- ```attributed_time```: if the user downloads the app after clicking an ad, this is the time of the app download
- ```is_attributed```: the target that is to be predicted, indicating whether the app was downloaded

Let's try finding some useful trends in the data.
import numpy as np import pandas as pd import sklearn import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.model_selection import GridSearchCV from sklearn.model_selection import cross_val_score from sklearn.preprocessing import LabelEncoder from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn import metrics import xgboost as xgb from xgboost import XGBClassifier from xgboost import plot_importance import gc # for deleting unused variables %matplotlib inline import os import warnings warnings.filterwarnings('ignore')
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
Reading the Data

The code below reads the train_sample.csv file if you set testing = True, else it reads the full train.csv file. You can read the sample while tuning the model etc., and then run the model on the full data once done.

Important Note: Save memory when the data is huge

Since the training data is quite huge, the program will be quite slow if you don't consciously follow some best practices to save memory. This notebook demonstrates some of those practices.
# reading training data # specify column dtypes to save memory (by default pandas reads some columns as floats) dtypes = { 'ip' : 'uint16', 'app' : 'uint16', 'device' : 'uint16', 'os' : 'uint16', 'channel' : 'uint16', 'is_attributed' : 'uint8', 'click_id' : 'uint32' # note that click_id is only in test data, not training data } # read training_sample.csv for quick testing/debug, else read the full train.csv testing = True if testing: train_path = "train_sample.csv" skiprows = None nrows = None colnames=['ip','app','device','os', 'channel', 'click_time', 'is_attributed'] else: train_path = "train.csv" skiprows = range(1, 144903891) nrows = 10000000 colnames=['ip','app','device','os', 'channel', 'click_time', 'is_attributed'] # read training data train_sample = pd.read_csv(train_path, skiprows=skiprows, nrows=nrows, dtype=dtypes, usecols=colnames) # length of training data len(train_sample.index) # Displays memory consumed by each column --- print(train_sample.memory_usage()) # space used by training data print('Training dataset uses {0} MB'.format(train_sample.memory_usage().sum()/1024**2)) # training data top rows train_sample.head()
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
Exploring the Data - Univariate Analysis

Let's now understand and explore the data, starting with its size and the data types of the train_sample data.
# look at non-null values, number of entries etc. # there are no missing values train_sample.info() # Basic exploratory analysis # Number of unique values in each column def fraction_unique(x): return len(train_sample[x].unique()) number_unique_vals = {x: fraction_unique(x) for x in train_sample.columns} number_unique_vals # All columns apart from click time are originally int type, # though note that they are all actually categorical train_sample.dtypes
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
Certain 'apps' account for a disproportionately high number of instances/rows (each row is a click), as the plot below shows.
# # distribution of 'app' # # some 'apps' have a disproportionately high number of clicks (>15k), and some are very rare (3-4) plt.figure(figsize=(14, 8)) sns.countplot(x="app", data=train_sample) # # distribution of 'device' # # this is expected because a few popular devices are used heavily plt.figure(figsize=(14, 8)) sns.countplot(x="device", data=train_sample) # # channel: various channels get clicks in comparable quantities plt.figure(figsize=(14, 8)) sns.countplot(x="channel", data=train_sample) # # os: there are a couple commos OSes (android and ios?), though some are rare and can indicate suspicion plt.figure(figsize=(14, 8)) sns.countplot(x="os", data=train_sample)
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
Let's now look at the distribution of the target variable 'is_attributed'.
# # target variable distribution 100*(train_sample['is_attributed'].astype('object').value_counts()/len(train_sample.index))
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
Only **about 0.2% of clicks result in a download** (`is_attributed = 1`); in other words, the vast majority of clicks are potentially fraudulent, which is expected in a fraud detection problem. Such high class imbalance is probably going to be the toughest challenge of this problem.

Exploring the Data - Segmented Univariate Analysis

Let's now look at how the target variable varies with the various predictors.
# plot the average of 'is_attributed', or 'download rate' # with app (clearly this is non-readable) app_target = train_sample.groupby('app').is_attributed.agg(['mean', 'count']) app_target
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
This is clearly unreadable, so let's first get rid of the rare apps (keeping only those whose click count is above the 80th percentile) and plot the rest.
frequent_apps = train_sample.groupby('app').size().reset_index(name='count') frequent_apps = frequent_apps[frequent_apps['count']>frequent_apps['count'].quantile(0.80)] frequent_apps = frequent_apps.merge(train_sample, on='app', how='inner') frequent_apps.head() plt.figure(figsize=(10,10)) sns.countplot(y="app", hue="is_attributed", data=frequent_apps);
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
You can do lots of other interesting analyses with the existing features. For now, let's create some new features which will probably improve the model.

Feature Engineering

Let's now derive some new features from the existing ones. There are a number of features one can extract from ```click_time``` itself, and by grouping combinations of IP with other features.

Datetime Based Features
# Creating datetime variables # takes in a df, adds date/time based columns to it, and returns the modified df def timeFeatures(df): # Derive new features using the click_time column df['datetime'] = pd.to_datetime(df['click_time']) df['day_of_week'] = df['datetime'].dt.dayofweek df["day_of_year"] = df["datetime"].dt.dayofyear df["month"] = df["datetime"].dt.month df["hour"] = df["datetime"].dt.hour return df # creating new datetime variables and dropping the old ones train_sample = timeFeatures(train_sample) train_sample.drop(['click_time', 'datetime'], axis=1, inplace=True) train_sample.head() # datatypes # note that by default the new datetime variables are int64 train_sample.dtypes # memory used by training data print('Training dataset uses {0} MB'.format(train_sample.memory_usage().sum()/1024**2)) # lets convert the variables back to lower dtype again int_vars = ['app', 'device', 'os', 'channel', 'day_of_week','day_of_year', 'month', 'hour'] train_sample[int_vars] = train_sample[int_vars].astype('uint16') train_sample.dtypes # space used by training data print('Training dataset uses {0} MB'.format(train_sample.memory_usage().sum()/1024**2))
Training dataset uses 1.812103271484375 MB
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
IP Grouping Based Features

Let's now create some important features by grouping IP addresses with features such as os, channel, hour, day, etc. The count of each IP address will also be a feature.

Note that although we are deriving new features by grouping IP addresses, using the IP address itself as a feature is not a good idea. This is because if a new IP address is seen in the test data, the model will see a new 'category' and will not be able to make predictions (IP is a categorical variable, it has just been encoded with numbers).
# number of clicks by count of IP address # note that we are explicitly asking pandas to re-encode the aggregated features # as 'int16' to save memory ip_count = train_sample.groupby('ip').size().reset_index(name='ip_count').astype('int16') ip_count.head()
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
We can now merge this dataframe with the original training df. Similarly, we can create combinations of various features such as ip_day_hour (count of ip-day-hour combinations), ip_hour_channel, ip_hour_app, etc. The following function takes in a dataframe and creates these features.
# creates groupings of IP addresses with other features and appends the new features to the df def grouped_features(df): # ip_count ip_count = df.groupby('ip').size().reset_index(name='ip_count').astype('uint16') ip_day_hour = df.groupby(['ip', 'day_of_week', 'hour']).size().reset_index(name='ip_day_hour').astype('uint16') ip_hour_channel = df[['ip', 'hour', 'channel']].groupby(['ip', 'hour', 'channel']).size().reset_index(name='ip_hour_channel').astype('uint16') ip_hour_os = df.groupby(['ip', 'hour', 'os']).channel.count().reset_index(name='ip_hour_os').astype('uint16') ip_hour_app = df.groupby(['ip', 'hour', 'app']).channel.count().reset_index(name='ip_hour_app').astype('uint16') ip_hour_device = df.groupby(['ip', 'hour', 'device']).channel.count().reset_index(name='ip_hour_device').astype('uint16') # merge the new aggregated features with the df df = pd.merge(df, ip_count, on='ip', how='left') del ip_count df = pd.merge(df, ip_day_hour, on=['ip', 'day_of_week', 'hour'], how='left') del ip_day_hour df = pd.merge(df, ip_hour_channel, on=['ip', 'hour', 'channel'], how='left') del ip_hour_channel df = pd.merge(df, ip_hour_os, on=['ip', 'hour', 'os'], how='left') del ip_hour_os df = pd.merge(df, ip_hour_app, on=['ip', 'hour', 'app'], how='left') del ip_hour_app df = pd.merge(df, ip_hour_device, on=['ip', 'hour', 'device'], how='left') del ip_hour_device return df train_sample = grouped_features(train_sample) train_sample.head() print('Training dataset uses {0} MB'.format(train_sample.memory_usage().sum()/1024**2)) # garbage collect (unused) object gc.collect()
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
Modelling

Let's now build models to predict the variable ```is_attributed``` (downloaded). We'll try several variants of boosting (AdaBoost, gradient boosting and XGBoost), tune the hyperparameters of each model and choose the one which gives the best performance.

In the original Kaggle competition, the metric for model evaluation is the **area under the ROC curve**.
# create x and y train X = train_sample.drop('is_attributed', axis=1) y = train_sample[['is_attributed']] # split data into train and test/validation sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=101) print(X_train.shape) print(y_train.shape) print(X_test.shape) print(y_test.shape) # check the average download rates in train and test data, should be comparable print(y_train.mean()) print(y_test.mean())
is_attributed 0.002275 dtype: float64 is_attributed 0.00225 dtype: float64
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
AdaBoost
# adaboost classifier with max 600 decision trees of depth=2 # learning_rate/shrinkage=1.5 # base estimator tree = DecisionTreeClassifier(max_depth=2) # adaboost with the tree as base estimator adaboost_model_1 = AdaBoostClassifier( base_estimator=tree, n_estimators=600, learning_rate=1.5, algorithm="SAMME") # fit adaboost_model_1.fit(X_train, y_train) # predictions # the second column represents the probability of a click resulting in a download predictions = adaboost_model_1.predict_proba(X_test) predictions[:10] # metrics: AUC metrics.roc_auc_score(y_test, predictions[:,1])
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
AdaBoost - Hyperparameter Tuning

Let's now tune the hyperparameters of the AdaBoost classifier. In this case, we have two types of hyperparameters: those of the component trees (max_depth etc.) and those of the ensemble (n_estimators, learning_rate etc.). We can tune both in a single grid search: keys of the form ```base_estimator__parameter_name``` (with a double underscore) belong to the trees (base estimator), and the rest belong to the ensemble.
# parameter grid param_grid = {"base_estimator__max_depth" : [2, 5], "n_estimators": [200, 400, 600] } # base estimator tree = DecisionTreeClassifier() # adaboost with the tree as base estimator # learning rate is arbitrarily set to 0.6, we'll discuss learning_rate below ABC = AdaBoostClassifier( base_estimator=tree, learning_rate=0.6, algorithm="SAMME") # run grid search folds = 3 grid_search_ABC = GridSearchCV(ABC, cv = folds, param_grid=param_grid, scoring = 'roc_auc', return_train_score=True, verbose = 1) # fit grid_search_ABC.fit(X_train, y_train) # cv results cv_results = pd.DataFrame(grid_search_ABC.cv_results_) cv_results # plotting AUC with hyperparameter combinations plt.figure(figsize=(16,6)) for n, depth in enumerate(param_grid['base_estimator__max_depth']): # subplot 1/n plt.subplot(1,3, n+1) depth_df = cv_results[cv_results['param_base_estimator__max_depth']==depth] plt.plot(depth_df["param_n_estimators"], depth_df["mean_test_score"]) plt.plot(depth_df["param_n_estimators"], depth_df["mean_train_score"]) plt.xlabel('n_estimators') plt.ylabel('AUC') plt.title("max_depth={0}".format(depth)) plt.ylim([0.60, 1]) plt.legend(['test score', 'train score'], loc='upper left') plt.xscale('log')
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
The results above show that:
- The ensemble with max_depth=5 is clearly overfitting (training AUC is almost 1, while the test score is much lower)
- At max_depth=2, the model performs slightly better (approx. 95% AUC) with a higher test score

Thus, we should go ahead with ```max_depth=2``` and ```n_estimators=200```.

Note that we haven't experimented with many other important hyperparameters so far, such as ```learning_rate```, ```subsample``` etc., and the results might be considerably improved by tuning them. We'll experiment with these hyperparameters next.
# model performance on test data with chosen hyperparameters # base estimator tree = DecisionTreeClassifier(max_depth=2) # adaboost with the tree as base estimator # learning rate is arbitrarily set, we'll discuss learning_rate below ABC = AdaBoostClassifier( base_estimator=tree, learning_rate=0.6, n_estimators=200, algorithm="SAMME") ABC.fit(X_train, y_train) # predict on test data predictions = ABC.predict_proba(X_test) predictions[:10] # roc auc metrics.roc_auc_score(y_test, predictions[:, 1])
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
Gradient Boosting Classifier

Let's now try the gradient boosting classifier. We'll experiment with two main hyperparameters: ```learning_rate``` (shrinkage) and ```subsample```. By setting the learning rate to less than 1, we can regularize the model. A model with a higher learning_rate learns fast but is prone to overfitting; one with a lower learning rate learns slowly but avoids overfitting.

Also, there's a trade-off between ```learning_rate``` and ```n_estimators```: the higher the learning rate, the fewer trees the model needs (and thus we usually tune only one of them).

Finally, by subsampling (setting ```subsample``` to less than 1), each individual tree is built on a random fraction ```subsample``` of the training data. That way, the trees are trained on different subsets, which reduces the model's variance.
# parameter grid param_grid = {"learning_rate": [0.2, 0.6, 0.9], "subsample": [0.3, 0.6, 0.9] } # adaboost with the tree as base estimator GBC = GradientBoostingClassifier(max_depth=2, n_estimators=200) # run grid search folds = 3 grid_search_GBC = GridSearchCV(GBC, cv = folds, param_grid=param_grid, scoring = 'roc_auc', return_train_score=True, verbose = 1) grid_search_GBC.fit(X_train, y_train) cv_results = pd.DataFrame(grid_search_GBC.cv_results_) cv_results.head() # # plotting plt.figure(figsize=(16,6)) for n, subsample in enumerate(param_grid['subsample']): # subplot 1/n plt.subplot(1,len(param_grid['subsample']), n+1) df = cv_results[cv_results['param_subsample']==subsample] plt.plot(df["param_learning_rate"], df["mean_test_score"]) plt.plot(df["param_learning_rate"], df["mean_train_score"]) plt.xlabel('learning_rate') plt.ylabel('AUC') plt.title("subsample={0}".format(subsample)) plt.ylim([0.60, 1]) plt.legend(['test score', 'train score'], loc='upper left') plt.xscale('log')
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
It is clear from the plot above that the model with a lower subsample ratio performs better, while those with higher subsamples tend to overfit. Also, a lower learning rate results in less overfitting.

XGBoost

Let's finally try XGBoost. The hyperparameters are the same, some important ones being ```subsample```, ```learning_rate```, ```max_depth``` etc.
# fit model on training data with default hyperparameters model = XGBClassifier() model.fit(X_train, y_train) # make predictions for test data # use predict_proba since we need probabilities to compute auc y_pred = model.predict_proba(X_test) y_pred[:10] # evaluate predictions roc = metrics.roc_auc_score(y_test, y_pred[:, 1]) print("AUC: %.2f%%" % (roc * 100.0))
AUC: 94.85%
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
The ROC AUC in this case is about 0.95 with default hyperparameters. Let's try changing the hyperparameters - an exhaustive list of XGBoost hyperparameters is here: http://xgboost.readthedocs.io/en/latest/parameter.html

Let's now tune the hyperparameters using k-fold CV with grid search to find their optimal values.
# hyperparameter tuning with XGBoost # creating a KFold object folds = 3 # specify range of hyperparameters param_grid = {'learning_rate': [0.2, 0.6], 'subsample': [0.3, 0.6, 0.9]} # specify model xgb_model = XGBClassifier(max_depth=2, n_estimators=200) # set up GridSearchCV() model_cv = GridSearchCV(estimator = xgb_model, param_grid = param_grid, scoring= 'roc_auc', cv = folds, verbose = 1, return_train_score=True) # fit the model model_cv.fit(X_train, y_train) # cv results cv_results = pd.DataFrame(model_cv.cv_results_) cv_results # convert parameters to int for plotting on x-axis #cv_results['param_learning_rate'] = cv_results['param_learning_rate'].astype('float') #cv_results['param_max_depth'] = cv_results['param_max_depth'].astype('float') cv_results.head() # # plotting plt.figure(figsize=(16,6)) param_grid = {'learning_rate': [0.2, 0.6], 'subsample': [0.3, 0.6, 0.9]} for n, subsample in enumerate(param_grid['subsample']): # subplot 1/n plt.subplot(1,len(param_grid['subsample']), n+1) df = cv_results[cv_results['param_subsample']==subsample] plt.plot(df["param_learning_rate"], df["mean_test_score"]) plt.plot(df["param_learning_rate"], df["mean_train_score"]) plt.xlabel('learning_rate') plt.ylabel('AUC') plt.title("subsample={0}".format(subsample)) plt.ylim([0.60, 1]) plt.legend(['test score', 'train score'], loc='upper left') plt.xscale('log')
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
The results show that a subsample size of 0.6 and a learning_rate of about 0.2 seem optimal. Also, XGBoost has produced the highest ROC AUC obtained so far (across the various hyperparameters tried). Let's build a final model with the chosen hyperparameters.
# chosen hyperparameters
# 'objective':'binary:logistic' outputs probability rather than label, which we need for auc
params = {'learning_rate': 0.2,
          'max_depth': 2,
          'n_estimators': 200,
          'subsample': 0.6,
          'objective': 'binary:logistic'}

# fit model on training data
# the dict is unpacked with ** so XGBClassifier actually receives the keyword arguments
model = XGBClassifier(**params)
model.fit(X_train, y_train)

# predict
y_pred = model.predict_proba(X_test)
y_pred[:10]
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
The first column in y_pred is P(0), i.e. P(not fraud), and the second column is P(1), i.e. P(fraud).
# roc_auc (using the metrics module imported earlier)
auc = metrics.roc_auc_score(y_test, y_pred[:, 1])
auc
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
Finally, let's also look at the feature importances.
# feature importance importance = dict(zip(X_train.columns, model.feature_importances_)) importance # plot plt.bar(range(len(model.feature_importances_)), model.feature_importances_) plt.show()
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
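The bar chart above labels features only by position. As an optional sketch (assuming the fitted ```model``` and the ```X_train``` dataframe from the cells above), the importances can be plotted against the column names to make the chart easier to read:

```
# sketch: plot the XGBoost feature importances with the training column names
import pandas as pd
import matplotlib.pyplot as plt

feat_imp = pd.Series(model.feature_importances_, index=X_train.columns).sort_values(ascending=False)
feat_imp.plot(kind='bar')
plt.ylabel('importance')
plt.tight_layout()
plt.show()
```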
Predictions on Test Data

Since this problem is hosted on Kaggle, you can choose to make predictions on the test data and submit your results. Please note the following points and recommendations if you go ahead with Kaggle.

Recommendations for training:

- We have used only a fraction of the training set (train_sample, 100k rows); the full training data on Kaggle (train.csv) has about 180 million rows. You'll get good results only if you train the model on a significant portion of the training dataset.
- Because of the size, you'll need to use Kaggle kernels to train the model on the full training data. Kaggle kernels provide powerful computation capacity in the cloud (for free).
- Even on the kernel, you may need to use a portion of the training dataset (try using the last 20-30 million rows).
- Make sure you save memory by following some tricks and best practices (a sketch follows below), else you won't be able to train the model at all on a large dataset.
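One concrete way to follow the memory advice above (a sketch, not part of the original notebook; the column names follow the TalkingData train.csv schema used elsewhere in this notebook, and ```path``` is assumed to point at the competition data):

```
# sketch: read the large train.csv with compact dtypes and only the needed columns
import pandas as pd

dtypes = {
    'ip': 'uint32',
    'app': 'uint16',
    'device': 'uint16',
    'os': 'uint16',
    'channel': 'uint16',
    'is_attributed': 'uint8',
}
cols = ['ip', 'app', 'device', 'os', 'channel', 'click_time', 'is_attributed']

# nrows/skiprows can additionally be used to load only the most recent rows
train = pd.read_csv(path + 'train.csv', usecols=cols, dtype=dtypes, parse_dates=['click_time'])
```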
# # read submission file #sample_sub = pd.read_csv(path+'sample_submission.csv') #sample_sub.head() # # predict probability of test data # test_final = pd.read_csv(path+'test.csv') # test_final.head() # # predictions on test data # test_final = timeFeatures(test_final) # test_final.head() # test_final.drop(['click_time', 'datetime'], axis=1, inplace=True) # test_final.head() # test_final[categorical_cols]=test_final[categorical_cols].apply(lambda x: le.fit_transform(x)) # test_final.info() # # number of clicks by IP # ip_count = test_final.groupby('ip')['channel'].count().reset_index() # ip_count.columns = ['ip', 'count_by_ip'] # ip_count.head() # merge this with the training data # test_final = pd.merge(test_final, ip_count, on='ip', how='left') # del ip_count # test_final.info() # # predict on test data # y_pred_test = model.predict_proba(test_final.drop('click_id', axis=1)) # y_pred_test[:10] # # # create submission file # sub = pd.DataFrame() # sub['click_id'] = test_final['click_id'] # sub['is_attributed'] = y_pred_test[:, 1] # sub.head() # sub.to_csv('kshitij_sub_03.csv', float_format='%.8f', index=False) # # model # dtrain = xgb.DMatrix(X_train, y_train) # del X_train, y_train # gc.collect() # watchlist = [(dtrain, 'train')] # model = xgb.train(params, dtrain, 30, watchlist, maximize=True, verbose_eval=1) # del dtrain # gc.collect() # # Plot the feature importance from xgboost # plot_importance(model) # plt.gcf().savefig('feature_importance_xgb.png') # # Load the test for predict # test = pd.read_csv(path+"test.csv") # test.head() # # number of clicks by IP # ip_count = train_sample.groupby('ip')['channel'].count().reset_index() # ip_count.columns = ['ip', 'count_by_ip'] # ip_count.head() # test = pd.merge(test, ip_count, on='ip', how='left', sort=False) # gc.collect() # test = timeFeatures(test) # test.drop(['click_time', 'datetime'], axis=1, inplace=True) # test.head() # print(test.columns) # print(train_sample.columns) # test = test[['click_id','ip', 'app', 'device', 'os', 'channel', 'day_of_week', # 'day_of_year', 'month', 'hour', 'count_by_ip']] # dtest = xgb.DMatrix(test.drop('click_id', axis=1)) # # Save the predictions # sub = pd.DataFrame() # sub['click_id'] = test['click_id'] # sub['is_attributed'] = model.predict(dtest, ntree_limit=model.best_ntree_limit) # sub.to_csv('xgb_sub.csv', float_format='%.8f', index=False) # sub.shape
_____no_output_____
MIT
TalkingData+Click+Fraud+.ipynb
gyanadata/TalkingData-Fraudulent-Click-Prediction
Topic 2: Neural network

Lesson 1: Introduction to Neural Networks

1. AND perceptron

Complete the cell below:
import pandas as pd # TODO: Set weight1, weight2, and bias weight1 = 0.0 weight2 = 0.0 bias = 0.0 # DON'T CHANGE ANYTHING BELOW # Inputs and outputs test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)] correct_outputs = [False, False, False, True] outputs = [] # Generate and check output for test_input, correct_output in zip(test_inputs, correct_outputs): linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias output = int(linear_combination >= 0) is_correct_string = 'Yes' if output == correct_output else 'No' outputs.append([test_input[0], test_input[1], linear_combination, output, is_correct_string]) # Print output num_wrong = len([output[4] for output in outputs if output[4] == 'No']) output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct']) if not num_wrong: print('Nice! You got it all correct.\n') else: print('You got {} wrong. Keep trying!\n'.format(num_wrong)) print(output_frame.to_string(index=False))
_____no_output_____
MIT
exercise_notebooks_my_solutions/2. Neural Networks/1. Introduction to Neural Networks.ipynb
Yixuan-Lee/udacity-deep-learning-nanodegree
My answer:
import pandas as pd # TODO: Set weight1, weight2, and bias k = 100 weight1 = k * 1.0 weight2 = k * 1.0 bias = k * (-2.0) # DON'T CHANGE ANYTHING BELOW # Inputs and outputs test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)] correct_outputs = [False, False, False, True] outputs = [] # Generate and check output for test_input, correct_output in zip(test_inputs, correct_outputs): linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias output = int(linear_combination >= 0) is_correct_string = 'Yes' if output == correct_output else 'No' outputs.append([test_input[0], test_input[1], linear_combination, output, is_correct_string]) # Print output num_wrong = len([output[4] for output in outputs if output[4] == 'No']) output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct']) if not num_wrong: print('Nice! You got it all correct.\n') else: print('You got {} wrong. Keep trying!\n'.format(num_wrong)) print(output_frame.to_string(index=False))
Nice! You got it all correct. Input 1 Input 2 Linear Combination Activation Output Is Correct 0 0 -200.0 0 Yes 0 1 -100.0 0 Yes 1 0 -100.0 0 Yes 1 1 0.0 1 Yes
MIT
exercise_notebooks_my_solutions/2. Neural Networks/1. Introduction to Neural Networks.ipynb
Yixuan-Lee/udacity-deep-learning-nanodegree
2. OR Perceptron

Complete the cell below:
import pandas as pd # TODO: Set weight1, weight2, and bias weight1 = 0.0 weight2 = 0.0 bias = 0.0 # DON'T CHANGE ANYTHING BELOW # Inputs and outputs test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)] correct_outputs = [False, True, True, True] outputs = [] # Generate and check output for test_input, correct_output in zip(test_inputs, correct_outputs): linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias output = int(linear_combination >= 0) is_correct_string = 'Yes' if output == correct_output else 'No' outputs.append([test_input[0], test_input[1], linear_combination, output, is_correct_string]) # Print output num_wrong = len([output[4] for output in outputs if output[4] == 'No']) output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct']) if not num_wrong: print('Nice! You got it all correct.\n') else: print('You got {} wrong. Keep trying!\n'.format(num_wrong)) print(output_frame.to_string(index=False))
_____no_output_____
MIT
exercise_notebooks_my_solutions/2. Neural Networks/1. Introduction to Neural Networks.ipynb
Yixuan-Lee/udacity-deep-learning-nanodegree
My answer:
import pandas as pd # TODO: Set weight1, weight2, and bias k = 100 weight1 = k * 1.0 weight2 = k * 1.0 bias = k * (-1.0) # DON'T CHANGE ANYTHING BELOW # Inputs and outputs test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)] correct_outputs = [False, True, True, True] outputs = [] # Generate and check output for test_input, correct_output in zip(test_inputs, correct_outputs): linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias output = int(linear_combination >= 0) is_correct_string = 'Yes' if output == correct_output else 'No' outputs.append([test_input[0], test_input[1], linear_combination, output, is_correct_string]) # Print output num_wrong = len([output[4] for output in outputs if output[4] == 'No']) output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct']) if not num_wrong: print('Nice! You got it all correct.\n') else: print('You got {} wrong. Keep trying!\n'.format(num_wrong)) print(output_frame.to_string(index=False))
Nice! You got it all correct. Input 1 Input 2 Linear Combination Activation Output Is Correct 0 0 -100.0 0 Yes 0 1 0.0 1 Yes 1 0 0.0 1 Yes 1 1 100.0 1 Yes
MIT
exercise_notebooks_my_solutions/2. Neural Networks/1. Introduction to Neural Networks.ipynb
Yixuan-Lee/udacity-deep-learning-nanodegree
Two ways to transform an AND perceptron into an OR perceptron:

* Increase the weights $w$
* Decrease the magnitude of the bias $|b|$

3. NOT Perceptron

Complete the code below. Only treat the second number in ```test_inputs``` as the input; ignore the first number.
import pandas as pd # TODO: Set weight1, weight2, and bias weight1 = 0.0 weight2 = 0.0 bias = 0.0 # DON'T CHANGE ANYTHING BELOW # Inputs and outputs test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)] correct_outputs = [True, False, True, False] outputs = [] # Generate and check output for test_input, correct_output in zip(test_inputs, correct_outputs): linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias output = int(linear_combination >= 0) is_correct_string = 'Yes' if output == correct_output else 'No' outputs.append([test_input[0], test_input[1], linear_combination, output, is_correct_string]) # Print output num_wrong = len([output[4] for output in outputs if output[4] == 'No']) output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct']) if not num_wrong: print('Nice! You got it all correct.\n') else: print('You got {} wrong. Keep trying!\n'.format(num_wrong)) print(output_frame.to_string(index=False))
_____no_output_____
MIT
exercise_notebooks_my_solutions/2. Neural Networks/1. Introduction to Neural Networks.ipynb
Yixuan-Lee/udacity-deep-learning-nanodegree
My answer:
import pandas as pd # TODO: Set weight1, weight2, and bias k = 100 weight1 = 0.0 weight2 = k * (-1.0) bias = 0.0 # DON'T CHANGE ANYTHING BELOW # Inputs and outputs test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)] correct_outputs = [True, False, True, False] outputs = [] # Generate and check output for test_input, correct_output in zip(test_inputs, correct_outputs): linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias output = int(linear_combination >= 0) is_correct_string = 'Yes' if output == correct_output else 'No' outputs.append([test_input[0], test_input[1], linear_combination, output, is_correct_string]) # Print output num_wrong = len([output[4] for output in outputs if output[4] == 'No']) output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct']) if not num_wrong: print('Nice! You got it all correct.\n') else: print('You got {} wrong. Keep trying!\n'.format(num_wrong)) print(output_frame.to_string(index=False))
Nice! You got it all correct. Input 1 Input 2 Linear Combination Activation Output Is Correct 0 0 0.0 1 Yes 0 1 -100.0 0 Yes 1 0 0.0 1 Yes 1 1 -100.0 0 Yes
MIT
exercise_notebooks_my_solutions/2. Neural Networks/1. Introduction to Neural Networks.ipynb
Yixuan-Lee/udacity-deep-learning-nanodegree
4. XOR Perceptron

An XOR perceptron can be built from an AND perceptron, an OR perceptron and a NOT perceptron (image source: Udacity). ```NAND``` consists of an AND perceptron followed by a NOT perceptron; a minimal sketch of this construction appears just below.

5. Perceptron algorithm

Complete the cell below:
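Before filling in the perceptron algorithm cell, here is the minimal sketch of the XOR construction described in section 4. The specific weights are an illustrative choice (any weights implementing AND, OR and NAND would work):

```
# sketch: XOR assembled from AND, OR and NAND perceptrons
def step(z):
    return int(z >= 0)

def and_p(x1, x2):
    return step(x1 + x2 - 2)      # fires only when both inputs are 1

def or_p(x1, x2):
    return step(x1 + x2 - 1)      # fires when at least one input is 1

def nand_p(x1, x2):
    return step(-x1 - x2 + 1.5)   # NOT applied to AND

def xor_p(x1, x2):
    return and_p(or_p(x1, x2), nand_p(x1, x2))

print([xor_p(x1, x2) for x1, x2 in [(0, 0), (0, 1), (1, 0), (1, 1)]])  # [0, 1, 1, 0]
```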
import numpy as np # Setting the random seed, feel free to change it and see different solutions. np.random.seed(42) def stepFunction(t): if t >= 0: return 1 return 0 def prediction(X, W, b): return stepFunction((np.matmul(X,W)+b)[0]) # TODO: Fill in the code below to implement the perceptron trick. # The function should receive as inputs the data X, the labels y, # the weights W (as an array), and the bias b, # update the weights and bias W, b, according to the perceptron algorithm, # and return W and b. def perceptronStep(X, y, W, b, learn_rate = 0.01): # Fill in code return W, b # This function runs the perceptron algorithm repeatedly on the dataset, # and returns a few of the boundary lines obtained in the iterations, # for plotting purposes. # Feel free to play with the learning rate and the num_epochs, # and see your results plotted below. def trainPerceptronAlgorithm(X, y, learn_rate = 0.01, num_epochs = 25): x_min, x_max = min(X.T[0]), max(X.T[0]) y_min, y_max = min(X.T[1]), max(X.T[1]) W = np.array(np.random.rand(2,1)) b = np.random.rand(1)[0] + x_max # These are the solution lines that get plotted below. boundary_lines = [] for i in range(num_epochs): # In each epoch, we apply the perceptron step. W, b = perceptronStep(X, y, W, b, learn_rate) boundary_lines.append((-W[0]/W[1], -b/W[1])) return boundary_lines
_____no_output_____
MIT
exercise_notebooks_my_solutions/2. Neural Networks/1. Introduction to Neural Networks.ipynb
Yixuan-Lee/udacity-deep-learning-nanodegree
This is data.csv (one "x1,x2,label" row per line):

```
0.78051,-0.063669,1
0.28774,0.29139,1
0.40714,0.17878,1
0.2923,0.4217,1
0.50922,0.35256,1
0.27785,0.10802,1
0.27527,0.33223,1
0.43999,0.31245,1
0.33557,0.42984,1
0.23448,0.24986,1
0.0084492,0.13658,1
0.12419,0.33595,1
0.25644,0.42624,1
0.4591,0.40426,1
0.44547,0.45117,1
0.42218,0.20118,1
0.49563,0.21445,1
0.30848,0.24306,1
0.39707,0.44438,1
0.32945,0.39217,1
0.40739,0.40271,1
0.3106,0.50702,1
0.49638,0.45384,1
0.10073,0.32053,1
0.69907,0.37307,1
0.29767,0.69648,1
0.15099,0.57341,1
0.16427,0.27759,1
0.33259,0.055964,1
0.53741,0.28637,1
0.19503,0.36879,1
0.40278,0.035148,1
0.21296,0.55169,1
0.48447,0.56991,1
0.25476,0.34596,1
0.21726,0.28641,1
0.67078,0.46538,1
0.3815,0.4622,1
0.53838,0.32774,1
0.4849,0.26071,1
0.37095,0.38809,1
0.54527,0.63911,1
0.32149,0.12007,1
0.42216,0.61666,1
0.10194,0.060408,1
0.15254,0.2168,1
0.45558,0.43769,1
0.28488,0.52142,1
0.27633,0.21264,1
0.39748,0.31902,1
0.5533,1,0
0.44274,0.59205,0
0.85176,0.6612,0
0.60436,0.86605,0
0.68243,0.48301,0
1,0.76815,0
0.72989,0.8107,0
0.67377,0.77975,0
0.78761,0.58177,0
0.71442,0.7668,0
0.49379,0.54226,0
0.78974,0.74233,0
0.67905,0.60921,0
0.6642,0.72519,0
0.79396,0.56789,0
0.70758,0.76022,0
0.59421,0.61857,0
0.49364,0.56224,0
0.77707,0.35025,0
0.79785,0.76921,0
0.70876,0.96764,0
0.69176,0.60865,0
0.66408,0.92075,0
0.65973,0.66666,0
0.64574,0.56845,0
0.89639,0.7085,0
0.85476,0.63167,0
0.62091,0.80424,0
0.79057,0.56108,0
0.58935,0.71582,0
0.56846,0.7406,0
0.65912,0.71548,0
0.70938,0.74041,0
0.59154,0.62927,0
0.45829,0.4641,0
0.79982,0.74847,0
0.60974,0.54757,0
0.68127,0.86985,0
0.76694,0.64736,0
0.69048,0.83058,0
0.68122,0.96541,0
0.73229,0.64245,0
0.76145,0.60138,0
0.58985,0.86955,0
0.73145,0.74516,0
0.77029,0.7014,0
0.73156,0.71782,0
0.44556,0.57991,0
0.85275,0.85987,0
0.51912,0.62359,0
```

My answer:
import numpy as np X = np.array([ [0.78051,-0.063669], [0.28774,0.29139], [0.40714,0.17878], [0.2923,0.4217], [0.50922,0.35256], [0.27785,0.10802], [0.27527,0.33223], [0.43999,0.31245], [0.33557,0.42984], [0.23448,0.24986], [0.0084492,0.13658], [0.12419,0.33595], [0.25644,0.42624], [0.4591,0.40426], [0.44547,0.45117], [0.42218,0.20118], [0.49563,0.21445], [0.30848,0.24306], [0.39707,0.44438], [0.32945,0.39217], [0.40739,0.40271], [0.3106,0.50702], [0.49638,0.45384], [0.10073,0.32053], [0.69907,0.37307], [0.29767,0.69648], [0.15099,0.57341], [0.16427,0.27759], [0.33259,0.055964], [0.53741,0.28637], [0.19503,0.36879], [0.40278,0.035148], [0.21296,0.55169], [0.48447,0.56991], [0.25476,0.34596], [0.21726,0.28641], [0.67078,0.46538], [0.3815,0.4622], [0.53838,0.32774], [0.4849,0.26071], [0.37095,0.38809], [0.54527,0.63911], [0.32149,0.12007], [0.42216,0.61666], [0.10194,0.060408], [0.15254,0.2168], [0.45558,0.43769], [0.28488,0.52142], [0.27633,0.21264], [0.39748,0.31902], [0.5533,1], [0.44274,0.59205], [0.85176,0.6612], [0.60436,0.86605], [0.68243,0.48301], [1,0.76815], [0.72989,0.8107], [0.67377,0.77975], [0.78761,0.58177], [0.71442,0.7668], [0.49379,0.54226], [0.78974,0.74233], [0.67905,0.60921], [0.6642,0.72519], [0.79396,0.56789], [0.70758,0.76022], [0.59421,0.61857], [0.49364,0.56224], [0.77707,0.35025], [0.79785,0.76921], [0.70876,0.96764], [0.69176,0.60865], [0.66408,0.92075], [0.65973,0.66666], [0.64574,0.56845], [0.89639,0.7085], [0.85476,0.63167], [0.62091,0.80424], [0.79057,0.56108], [0.58935,0.71582], [0.56846,0.7406], [0.65912,0.71548], [0.70938,0.74041], [0.59154,0.62927], [0.45829,0.4641], [0.79982,0.74847], [0.60974,0.54757], [0.68127,0.86985], [0.76694,0.64736], [0.69048,0.83058], [0.68122,0.96541], [0.73229,0.64245], [0.76145,0.60138], [0.58985,0.86955], [0.73145,0.74516], [0.77029,0.7014], [0.73156,0.71782], [0.44556,0.57991], [0.85275,0.85987], [0.51912,0.62359] ]) y = np.array([ [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0] ]) print(X.shape) print(y.shape) import numpy as np # Setting the random seed, feel free to change it and see different solutions. np.random.seed(42) def stepFunction(t): if t >= 0: return 1 return 0 def prediction(X, W, b): return stepFunction((np.matmul(X,W)+b)[0]) # TODO: Fill in the code below to implement the perceptron trick. # The function should receive as inputs the data X, the labels y, # the weights W (as an array), and the bias b, # update the weights and bias W, b, according to the perceptron algorithm, # and return W and b. 
def perceptronStep(X, y, W, b, learn_rate = 0.01): # Fill in code for i in range(len(y)): true_label = y[i] pred = prediction(X[i], W, b) if true_label == pred: continue else: if pred == 1 and true_label == 0: # the point is classified positive, but it has a negative label W -= learn_rate * X[i].reshape(-1, 1) b -= learn_rate elif pred == 0 and true_label == 1: # the point is classified negative, but it has a positive label W += learn_rate * X[i].reshape(-1, 1) b += learn_rate return W, b # This function runs the perceptron algorithm repeatedly on the dataset, # and returns a few of the boundary lines obtained in the iterations, # for plotting purposes. # Feel free to play with the learning rate and the num_epochs, # and see your results plotted below. def trainPerceptronAlgorithm(X, y, learn_rate = 0.01, num_epochs = 25): x_min, x_max = min(X.T[0]), max(X.T[0]) y_min, y_max = min(X.T[1]), max(X.T[1]) W = np.array(np.random.rand(2,1)) b = np.random.rand(1)[0] + x_max # These are the solution lines that get plotted below. boundary_lines = [] for i in range(num_epochs): # In each epoch, we apply the perceptron step. W, b = perceptronStep(X, y, W, b, learn_rate) boundary_lines.append((-W[0]/W[1], -b/W[1])) return boundary_lines
_____no_output_____
MIT
exercise_notebooks_my_solutions/2. Neural Networks/1. Introduction to Neural Networks.ipynb
Yixuan-Lee/udacity-deep-learning-nanodegree
Solution:

```
def perceptronStep(X, y, W, b, learn_rate = 0.01):
    for i in range(len(X)):
        y_hat = prediction(X[i],W,b)
        if y[i]-y_hat == 1:
            W[0] += X[i][0]*learn_rate
            W[1] += X[i][1]*learn_rate
            b += learn_rate
        elif y[i]-y_hat == -1:
            W[0] -= X[i][0]*learn_rate
            W[1] -= X[i][1]*learn_rate
            b -= learn_rate
    return W, b
```

6. Softmax

Complete the code below:
import numpy as np # Write a function that takes as input a list of numbers, and returns # the list of values given by the softmax function. def softmax(L): pass
_____no_output_____
MIT
exercise_notebooks_my_solutions/2. Neural Networks/1. Introduction to Neural Networks.ipynb
Yixuan-Lee/udacity-deep-learning-nanodegree
My answer:
import numpy as np # Write a function that takes as input a list of numbers, and returns # the list of values given by the softmax function. def softmax(L): return [(np.exp(L[i]) / np.sum(np.exp(L))) for i in range(len(L))] L = [0, 2, 1] softmax(L)
_____no_output_____
MIT
exercise_notebooks_my_solutions/2. Neural Networks/1. Introduction to Neural Networks.ipynb
Yixuan-Lee/udacity-deep-learning-nanodegree
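One practical refinement worth noting before moving on (optional, not required by the exercise): subtracting the maximum of the list before exponentiating leaves the softmax values unchanged mathematically, but avoids overflow for large inputs:

```
import numpy as np

def softmax_stable(L):
    # shifting by max(L) keeps the ratios identical but prevents np.exp from overflowing
    shifted = np.array(L) - np.max(L)
    exps = np.exp(shifted)
    return list(exps / np.sum(exps))

print(softmax_stable([0, 2, 1]))           # same values as softmax([0, 2, 1])
print(softmax_stable([1000, 1001, 1002]))  # would overflow without the shift
```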
7. Cross-Entropy

Formula:

$$\text{Cross Entropy} = - \sum_{i=1}^{|X|} \left[ y_i \log(p_i) + (1 - y_i) \log(1 - p_i) \right]$$

where

* $y_i$ is the true label of the $i^{th}$ instance
* $p_i$ is the probability that the $i^{th}$ instance is positive

Complete the code below:
import numpy as np # Write a function that takes as input two lists Y, P, # and returns the float corresponding to their cross-entropy. def cross_entropy(Y, P): pass
_____no_output_____
MIT
exercise_notebooks_my_solutions/2. Neural Networks/1. Introduction to Neural Networks.ipynb
Yixuan-Lee/udacity-deep-learning-nanodegree
My answer:
import numpy as np # Write a function that takes as input two lists Y, P, # and returns the float corresponding to their cross-entropy. def cross_entropy(Y, P): return -np.sum([Y[i] * np.log(P[i]) + (1 - Y[i]) * np.log(1 - P[i]) for i in range(len(Y))]) Y = np.array([1, 0, 1, 1]) P = np.array([0.4, 0.6, 0.1, 0.5]) assert float(format(cross_entropy(Y, P), '.10f')) == 4.8283137373
_____no_output_____
MIT
exercise_notebooks_my_solutions/2. Neural Networks/1. Introduction to Neural Networks.ipynb
Yixuan-Lee/udacity-deep-learning-nanodegree
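As a quick sanity check of the assert above, plugging Y = [1, 0, 1, 1] and P = [0.4, 0.6, 0.1, 0.5] into the formula gives

$$-\left[\ln 0.4 + \ln(1 - 0.6) + \ln 0.1 + \ln 0.5\right] \approx 0.9163 + 0.9163 + 2.3026 + 0.6931 \approx 4.8283,$$

which matches the expected value 4.8283137373.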
AHDB wheat lodging risk and recommendations

This example notebook was inspired by the [AHDB lodging practical guidelines](https://ahdb.org.uk/knowledge-library/lodging): we evaluate the lodging risk for a field and output practical recommendations. We then adjust the estimated risk according to the Leaf Area Index (LAI) and Green Cover Fraction (GCF) obtained using the Agrimetrics GraphQL API.

AHDB lodging resistance score

AHDB's guidelines show how a lodging resistance score can be calculated based on:

- the crop variety's natural resistance to lodging without Plant Growth Regulators (PGR)
- the Soil Nitrogen Supply (SNS) index - a higher supply increases lodging risk
- the sowing date - an earlier sowing increases lodging risk
- the sowing density - a higher plant density increases lodging risk

The overall lodging resistance score is the sum of the individual scores. AHDB practical advice on reducing the risk of lodging is given for 4 resistance score categories:

| Lodging resistance category | Lodging risk |
|---|---|
| below 5 | very high |
| 5-6.8 | high |
| 7-8.8 | medium |
| 9-10 | low |
| over 10 | very low |

[Table image](img/lodging/ahdb_risk_categories.png)
# Input AHDB factors for evaluating lodging risks def sns_index_score(sns_index): return 3 - 6 * sns_index / 4 # Sowing dates and associated lodging resistance score sowing_date_scores = {'Mid Sept': -2, 'End Sept': -1, 'Mid Oct': 0, 'End Oct': 1, 'Nov onwards': 2} # Density ranges and associated lodging resistance score sowing_density_scores = {'<150': 1.5, '200-150': +0.75, '300-200': 0, '400-300': -1, '>400': -1.75} # AHDB resistance score categories def score_category(score): if score < 5: return 'below 5' if score < 7: return '5-6.8' if score < 9: return '7-8.8' if score < 10: return '9-10' return 'over 10' # Combine individual factor scores def lodging_resistance_category(resistance_score, sns_index, sowing_date, sowing_density): score = resistance_score + sns_index_score(sns_index) + sowing_date_scores[sowing_date] + sowing_density_scores[sowing_density] return score_category(score)
_____no_output_____
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
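As a quick illustration of how these scores combine (the factor values below are arbitrary examples, not AHDB data): a variety resistance score of 6, an SNS index of 2, sowing at the end of September and a plant density in the '200-150' band give 6 + 0 + (-1) + 0.75 = 5.75, which falls in the '5-6.8' (high risk) category.

```
# illustrative example with arbitrary factor values
example_score = 6 + sns_index_score(2) + sowing_date_scores['End Sept'] + sowing_density_scores['200-150']
print(example_score)                                              # 5.75
print(score_category(example_score))                              # '5-6.8' -> high lodging risk
print(lodging_resistance_category(6, 2, 'End Sept', '200-150'))   # same result in one call
```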
AHDB practical advice

AHDB provides practical advice for managing the risk of stem and root lodging. This advice depends on the resistance score calculated specifically for a field. AHDB recommends fertiliser and PGR actions for managing stem lodging risk. For root lodging, AHDB also advises whether the crop needs to be rolled (before the crop has reached stage "GS30").
# Nitrogen fertiliser advice for stem risk stem_risk_N_advice = { 'below 5': 'Delay & reduce N', '5-6.8': 'Delay & reduce N', '7-8.8': 'Delay N', } # PGR advice for stem risk stem_risk_PGR_advice = { 'below 5': 'Full PGR', '5-6.8': 'Full PGR', '7-8.8': 'Single PGR', '9-10': 'PGR if high yield forecast' } # Nitrogen fertiliser advice for root risk root_risk_N_advice = { 'below 5': 'Reduce N', '5-6.8': 'Reduce N', } # PGR advice for root risk root_risk_PGR_advice = { 'below 5': 'Full PGR', '5-6.8': 'Full PGR', '7-8.8': 'Single PGR', '9-10': 'PGR if high yield forecast' } # Spring rolling advice for root risk root_risk_Roll_advice = { 'below 5': 'Roll', '5-6.8': 'Roll', '7-8.8': 'Roll', }
_____no_output_____
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
AHDB standard lodging risk management recommendations

Using the definitions above, we can calculate the AHDB recommendation according to the individual factors:
import pandas as pd from ipywidgets import widgets from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() style = {'description_width': 'initial'} def ahdb_lodging_recommendation(resistance_score, sns_index, sowing_date, sowing_density): category = lodging_resistance_category(resistance_score, sns_index, sowing_date, sowing_density) return pd.DataFrame(index=['Fertiliser nitrogen', 'Plant growth regulators', 'Spring rolling'], data={ 'Stem lodging': [stem_risk_N_advice.get(category, ''), stem_risk_PGR_advice.get(category, ''), '' ], 'Root lodging': [root_risk_N_advice.get(category, ''), root_risk_PGR_advice.get(category, ''), root_risk_Roll_advice.get(category, '')] }) widgets.interact(ahdb_lodging_recommendation, resistance_score = widgets.IntSlider(description='Resistance score without PGR', min=1, max=9, style=style), sns_index = widgets.IntSlider(description='SNS index', min=0, max=4, style=style), sowing_date = widgets.SelectionSlider(description='Sowing date', options=sowing_date_scores.keys(), style=style), sowing_density = widgets.SelectionSlider(description='Sowing density', options=sowing_density_scores.keys(), style=style), )
_____no_output_____
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
[Widget image](img/lodging/recommendations_slider.png)

Adjusting recommendations based on remote sensing information

The same practical guidelines from AHDB explain that crop conditions in spring can indicate future lodging risk. In particular, a Green Area Index (GAI) greater than 2 or a Green Cover Fraction (GCF) above 60% are indicative of increased stem lodging risk. To adjust our practical advice, we will retrieve LAI and GCF from the Agrimetrics GraphQL API.

Using Agrimetrics GraphQL API

An Agrimetrics API key must be provided with each GraphQL API request in a custom request header, Ocp-Apim-Subscription-Key. For more information about how to obtain and use an Agrimetrics API key, please consult the [Developer portal](https://developer.agrimetrics.co.uk). To get started with GraphQL, see the [Agrimetrics Graph Explorer](https://app.agrimetrics.co.uk//graph-explorer) tool.
import os import requests GRAPHQL_ENDPOINT = "https://api.agrimetrics.co.uk/graphql/v1/" if "API_KEY" in os.environ: API_KEY = os.environ["API_KEY"] else: API_KEY = input("Query API Subscription Key: ").strip()
_____no_output_____
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
We will also need a short function to help catch and report errors from making GraphQL queries.
def check_results(result): if result.status_code != 200: raise Exception(f"Request failed with code {result.status_code}.\n{result.text}") errors = result.json().get("errors", []) if errors: for err in errors: print(f"{err['message']}:") print( " at", " and ".join([f"line {loc['line']}, col {loc['column']}" for loc in err['locations']])) print( " path", ".".join(err['path'])) print(f" {err['extensions']}") raise Exception(f"GraphQL reported {len(errors)} errors")
_____no_output_____
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
A GraphQL query is posted to the GraphQL endpoint in a json body. With our first query, we retrieve the Agrimetrics field id at a given location.
graphql_url = 'https://api.agrimetrics.co.uk/graphql' headers = { 'Ocp-Apim-Subscription-Key': API_KEY, 'Content-Type': "application/json", 'Accept-Encoding': "gzip, deflate, br", } centroid = (-0.929365345, 51.408374978) response = requests.post(graphql_url, headers=headers, json={ 'query': ''' query getFieldAtLocation($centroid: CoordinateScalar!) { fields(geoFilter: {location: {type: Point, coordinates: $centroid}, distance: {LE: 10}}) { id } } ''', 'variables': { 'centroid': centroid } }) check_results(response) field_id = response.json()['data']['fields'][0]['id'] print('Agrimetrics field id:', field_id)
Agrimetrics field id: https://data.agrimetrics.co.uk/fields/BZwCrEVaXO62NTX_Jfl1yw
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
The GraphQL API supports filtering by object ids. Here, we retrieve the sown crop information associated with the field id obtained in our first query.
# Verify field was a wheat crop in 2018 response = requests.post(graphql_url, headers=headers, json={ 'query': ''' query getSownCrop($fieldId: [ID!]!) { fields(where: {id: {EQ: $fieldId}}) { sownCrop { cropType harvestYear } } } ''', 'variables': { 'fieldId': field_id } }) check_results(response) print(response.json()['data']['fields'][0]['sownCrop'])
[{'cropType': 'WHEAT', 'harvestYear': 2016}, {'cropType': 'MAIZE', 'harvestYear': 2017}, {'cropType': 'WHEAT', 'harvestYear': 2018}]
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
It is necessary to register for accessing Verde crop observations on our field of interest. LAI is a crop-specific attribute, so it is necessary to provide `cropType` when registering.
# Register for CROP_SPECIFIC verde data on our field response = requests.post(graphql_url, headers=headers, json={ 'query': ''' mutation registerCropObservations($fieldId: ID!) { account { premiumData { addCropObservationRegistrations(registrations: {fieldId: $fieldId, layerType: CROP_SPECIFIC, cropType: WHEAT, season: SEP2017TOSEP2018}) { id } } } } ''', 'variables': {'fieldId': field_id} }) check_results(response)
_____no_output_____
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
GCF is not crop specific, so we need to register as well for accessing non crop-specific attributes.
# Register for NON_CROP_SPECIFIC verde data on our field response = requests.post(graphql_url, headers=headers, json={ 'query': ''' mutation registerCropObservations($fieldId: ID!) { account { premiumData { addCropObservationRegistrations(registrations: {fieldId: $fieldId, layerType: NON_CROP_SPECIFIC, season: SEP2017TOSEP2018}) { id } } } } ''', 'variables': {'fieldId': field_id} }) check_results(response)
_____no_output_____
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
Once Verde data for this field is available, we can easily retrieve it, for instance:
response = requests.post(graphql_url, headers=headers, json={ 'query': ''' query getCropObservations($fieldId: [ID!]!) { fields(where: {id: {EQ: $fieldId}}) { cropObservations { leafAreaIndex { dateTime mean } } } } ''', 'variables': {'fieldId': field_id} }) check_results(response)
_____no_output_____
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
The data can be loaded as a pandas DataFrame:
results = response.json() leafAreaIndex = pd.io.json.json_normalize( results['data']['fields'], record_path=['cropObservations', 'leafAreaIndex'], ) leafAreaIndex['date_time'] = pd.to_datetime(leafAreaIndex['dateTime']) leafAreaIndex['value'] = leafAreaIndex['mean'] leafAreaIndex = leafAreaIndex[['date_time', 'value']] leafAreaIndex.head()
_____no_output_____
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
[Table image](img/lodging/lai_for_field.png) We proceed to a second similar query to obtain green vegetation cover fraction:
response = requests.post(graphql_url, headers=headers, json={ 'query': ''' query getCropObservations($fieldId: [ID!]!) { fields(where: {id: {EQ: $fieldId}}) { cropObservations { greenVegetationCoverFraction { dateTime mean } } } } ''', 'variables': {'fieldId': field_id} }) check_results(response) results = response.json() greenCoverFraction = pd.io.json.json_normalize( results['data']['fields'], record_path=['cropObservations', 'greenVegetationCoverFraction'], ) greenCoverFraction['date_time'] = pd.to_datetime(greenCoverFraction['dateTime']) greenCoverFraction['value'] = greenCoverFraction['mean'] greenCoverFraction = greenCoverFraction[['date_time', 'value']]
_____no_output_____
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
A year of observations was retrieved:
import matplotlib.pyplot as plt plt.plot(leafAreaIndex['date_time'], leafAreaIndex['value'], label='LAI') plt.plot(greenCoverFraction['date_time'], greenCoverFraction['value'], label='GCF') plt.legend() plt.show()
_____no_output_____
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
[Graph image](img/lodging/lai_gfc.png)

Adjusting recommendation

GS31 marks the beginning of stem elongation and generally occurs around mid April. Let's filter our LAI and GCF around this time of year:
from datetime import datetime, timezone from_date = datetime(2018, 4, 7, tzinfo=timezone.utc) to_date = datetime(2018, 4, 21, tzinfo=timezone.utc) leafAreaIndex_mid_april = leafAreaIndex[(leafAreaIndex['date_time'] > from_date) & (leafAreaIndex['date_time'] < to_date)] greenCoverFraction_mid_april = greenCoverFraction[(greenCoverFraction['date_time'] > from_date) & (greenCoverFraction['date_time'] < to_date)]
_____no_output_____
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
Check if LAI or GCF are above their respective thresholds:
(leafAreaIndex_mid_april['value'] > 2).any() | (greenCoverFraction_mid_april['value'] > 0.6).any()
_____no_output_____
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
Our field has an LAI below 2 in the two weeks around mid April and no GCF reading close enough to be taken into account. We now have the basis for adjusting our recommendation using Agrimetrics Verde crop observations. Let's broaden our evaluation to nearby Agrimetrics fields with a wheat crop in 2018.
response = requests.post(graphql_url, headers=headers, json={ 'query': ''' query getFieldsWithinRadius($centroid: CoordinateScalar!, $distance: Float!) { fields(geoFilter: {location: {type: Point, coordinates: $centroid}, distance: {LE: $distance}}) { id sownCrop { cropType harvestYear } } } ''', 'variables': { 'centroid': centroid, 'distance': 2000 } # distance in m }) check_results(response) results = response.json() nearby_fields = pd.io.json.json_normalize( results['data']['fields'], record_path=['sownCrop'], meta=['id'], ) nearby_wheat_fields = nearby_fields[(nearby_fields['cropType'] == 'WHEAT') & (nearby_fields['harvestYear'] == 2018)] available_fields = nearby_wheat_fields['id'] available_fields.head()
_____no_output_____
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
Using the same approach as above, we implement the retrieval of Verde LAI and GCF for the selected fields:
def register(field_id): # Register for CROP_SPECIFIC verde data on our field response = requests.post(graphql_url, headers=headers, json={ 'query': ''' mutation registerCropObservations($fieldId: ID!) { account { premiumData { addCropObservationRegistrations(registrations: { fieldId: $fieldId, layerType: CROP_SPECIFIC, season: SEP2017TOSEP2018, cropType: WHEAT }) { id } } } } ''', 'variables': {'fieldId': field_id} }) check_results(response) # Register for NON_CROP_SPECIFIC verde data on our field response = requests.post(graphql_url, headers=headers, json={ 'query': ''' mutation registerCropObservations($fieldId: ID!) { account { premiumData { addCropObservationRegistrations(registrations: { fieldId: $fieldId, layerType: NON_CROP_SPECIFIC, season: SEP2017TOSEP2018 }) { id } } } } ''', 'variables': {'fieldId': field_id} }) check_results(response) def crop_observations(field_id, attribute): response = requests.post(graphql_url, headers=headers, json={ 'query': ''' query getCropObservations($fieldId: [ID!]!) {{ fields(where: {{id: {{EQ: $fieldId}}}}) {{ cropObservations {{ {attribute} {{ mean dateTime }} }} }} }} '''.format(attribute=attribute), 'variables': {'fieldId': field_id} }) check_results(response) results = response.json() data = pd.io.json.json_normalize( results['data']['fields'], record_path=['cropObservations', attribute], ) data['date_time'] = pd.to_datetime(data['dateTime']) data['value'] = data['mean'] return data[['date_time', 'value']] def has_high_LAI(field_id, leafAreaIndex): if not leafAreaIndex.empty: leafAreaIndex_mid_april = leafAreaIndex[(leafAreaIndex['date_time'] > from_date) & (leafAreaIndex['date_time'] < to_date)] return (leafAreaIndex_mid_april['value'] > 2).any() return False def has_high_GCF(field_id, greenCoverFraction): if not greenCoverFraction.empty: greenCoverFraction_mid_april = greenCoverFraction[(greenCoverFraction['date_time'] > from_date) & (greenCoverFraction['date_time'] < to_date)] return (greenCoverFraction_mid_april['value'] > 0.6).any() return False
_____no_output_____
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
We then revisit the recommendation algorithm:
def adjusted_lodging_recommendation(field_id, resistance_score, sns_index, sowing_date, sowing_density): register(field_id) leafAreaIndex = crop_observations(field_id, 'leafAreaIndex') greenCoverFraction = crop_observations(field_id, 'greenVegetationCoverFraction') high_LAI = has_high_LAI(field_id, leafAreaIndex) high_GCF = has_high_GCF(field_id, greenCoverFraction) plt.plot(leafAreaIndex['date_time'], leafAreaIndex['value'], label='LAI') plt.plot(greenCoverFraction['date_time'], greenCoverFraction['value'], label='GCF') plt.legend() plt.show() if high_LAI and high_GCF: print('High LAI and GCF were observed around GS31 for this crop, please consider adjusting the recommendation') elif high_LAI: print('High LAI was observed around GS31 for this crop, please consider adjusting the recommendation') elif high_GCF: print('High GCF was observed around GS31 for this crop, please consider adjusting the recommendation') else: print('High LAI and GCF were not observed around GS31 for this crop') return ahdb_lodging_recommendation(resistance_score, sns_index, sowing_date, sowing_density) widgets.interact(adjusted_lodging_recommendation, field_id=widgets.Dropdown(description='Agrimetrics field id', options=available_fields, style=style), resistance_score=widgets.IntSlider(description='Resistance score without PGR', min=1, max=9, style=style), sns_index=widgets.IntSlider(description='SNS index', min=0, max=4, style=style), sowing_date=widgets.SelectionSlider(description='Sowing date', options=sowing_date_scores.keys(), style=style), sowing_density=widgets.SelectionSlider(description='Sowing density', options=sowing_density_scores.keys(), style=style), )
_____no_output_____
MIT
verde-examples/lodging.ipynb
markbneal/api-examples
iloc (position-based indexing)
data_df.head()

data_df.iloc[0, 0]

# the code below raises an error (iloc is position-based and does not accept label values)
data_df.iloc['Name', 0]

data_df.reset_index()
_____no_output_____
MIT
self/pandas_basic_2.ipynb
Karmantez/Tensorflow_Practice
loc (label-based indexing)
data_df data_df.loc['one', 'Name'] data_df_reset.loc[1, 'Name'] data_df_reset.loc[0, 'Name']
_____no_output_____
MIT
self/pandas_basic_2.ipynb
Karmantez/Tensorflow_Practice
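Since ```data_df``` and ```data_df_reset``` are created earlier in the notebook, here is a small self-contained contrast of the two indexers on hypothetical data (illustration only):

```
import pandas as pd

# hypothetical frame with string labels as the index
df = pd.DataFrame({'Name': ['A', 'B', 'C']}, index=['one', 'two', 'three'])

print(df.iloc[0, 0])           # position-based: first row, first column
print(df.loc['one', 'Name'])   # label-based: row label 'one', column label 'Name'

df_reset = df.reset_index()    # integer index 0..2; the old index becomes a column
print(df_reset.loc[0, 'Name'])
```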
Boolean Indexing
titanic_df = pd.read_csv('titanic_train.csv')
titanic_boolean = titanic_df[titanic_df['Age'] > 60]
titanic_boolean

var1 = titanic_df['Age'] > 60
print('Result:\n', var1)
print(type(var1))

titanic_df[titanic_df['Age'] > 60][['Name', 'Age']].head(3)
titanic_df[['Name', 'Age']][titanic_df['Age'] > 60].head(3)

titanic_df['Age_cat'] = titanic_df['Age'].apply(lambda x : 'Child' if x <= 15 else ('Adult' if x <= 60 else 'Elderly'))
titanic_df['Age_cat'].value_counts()
_____no_output_____
MIT
self/pandas_basic_2.ipynb
Karmantez/Tensorflow_Practice
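An optional alternative to the ```apply```/lambda approach above is ```pd.cut```, which maps age ranges to labels directly (note one small difference: missing ages become NaN here, whereas the lambda above labels them 'Elderly'):

```
# same Child / Adult / Elderly buckets built with pd.cut
titanic_df['Age_cat2'] = pd.cut(titanic_df['Age'],
                                bins=[0, 15, 60, 200],
                                labels=['Child', 'Adult', 'Elderly'])
titanic_df['Age_cat2'].value_counts()
```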
Settings
%load_ext autoreload %autoreload 2 %env TF_KERAS = 1 import os sep_local = os.path.sep import sys sys.path.append('..'+sep_local+'..') print(sep_local) os.chdir('..'+sep_local+'..'+sep_local+'..'+sep_local+'..'+sep_local+'..') print(os.getcwd()) import tensorflow as tf print(tf.__version__)
2.1.0
MIT
notebooks/losses_evaluation/Dstripes/basic/ellwlb/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_1psnr.ipynb
Fidan13/Generative_Models
Dataset loading
dataset_name='Dstripes' images_dir = 'C:\\Users\\Khalid\\Documents\projects\\Dstripes\DS06\\' validation_percentage = 20 valid_format = 'png' from training.generators.file_image_generator import create_image_lists, get_generators imgs_list = create_image_lists( image_dir=images_dir, validation_pct=validation_percentage, valid_imgae_formats=valid_format ) inputs_shape= image_size=(200, 200, 3) batch_size = 32 latents_dim = 32 intermediate_dim = 50 training_generator, testing_generator = get_generators( images_list=imgs_list, image_dir=images_dir, image_size=image_size, batch_size=batch_size, class_mode=None ) import tensorflow as tf train_ds = tf.data.Dataset.from_generator( lambda: training_generator, output_types=tf.float32 , output_shapes=tf.TensorShape((batch_size, ) + image_size) ) test_ds = tf.data.Dataset.from_generator( lambda: testing_generator, output_types=tf.float32 , output_shapes=tf.TensorShape((batch_size, ) + image_size) ) _instance_scale=1.0 for data in train_ds: _instance_scale = float(data[0].numpy().max()) break _instance_scale import numpy as np from collections.abc import Iterable if isinstance(inputs_shape, Iterable): _outputs_shape = np.prod(inputs_shape) _outputs_shape
_____no_output_____
MIT
notebooks/losses_evaluation/Dstripes/basic/ellwlb/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_1psnr.ipynb
Fidan13/Generative_Models
Model's Layers definition
units=20 c=50 menc_lays = [ tf.keras.layers.Conv2D(filters=units//2, kernel_size=3, strides=(2, 2), activation='relu'), tf.keras.layers.Conv2D(filters=units*9//2, kernel_size=3, strides=(2, 2), activation='relu'), tf.keras.layers.Flatten(), # No activation tf.keras.layers.Dense(latents_dim) ] venc_lays = [ tf.keras.layers.Conv2D(filters=units//2, kernel_size=3, strides=(2, 2), activation='relu'), tf.keras.layers.Conv2D(filters=units*9//2, kernel_size=3, strides=(2, 2), activation='relu'), tf.keras.layers.Flatten(), # No activation tf.keras.layers.Dense(latents_dim) ] dec_lays = [ tf.keras.layers.Dense(units=units*c*c, activation=tf.nn.relu), tf.keras.layers.Reshape(target_shape=(c , c, units)), tf.keras.layers.Conv2DTranspose(filters=units, kernel_size=3, strides=(2, 2), padding="SAME", activation='relu'), tf.keras.layers.Conv2DTranspose(filters=units*3, kernel_size=3, strides=(2, 2), padding="SAME", activation='relu'), # No activation tf.keras.layers.Conv2DTranspose(filters=3, kernel_size=3, strides=(1, 1), padding="SAME") ]
_____no_output_____
MIT
notebooks/losses_evaluation/Dstripes/basic/ellwlb/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_1psnr.ipynb
Fidan13/Generative_Models
Model definition
model_name = dataset_name+'VAE_Convolutional_reconst_1ell_1psnr'
experiments_dir='experiments'+sep_local+model_name

from training.autoencoding_basic.autoencoders.VAE import VAE as AE

inputs_shape=image_size
variables_params = \
[
    {
        'name': 'inference_mean',
        'inputs_shape':inputs_shape,
        'outputs_shape':latents_dim,
        'layers': menc_lays
    },
    {
        'name': 'inference_logvariance',
        'inputs_shape':inputs_shape,
        'outputs_shape':latents_dim,
        'layers': venc_lays
    },
    {
        'name': 'generative',
        'inputs_shape':latents_dim,
        'outputs_shape':inputs_shape,
        'layers':dec_lays
    }
]

from utils.data_and_files.file_utils import create_if_not_exist
_restore = os.path.join(experiments_dir, 'var_save_dir')
create_if_not_exist(_restore)
_restore

# to restore a trained model, set filepath=_restore
ae = AE(
    name=model_name,
    latents_dim=latents_dim,
    batch_size=batch_size,
    variables_params=variables_params,
    filepath=None
)

from evaluation.quantitive_metrics.peak_signal_to_noise_ratio import prepare_psnr
from statistical.losses_utilities import similarty_to_distance
from statistical.ae_losses import expected_loglikelihood_with_lower_bound as ellwlb

# use the helper under the exact name it is imported as above (similarty_to_distance)
ae.compile(loss={'x_logits': lambda x_true, x_logits: ellwlb(x_true, x_logits) + similarty_to_distance(prepare_psnr([ae.batch_size] + ae.get_inputs_shape()))(x_true, x_logits)})
Model: "pokemonAE_Dense_reconst_1ell_1ssmi" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= inference_inputs (InputLayer [(None, 200, 200, 3)] 0 _________________________________________________________________ inference (Model) (None, 32) 40961344 _________________________________________________________________ generative (Model) (None, 200, 200, 3) 3962124 _________________________________________________________________ tf_op_layer_x_logits (Tensor [(None, 200, 200, 3)] 0 ================================================================= Total params: 44,923,468 Trainable params: 44,923,398 Non-trainable params: 70 _________________________________________________________________ None
MIT
notebooks/losses_evaluation/Dstripes/basic/ellwlb/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_1psnr.ipynb
Fidan13/Generative_Models
Callbacks
from training.callbacks.sample_generation import SampleGeneration from training.callbacks.save_model import ModelSaver es = tf.keras.callbacks.EarlyStopping( monitor='loss', min_delta=1e-12, patience=12, verbose=1, restore_best_weights=False ) ms = ModelSaver(filepath=_restore) csv_dir = os.path.join(experiments_dir, 'csv_dir') create_if_not_exist(csv_dir) csv_dir = os.path.join(csv_dir, ae.name+'.csv') csv_log = tf.keras.callbacks.CSVLogger(csv_dir, append=True) csv_dir image_gen_dir = os.path.join(experiments_dir, 'image_gen_dir') create_if_not_exist(image_gen_dir) sg = SampleGeneration(latents_shape=latents_dim, filepath=image_gen_dir, gen_freq=5, save_img=True, gray_plot=False)
_____no_output_____
MIT
notebooks/losses_evaluation/Dstripes/basic/ellwlb/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_1psnr.ipynb
Fidan13/Generative_Models
Model Training
ae.fit(
    x=train_ds,
    input_kw=None,
    steps_per_epoch=int(1e4),
    epochs=int(1e6),
    verbose=2,
    # only the callbacks defined in the cells above are passed here
    callbacks=[es, ms, csv_log, sg],
    workers=-1,
    use_multiprocessing=True,
    validation_data=test_ds,
    validation_steps=int(1e4)
)
_____no_output_____
MIT
notebooks/losses_evaluation/Dstripes/basic/ellwlb/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_1psnr.ipynb
Fidan13/Generative_Models
Model Evaluation

inception_score
from evaluation.generativity_metrics.inception_metrics import inception_score is_mean, is_sigma = inception_score(ae, tolerance_threshold=1e-6, max_iteration=200) print(f'inception_score mean: {is_mean}, sigma: {is_sigma}')
_____no_output_____
MIT
notebooks/losses_evaluation/Dstripes/basic/ellwlb/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_1psnr.ipynb
Fidan13/Generative_Models
Frechet_inception_distance
from evaluation.generativity_metrics.inception_metrics import frechet_inception_distance fis_score = frechet_inception_distance(ae, training_generator, tolerance_threshold=1e-6, max_iteration=10, batch_size=32) print(f'frechet inception distance: {fis_score}')
_____no_output_____
MIT
notebooks/losses_evaluation/Dstripes/basic/ellwlb/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_1psnr.ipynb
Fidan13/Generative_Models
perceptual_path_length_score
from evaluation.generativity_metrics.perceptual_path_length import perceptual_path_length_score ppl_mean_score = perceptual_path_length_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200, batch_size=32) print(f'perceptual path length score: {ppl_mean_score}')
_____no_output_____
MIT
notebooks/losses_evaluation/Dstripes/basic/ellwlb/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_1psnr.ipynb
Fidan13/Generative_Models
precision score
from evaluation.generativity_metrics.precision_recall import precision_score _precision_score = precision_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200) print(f'precision score: {_precision_score}')
_____no_output_____
MIT
notebooks/losses_evaluation/Dstripes/basic/ellwlb/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_1psnr.ipynb
Fidan13/Generative_Models
recall score
from evaluation.generativity_metrics.precision_recall import recall_score _recall_score = recall_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200) print(f'recall score: {_recall_score}')
_____no_output_____
MIT
notebooks/losses_evaluation/Dstripes/basic/ellwlb/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_1psnr.ipynb
Fidan13/Generative_Models
Image Generation

image reconstruction

Training dataset
%load_ext autoreload %autoreload 2 from training.generators.image_generation_testing import reconstruct_from_a_batch from utils.data_and_files.file_utils import create_if_not_exist save_dir = os.path.join(experiments_dir, 'reconstruct_training_images_like_a_batch_dir') create_if_not_exist(save_dir) reconstruct_from_a_batch(ae, training_generator, save_dir) from utils.data_and_files.file_utils import create_if_not_exist save_dir = os.path.join(experiments_dir, 'reconstruct_testing_images_like_a_batch_dir') create_if_not_exist(save_dir) reconstruct_from_a_batch(ae, testing_generator, save_dir)
_____no_output_____
MIT
notebooks/losses_evaluation/Dstripes/basic/ellwlb/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_1psnr.ipynb
Fidan13/Generative_Models
with Randomness
from training.generators.image_generation_testing import generate_images_like_a_batch from utils.data_and_files.file_utils import create_if_not_exist save_dir = os.path.join(experiments_dir, 'generate_training_images_like_a_batch_dir') create_if_not_exist(save_dir) generate_images_like_a_batch(ae, training_generator, save_dir) from utils.data_and_files.file_utils import create_if_not_exist save_dir = os.path.join(experiments_dir, 'generate_testing_images_like_a_batch_dir') create_if_not_exist(save_dir) generate_images_like_a_batch(ae, testing_generator, save_dir)
_____no_output_____
MIT
notebooks/losses_evaluation/Dstripes/basic/ellwlb/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_1psnr.ipynb
Fidan13/Generative_Models
Complete Randomness
from training.generators.image_generation_testing import generate_images_randomly from utils.data_and_files.file_utils import create_if_not_exist save_dir = os.path.join(experiments_dir, 'random_synthetic_dir') create_if_not_exist(save_dir) generate_images_randomly(ae, save_dir) from training.generators.image_generation_testing import interpolate_a_batch from utils.data_and_files.file_utils import create_if_not_exist save_dir = os.path.join(experiments_dir, 'interpolate_dir') create_if_not_exist(save_dir) interpolate_a_batch(ae, testing_generator, save_dir)
100%|██████████| 15/15 [00:00<00:00, 19.90it/s]
MIT
notebooks/losses_evaluation/Dstripes/basic/ellwlb/convolutional/VAE/DstripesVAE_Convolutional_reconst_1ellwlb_1psnr.ipynb
Fidan13/Generative_Models
Decision tree
data.columns = ["price", "maintenance", "n_doors", "capacity", "size_lug", "safety", "class"] data.sample(10) data.price.replace(("vhigh", "high", "med", "low"), (4, 3, 2, 1), inplace = True) data.maintenance.replace(("vhigh", "high", "med", "low"), (4, 3, 2, 1), inplace = True) data.n_doors.replace(("2", "3", "4", "5more"), (1, 2, 3, 4), inplace = True) data.capacity.replace(("2", "4", "more"), (1, 2, 3), inplace = True) data.size_lug.replace(("small", "med", "big"), (1, 2, 3), inplace = True) data.safety.replace(("low", "med", "high"), (1, 2, 3), inplace = True) data["class"].replace(("unacc", "acc", "good", "vgood"), (1, 2, 3, 4), inplace = True) data.head(5) import numpy as np dataset = data.values X = dataset[:, 0:6] Y = np.asarray(dataset[:,6], dtype = "S6") from sklearn import tree from sklearn.model_selection import train_test_split, cross_val_score from sklearn import metrics X_Train, X_Test, Y_Train, Y_Test = train_test_split(X, Y, test_size=0.2, random_state=0) tr = tree.DecisionTreeClassifier(max_depth = 10) tr.fit(X_Train, Y_Train) y_pred = tr.predict(X_Test) y_pred score = tr.score(X_Test, Y_Test) print("Precisión: %0.4f" % (score))
Precisión: 0.9682
MIT
car.ipynb
karvaroz/CarEvaluation
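Since ```cross_val_score``` is imported above but not used, here is an optional sketch of a cross-validated accuracy estimate for the same tree (5 folds chosen arbitrarily):

```
# cross-validated accuracy, less sensitive to a single train/test split
scores = cross_val_score(tree.DecisionTreeClassifier(max_depth=10), X, Y, cv=5)
print("Cross-validated accuracy: %0.4f (+/- %0.4f)" % (scores.mean(), scores.std()))
```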
Modeling and Simulation in Python

Case study.

Copyright 2017 Allen Downey

License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
# Configure Jupyter so figures appear in the notebook %matplotlib inline # Configure Jupyter to display the assigned value after an assignment %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' # import functions from the modsim.py module from modsim import *
_____no_output_____
MIT
soln/oem_soln.ipynb
pmalo46/ModSimPy