We fit the O-C points measured above using MCMC by calling the run_mcmc() function. We plot both the fit and the triangle plot showing the two- (and one-)dimensional posterior distributions (these can be suppressed by setting the optional parameters "plot_oc" and "plot_triangle" to False).
sampler, fit_mcmc, oc_sigmas, param_means, param_sigmas, fit_at_points, K = \
    octs.run_mcmc(oc_jd, oc_oc, oc_sd, prior_ranges, pos,
                  nsteps=31000, discard=1000, thin=300, processes=1)
100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 31000/31000 [03:08<00:00, 164.32it/s] 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 20000/20000 [00:02<00:00, 8267.13it/s]
MIT
06498_oc.ipynb
gerhajdu/rrl_binaries_1
The estimated LTTE parameters are:
print("Orbital period: {:d} +- {:d} [d]".format(int(param_means[0]), int(param_sigmas[0]))) print("Projected semi-major axis: {:.3f} +- {:.3f} [AU]".format(param_means[2]*173.144633, param_sigmas[2]*173.144633)) print("Eccentricity: {:.3f} +- {:.3f}".format(param_means[3], param_sigmas[3])) print("Argumen of periastron: {:+4d} +- {:d} [deg]".format(int(param_means[4]*180/np.pi), int(param_sigmas[4]*180/np.pi))) print("Periastron passage time: {:d} +- {:d} [HJD-2450000]".format(int(param_means[1]), int(param_sigmas[1]))) print("Period-change rate: {:+.3f} +- {:.3f} [d/Myr] ".format(param_means[7]*365.2422*2e6*period, param_sigmas[7]*365.2422*2e6*period)) print("RV semi-amplitude: {:5.2f} +- {:.2f} [km/s]".format(K[0], K[1])) print("Mass function: {:.5f} +- {:.5f} [M_Sun]".format(K[2], K[3]))
Orbital period: 2803 +- 3 [d] Projected semi-major axis: 2.492 +- 0.010 [AU] Eccentricity: 0.136 +- 0.008 Argument of periastron: -76 +- 3 [deg] Periastron passage time: 6538 +- 24 [HJD-2450000] Period-change rate: -0.002 +- 0.005 [d/Myr] RV semi-amplitude: 9.76 +- 0.04 [km/s] Mass function: 0.26290 +- 0.00334 [M_Sun]
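As a quick consistency check of the numbers above: with the projected semi-major axis in AU and the orbital period in years, Kepler's third law gives the mass function as f(M) = (a1 sin i)^3 / P^2 in solar masses. A minimal sketch using the printed values:

```python
# Rough consistency check of the mass function printed above:
# f(M) = (a1*sin i)**3 / P**2 is in solar masses when a is in AU and P in years.
a_sini_au = 2.492            # projected semi-major axis [AU] (value printed above)
P_years = 2803 / 365.2422    # orbital period [d] converted to [yr]

f_M = a_sini_au**3 / P_years**2
print("Mass function ~ {:.5f} M_Sun".format(f_M))   # ~0.263, consistent with the fit
```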
MIT
06498_oc.ipynb
gerhajdu/rrl_binaries_1
Consensus Optimization This notebook contains the code for the toy experiment in the paper [The Numerics of GANs](https://arxiv.org/abs/1705.10461).
%load_ext autoreload %autoreload 2 import tensorflow as tf from tensorflow.contrib import slim import numpy as np import scipy as sp from scipy import stats from matplotlib import pyplot as plt import sys, os from tqdm import tqdm_notebook tf.reset_default_graph() def kde(mu, tau, bbox=[-5, 5, -5, 5], save_file="", xlabel="", ylabel="", cmap='Blues'): values = np.vstack([mu, tau]) kernel = sp.stats.gaussian_kde(values) fig, ax = plt.subplots() ax.axis(bbox) ax.set_aspect(abs(bbox[1]-bbox[0])/abs(bbox[3]-bbox[2])) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) plt.tick_params( axis='x', # changes apply to the x-axis which='both', # both major and minor ticks are affected bottom='off', # ticks along the bottom edge are off top='off', # ticks along the top edge are off labelbottom='off') # labels along the bottom edge are off plt.tick_params( axis='y', # changes apply to the x-axis which='both', # both major and minor ticks are affected left='off', # ticks along the bottom edge are off right='off', # ticks along the top edge are off labelleft='off') # labels along the bottom edge are off xx, yy = np.mgrid[bbox[0]:bbox[1]:300j, bbox[2]:bbox[3]:300j] positions = np.vstack([xx.ravel(), yy.ravel()]) f = np.reshape(kernel(positions).T, xx.shape) cfset = ax.contourf(xx, yy, f, cmap=cmap) if save_file != "": plt.savefig(save_file, bbox_inches='tight') plt.close(fig) else: plt.show() def complex_scatter(points, bbox=None, save_file="", xlabel="real part", ylabel="imaginary part", cmap='Blues'): fig, ax = plt.subplots() if bbox is not None: ax.axis(bbox) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) xx = [p.real for p in points] yy = [p.imag for p in points] plt.plot(xx, yy, 'X') plt.grid() if save_file != "": plt.savefig(save_file, bbox_inches='tight') plt.close(fig) else: plt.show() # Parameters learning_rate = 1e-4 reg_param = 10. 
batch_size = 512 z_dim = 16 sigma = 0.01 method = 'conopt' divergence = 'standard' outdir = os.path.join('gifs', method) niter = 50000 n_save = 500 bbox = [-1.6, 1.6, -1.6, 1.6] do_eigen = True # Target distribution mus = np.vstack([np.cos(2*np.pi*k/8), np.sin(2*np.pi*k/8)] for k in range(batch_size)) x_real = mus + sigma*tf.random_normal([batch_size, 2]) # Model def generator_func(z): net = slim.fully_connected(z, 16) net = slim.fully_connected(net, 16) net = slim.fully_connected(net, 16) net = slim.fully_connected(net, 16) x = slim.fully_connected(net, 2, activation_fn=None) return x def discriminator_func(x): # Network net = slim.fully_connected(x, 16) net = slim.fully_connected(net, 16) net = slim.fully_connected(net, 16) net = slim.fully_connected(net, 16) logits = slim.fully_connected(net, 1, activation_fn=None) out = tf.squeeze(logits, -1) return out generator = tf.make_template('generator', generator_func) discriminator = tf.make_template('discriminator', discriminator_func) z = tf.random_normal([batch_size, z_dim]) x_fake = generator(z) d_out_real = discriminator(x_real) d_out_fake = discriminator(x_fake) # Loss if divergence == 'standard': d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( logits=d_out_real, labels=tf.ones_like(d_out_real) )) d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( logits=d_out_fake, labels=tf.zeros_like(d_out_fake) )) d_loss = d_loss_real + d_loss_fake g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( logits=d_out_fake, labels=tf.ones_like(d_out_fake) )) elif divergence == 'JS': d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( logits=d_out_real, labels=tf.ones_like(d_out_real) )) d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( logits=d_out_fake, labels=tf.zeros_like(d_out_fake) )) d_loss = d_loss_real + d_loss_fake g_loss = -d_loss elif divergence == 'indicator': d_loss = tf.reduce_mean(d_out_real - d_out_fake) g_loss = -d_loss else: raise NotImplementedError g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator') d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator') optimizer = tf.train.RMSPropOptimizer(learning_rate, use_locking=True) # optimizer = tf.train.GradientDescentOptimizer(learning_rate, use_locking=True) # Compute gradients d_grads = tf.gradients(d_loss, d_vars) g_grads = tf.gradients(g_loss, g_vars) # Merge variable and gradient lists variables = d_vars + g_vars grads = d_grads + g_grads if method == 'simga': apply_vec = list(zip(grads, variables)) elif method == 'conopt': # Reguliarizer reg = 0.5 * sum( tf.reduce_sum(tf.square(g)) for g in grads ) # Jacobian times gradiant Jgrads = tf.gradients(reg, variables) apply_vec = [ (g + reg_param * Jg, v) for (g, Jg, v) in zip(grads, Jgrads, variables) if Jg is not None ] else: raise NotImplementedError with tf.control_dependencies([g for (g, v) in apply_vec]): train_op = optimizer.apply_gradients(apply_vec) if do_eigen: jacobian_rows = [] g_grads = tf.gradients(g_loss, g_vars) g_grads = [-g for g in g_grads] d_grads = tf.gradients(d_loss, d_vars) d_grads = [-g for g in d_grads] for g in tqdm_notebook(g_grads + d_grads): g = tf.reshape(g, [-1]) len_g = int(g.get_shape()[0]) for i in tqdm_notebook(range(len_g)): g_row = tf.gradients(g[i], g_vars) d_row = tf.gradients(g[i], d_vars) jacobian_rows.append(g_row + d_row) def get_J(J_rows): J_rows_linear = [np.concatenate([g.flatten() for g in row]) for row in J_rows] J = np.array(J_rows_linear) return J 
def process_J(J, save_file, bbox=None): eig, eigv = np.linalg.eig(J) eig_real = np.array([p.real for p in eig]) complex_scatter(eig, save_file=save_file, bbox=bbox) def process_J_conopt(J, reg, save_file, bbox=None): J2 = J - reg * np.dot(J.T, J) eig, eigv = np.linalg.eig(J2) eig_real = np.array([p.real for p in eig]) complex_scatter(eig, save_file=save_file, bbox=bbox) sess = tf.InteractiveSession() sess.run(tf.global_variables_initializer()) # Real distribution x_out = np.concatenate([sess.run(x_real) for i in range(5)], axis=0) kde(x_out[:, 0], x_out[:, 1], bbox=bbox, cmap='Reds', save_file='gt.png') if not os.path.exists(outdir): os.makedirs(outdir) eigrawdir = os.path.join(outdir, 'eigs_raw') if not os.path.exists(eigrawdir): os.makedirs(eigrawdir) eigdir = os.path.join(outdir, 'eigs') if not os.path.exists(eigdir): os.makedirs(eigdir) eigdir_conopt = os.path.join(outdir, 'eigs_conopt') if not os.path.exists(eigdir_conopt): os.makedirs(eigdir_conopt) ztest = [np.random.randn(batch_size, z_dim) for i in range(5)] progress = tqdm_notebook(range(niter)) if do_eigen: J_rows = sess.run(jacobian_rows) J = get_J(J_rows) for i in progress: sess.run(train_op) d_loss_out, g_loss_out = sess.run([d_loss, g_loss]) if do_eigen and i % 500 == 0: J[:, :] = 0. for k in range(10): J_rows = sess.run(jacobian_rows) J += get_J(J_rows)/10. with open(os.path.join(eigrawdir, 'J_%d.npz' % i), 'wb') as f: np.save(f, J) progress.set_description('d_loss = %.4f, g_loss =%.4f' % (d_loss_out, g_loss_out)) if i % n_save == 0: x_out = np.concatenate([sess.run(x_fake, feed_dict={z: zt}) for zt in ztest], axis=0) kde(x_out[:, 0], x_out[:, 1], bbox=bbox, save_file=os.path.join(outdir,'%d.png' % i)) import re import glob import matplotlib matplotlib.rcParams.update({'font.size': 16}) pattern = r'J_(?P<it>0).npz' bbox = [-3.5, 0.75, -1.2, 1.2] eigrawdir = os.path.join(outdir, 'eigs_raw') if not os.path.exists(eigrawdir): os.makedirs(eigrawdir) eigdir = os.path.join(outdir, 'eigs') if not os.path.exists(eigdir): os.makedirs(eigdir) eigdir_conopt = os.path.join(outdir, 'eigs_conopt') if not os.path.exists(eigdir_conopt): os.makedirs(eigdir_conopt) out_files = glob.glob(os.path.join(eigrawdir, '*.npz')) matches = [re.fullmatch(pattern, os.path.basename(s)) for s in out_files] matches = [m for m in matches if m is not None] for m in tqdm_notebook(matches): it = int(m.group('it')) J = np.load(os.path.join(eigrawdir, m.group())) process_J(J, save_file=os.path.join(eigdir, '%d.png' % it), bbox=bbox) process_J_conopt(J, reg=reg_param, save_file=os.path.join(eigdir_conopt, '%d.png' % it), bbox=bbox)
MIT
notebooks/mog-eigval-dist.ipynb
LMescheder/TheNumericsOfGANs
Getting Started with TensorFlow
import tensorflow as tf # Create TensorFlow object called tensor hello_constant = tf.constant('Hello World!') with tf.Session() as sess: # Run the tf.constant operation in the session output = sess.run(hello_constant) print(output); A = tf.constant(1234) B = tf.constant([123, 456, 789]) C = tf.constant([ [123, 145, 789], [222, 333, 444] ]) print(A) # A "TensorFlow Session", as shown above, is an environment for running a graph. # The session is in charge of allocating the operations to GPU(s) and/or CPU(s), including remote machines. # Let’s see how you use it. with tf.Session() as sess: output = sess.run(A) print(output) # Sadly you can’t just set x to your dataset and put it in TensorFlow, # because over time you'll want your TensorFlow model to take in different datasets with different parameters. # You need tf.placeholder()! # tf.placeholder() returns a tensor that gets its value from data passed to the tf.session.run() function, # allowing you to set the input right before the session runs. # Use the feed_dict parameter in tf.session.run() to set the placeholder tensor. # The above example shows the tensor x being set to the string "Hello, world". # It's also possible to set more than one tensor using feed_dict as shown below. x = tf.placeholder(tf.string) with tf.Session() as sess: output = sess.run(x, feed_dict={x: "Hello World"}) x = tf.placeholder(tf.string) y = tf.placeholder(tf.int32) z = tf.placeholder(tf.float32) with tf.Session() as sess: output = sess.run(x, feed_dict={x: 'Test String', y: 123, z: 45.67}) # Applying math tf.multiply() tf.subtract() tf.add() # Sometimes the inputs have to the casted in that regard tf.cast(tf.const(1), tf.float64) # constants and placeholder are not mutable!!! # --> variables: tf.Variable() # --> needs to be initialized by tf.global_variables_initializer() # --> good practice is to randomly initialze weights: tf_truncated_normal() # Example for classification: weights = tf.Variable(tf.truncated_normal((n_features, n_labels))) # Zero method to initialize any variable with zeros (e.g the bias terms) bias = tf.Variable(tf.zeros(n_labels)) # Multiplication for matrices: tf.matmul() # Softmax function tf.nn.softmax() # Arbitrary dimension placeholders # Features and Labels (e.g. for Neural Networks) features = tf.placeholder(tf.float32, [None, n_input]) labels = tf.placeholder(tf.float32, [None, n_classes]) # Relu function (Activation function) tf.nn.relu() # Sticking hidden layers together # Hidden Layer with ReLU activation function hidden_layer = tf.add(tf.matmul(features, hidden_weights), hidden_biases) hidden_layer = tf.nn.relu(hidden_layer) output = tf.add(tf.matmul(hidden_layer, output_weights), output_biases) # Variables have to be initialized as well in order to use them in the session tf.global_variables_initializer()
_____no_output_____
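Two of the note-style calls in the cell above are not real TensorFlow 1.x functions: `tf.const` should be `tf.constant` and `tf_truncated_normal()` should be `tf.truncated_normal()`. A small self-contained sketch of the ideas summarized above (placeholders fed via `feed_dict`, casting, and a randomly initialized weight variable):

```python
import tensorflow as tf

# Placeholders receive their values at run time through feed_dict
a = tf.placeholder(tf.int32)
b = tf.placeholder(tf.int32)

# Basic math ops; cast to float before dividing
total = tf.add(a, b)
ratio = tf.divide(tf.cast(a, tf.float64), tf.cast(b, tf.float64))

# Variables are mutable and are usually initialized randomly
weights = tf.Variable(tf.truncated_normal((3, 2)))
bias = tf.Variable(tf.zeros(2))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run([total, ratio], feed_dict={a: 10, b: 4}))
    print(sess.run(weights))
```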
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
Build a Neural Network with TensorFlow
# Coding example for building a neural network with tensorflow # Quiz Solution import tensorflow as tf output = None hidden_layer_weights = [ [0.1, 0.2, 0.4], [0.4, 0.6, 0.6], [0.5, 0.9, 0.1], [0.8, 0.2, 0.8]] out_weights = [ [0.1, 0.6], [0.2, 0.1], [0.7, 0.9]] # Weights and biases weights = [ tf.Variable(hidden_layer_weights), tf.Variable(out_weights)] biases = [ tf.Variable(tf.zeros(3)), tf.Variable(tf.zeros(2))] # Input features = tf.Variable([[1.0, 2.0, 3.0, 4.0], [-1.0, -2.0, -3.0, -4.0], [11.0, 12.0, 13.0, 14.0]]) # TODO: Create Model hidden_layer = tf.add(tf.matmul(features, weights[0]), biases[0]) hidden_layer = tf.nn.relu(hidden_layer) logits = tf.add(tf.matmul(hidden_layer, weights[1]), biases[1]) # TODO: save and print session results on variable output with tf.Session() as sess: sess.run(tf.global_variables_initializer()) output = sess.run(logits) print(output)
_____no_output_____
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
Deep Neural Networks in TensorFlow
# For stacking muliple layers --> Deep NN # Store layers weight & bias weights = { 'hidden_layer': tf.Variable(tf.random_normal([n_input, n_hidden_layer])), 'out': tf.Variable(tf.random_normal([n_hidden_layer, n_classes])) } biases = { 'hidden_layer': tf.Variable(tf.random_normal([n_hidden_layer])), 'out': tf.Variable(tf.random_normal([n_classes])) } # Example for input an image #The MNIST data is made up of 28px by 28px images with a single channel. # The tf.reshape() function above reshapes the 28px by 28px matrices in x into row vectors of 784px. # tf Graph input x = tf.placeholder("float", [None, 28, 28, 1]) y = tf.placeholder("float", [None, n_classes]) x_flat = tf.reshape(x, [-1, n_input]) # Builidng the model # Hidden layer with RELU activation layer_1 = tf.add(tf.matmul(x_flat, weights['hidden_layer']),\ biases['hidden_layer']) layer_1 = tf.nn.relu(layer_1) # Output layer with linear activation logits = tf.add(tf.matmul(layer_1, weights['out']), biases['out']) # Define the optimizer and the cost function # Define loss and optimizer cost = tf.reduce_mean(\ tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)) optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\ .minimize(cost) # How to run the actual session in TF # Initializing the variables init = tf.global_variables_initializer() # Launch the graph with tf.Session() as sess: sess.run(init) # Training cycle for epoch in range(training_epochs): total_batch = int(mnist.train.num_examples/batch_size) # Loop over all batches for i in range(total_batch): batch_x, batch_y = mnist.train.next_batch(batch_size) # Run optimization op (backprop) and cost op (to get loss value) sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
_____no_output_____
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
Saving Variables and Trained Models and Loading Them Back. You save the particular **session** in a file.
import tensorflow as tf # The file path to save the data save_file = './model.ckpt' # Two Tensor Variables: weights and bias weights = tf.Variable(tf.truncated_normal([2, 3])) bias = tf.Variable(tf.truncated_normal([3])) # Class used to save and/or restore Tensor Variables saver = tf.train.Saver() with tf.Session() as sess: # Initialize all the Variables sess.run(tf.global_variables_initializer()) # Show the values of weights and bias print('Weights:') print(sess.run(weights)) print('Bias:') print(sess.run(bias)) # Save the model saver.save(sess, save_file) # Loading the variables back # Remove the previous weights and bias tf.reset_default_graph() # Two Variables: weights and bias weights = tf.Variable(tf.truncated_normal([2, 3])) bias = tf.Variable(tf.truncated_normal([3])) # Class used to save and/or restore Tensor Variables saver = tf.train.Saver() with tf.Session() as sess: # Load the weights and bias saver.restore(sess, save_file) # Show the values of weights and bias print('Weight:') print(sess.run(weights)) print('Bias:') print(sess.run(bias))
_____no_output_____
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
... the same works for models: just train a NN as shown above and save the session afterwards. Dropout for Regularization in TensorFlow
# In tensorflow, dropout is just another "layer" in the model #During training, a good starting value for keep_prob is 0.5. #During testing, use a keep_prob value of 1.0 to keep all units and maximize the power of the model. keep_prob = tf.placeholder(tf.float32) # probability to keep units hidden_layer = tf.add(tf.matmul(features, weights[0]), biases[0]) hidden_layer = tf.nn.relu(hidden_layer) hidden_layer = tf.nn.dropout(hidden_layer, keep_prob) logits = tf.add(tf.matmul(hidden_layer, weights[1]), biases[1])
_____no_output_____
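A short sketch of how the `keep_prob` placeholder described above is usually fed differently during training and evaluation; the layer sizes below are made up for illustration:

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 4])
keep_prob = tf.placeholder(tf.float32)   # probability of keeping a unit

weights = tf.Variable(tf.truncated_normal((4, 3)))
bias = tf.Variable(tf.zeros(3))

hidden = tf.nn.relu(tf.add(tf.matmul(x, weights), bias))
hidden = tf.nn.dropout(hidden, keep_prob)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = [[1.0, 2.0, 3.0, 4.0]]
    # Training: drop roughly half the activations (survivors are scaled by 1/keep_prob)
    print(sess.run(hidden, feed_dict={x: batch, keep_prob: 0.5}))
    # Evaluation: keep every unit
    print(sess.run(hidden, feed_dict={x: batch, keep_prob: 1.0}))
```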
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
Convolutional Neural Network (CNN)
# Note the output shape of conv will be [1, 16, 16, 20]. # It's 4D to account for batch size, but more importantly, it's not [1, 14, 14, 20]. # This is because the padding algorithm TensorFlow uses is not exactly the same as the one above. # An alternative algorithm is to switch padding from 'SAME' to 'VALID' input = tf.placeholder(tf.float32, (None, 32, 32, 3)) filter_weights = tf.Variable(tf.truncated_normal((8, 8, 3, 20))) # (height, width, input_depth, output_depth) filter_bias = tf.Variable(tf.zeros(20)) strides = [1, 2, 2, 1] # (batch, height, width, depth) padding = 'SAME' conv = tf.nn.conv2d(input, filter_weights, strides, padding) + filter_bias
_____no_output_____
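The output shapes quoted in the comments above follow from TensorFlow's documented padding rules: with 'SAME' padding the spatial output size is ceil(input / stride), while with 'VALID' it is ceil((input - filter + 1) / stride). A small sketch for the 32x32 input, 8x8 filter and stride 2 used above:

```python
import math

def conv_output_size(input_size, filter_size, stride, padding):
    # TensorFlow's spatial output-size rules for tf.nn.conv2d
    if padding == 'SAME':
        return math.ceil(input_size / stride)
    if padding == 'VALID':
        return math.ceil((input_size - filter_size + 1) / stride)
    raise ValueError(padding)

for pad in ('SAME', 'VALID'):
    size = conv_output_size(32, 8, 2, pad)
    print(pad, '->', size, 'x', size)    # SAME -> 16 x 16, VALID -> 13 x 13
```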
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
Example code for constructing a CNN
# Load data set # Batch, scale and one-hot-encode it # Set Parameters from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets(".", one_hot=True, reshape=False) import tensorflow as tf # Parameters learning_rate = 0.00001 epochs = 10 batch_size = 128 # Number of samples to calculate validation and accuracy # Decrease this if you're running out of memory to calculate accuracy test_valid_size = 256 # Network Parameters n_classes = 10 # MNIST total classes (0-9 digits) dropout = 0.75 # Dropout, probability to keep units # Define and store layers and biases # Store layers weight & bias weights = { 'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])), 'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])), 'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])), 'out': tf.Variable(tf.random_normal([1024, n_classes]))} biases = { 'bc1': tf.Variable(tf.random_normal([32])), 'bc2': tf.Variable(tf.random_normal([64])), 'bd1': tf.Variable(tf.random_normal([1024])), 'out': tf.Variable(tf.random_normal([n_classes]))} # Apply Convolution (i.e. create a convolution layer) # The tf.nn.conv2d() function computes the convolution against weight W def conv2d(x, W, b, strides=1): x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME') x = tf.nn.bias_add(x, b) return tf.nn.relu(x) #In TensorFlow, strides is an array of 4 elements; the first element in this array indicates the stride for batch #and last element indicates stride for features. #It's good practice to remove the batches or features you want to skip from the data set rather than use a stride to skip them. #You can always set the first and last element to 1 in strides in order to use all batches and features. #The middle two elements are the strides for height and width respectively. #I've mentioned stride as one number because you usually have a square stride where height = width. #When someone says they are using a stride of 3, they usually mean tf.nn.conv2d(x, W, strides=[1, 3, 3, 1]) # Max Pooling def maxpool2d(x, k=2): return tf.nn.max_pool( x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME') # The tf.nn.max_pool() function does exactly what you would expect, # it performs max pooling with the ksize parameter as the size of the filter. 
# Sticking the model together def conv_net(x, weights, biases, dropout): # Layer 1 - 28*28*1 to 14*14*32 conv1 = conv2d(x, weights['wc1'], biases['bc1']) conv1 = maxpool2d(conv1, k=2) # Layer 2 - 14*14*32 to 7*7*64 conv2 = conv2d(conv1, weights['wc2'], biases['bc2']) conv2 = maxpool2d(conv2, k=2) # Fully connected layer - 7*7*64 to 1024 fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]]) # The reshape step is to flatten the filter layers fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1']) fc1 = tf.nn.relu(fc1) fc1 = tf.nn.dropout(fc1, dropout) # Output Layer - class prediction - 1024 to 10 out = tf.add(tf.matmul(fc1, weights['out']), biases['out']) return out # Run the session in tensorflow # tf Graph input x = tf.placeholder(tf.float32, [None, 28, 28, 1]) y = tf.placeholder(tf.float32, [None, n_classes]) keep_prob = tf.placeholder(tf.float32) # Model logits = conv_net(x, weights, biases, keep_prob) # Define loss and optimizer cost = tf.reduce_mean(\ tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)) optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\ .minimize(cost) # Accuracy correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) # Initializing the variables init = tf. global_variables_initializer() # Launch the graph with tf.Session() as sess: sess.run(init) for epoch in range(epochs): for batch in range(mnist.train.num_examples//batch_size): batch_x, batch_y = mnist.train.next_batch(batch_size) sess.run(optimizer, feed_dict={ x: batch_x, y: batch_y, keep_prob: dropout}) # Calculate batch loss and accuracy loss = sess.run(cost, feed_dict={ x: batch_x, y: batch_y, keep_prob: 1.}) valid_acc = sess.run(accuracy, feed_dict={ x: mnist.validation.images[:test_valid_size], y: mnist.validation.labels[:test_valid_size], keep_prob: 1.}) print('Epoch {:>2}, Batch {:>3} -' 'Loss: {:>10.4f} Validation Accuracy: {:.6f}'.format( epoch + 1, batch + 1, loss, valid_acc)) # Calculate Test Accuracy test_acc = sess.run(accuracy, feed_dict={ x: mnist.test.images[:test_valid_size], y: mnist.test.labels[:test_valid_size], keep_prob: 1.}) print('Testing Accuracy: {}'.format(test_acc))
_____no_output_____
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
LeNet Architecture. Load Data: load the MNIST data, which comes pre-loaded with TensorFlow. You do not need to modify this section.
from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", reshape=False) X_train, y_train = mnist.train.images, mnist.train.labels X_validation, y_validation = mnist.validation.images, mnist.validation.labels X_test, y_test = mnist.test.images, mnist.test.labels assert(len(X_train) == len(y_train)) assert(len(X_validation) == len(y_validation)) assert(len(X_test) == len(y_test)) print() print("Image Shape: {}".format(X_train[0].shape)) print() print("Training Set: {} samples".format(len(X_train))) print("Validation Set: {} samples".format(len(X_validation))) print("Test Set: {} samples".format(len(X_test)))
_____no_output_____
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
The data is already split into training, validation and test sets; here the images are padded from 28x28 to the 32x32 input size expected by LeNet
import numpy as np # Pad images with 0s X_train = np.pad(X_train, ((0,0),(2,2),(2,2),(0,0)), 'constant') X_validation = np.pad(X_validation, ((0,0),(2,2),(2,2),(0,0)), 'constant') X_test = np.pad(X_test, ((0,0),(2,2),(2,2),(0,0)), 'constant') print("Updated Image Shape: {}".format(X_train[0].shape))
_____no_output_____
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
Visualize Data: view a sample from the dataset. You do not need to modify this section.
import random import numpy as np import matplotlib.pyplot as plt %matplotlib inline index = random.randint(0, len(X_train)) image = X_train[index].squeeze() plt.figure(figsize=(1,1)) plt.imshow(image, cmap="gray") print(y_train[index])
_____no_output_____
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
Preprocess Data: shuffle the training data. You do not need to modify this section.
from sklearn.utils import shuffle X_train, y_train = shuffle(X_train, y_train)
_____no_output_____
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
Setup TensorFlow. The `EPOCHS` and `BATCH_SIZE` values affect the training speed and model accuracy.
import tensorflow as tf EPOCHS = 10 BATCH_SIZE = 128
_____no_output_____
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
Input: The LeNet architecture accepts a 32x32xC image as input, where C is the number of color channels. Since MNIST images are grayscale, C is 1 in this case. Architecture: **Layer 1: Convolutional.** The output shape should be 28x28x6. **Activation.** Your choice of activation function. **Pooling.** The output shape should be 14x14x6. **Layer 2: Convolutional.** The output shape should be 10x10x16. **Activation.** Your choice of activation function. **Pooling.** The output shape should be 5x5x16. **Flatten.** Flatten the output shape of the final pooling layer such that it's 1D instead of 3D. The easiest way to do this is by using `tf.contrib.layers.flatten`, which is already imported for you. **Layer 3: Fully Connected.** This should have 120 outputs. **Activation.** Your choice of activation function. **Layer 4: Fully Connected.** This should have 84 outputs. **Activation.** Your choice of activation function. **Layer 5: Fully Connected (Logits).** This should have 10 outputs. Output: Return the logits from the final fully connected layer.
from tensorflow.contrib.layers import flatten def LeNet(x): # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer mu = 0 sigma = 0.1 weights = { 'wc1': tf.Variable(tf.random_normal([5, 5, 1, 6])), 'wc2': tf.Variable(tf.random_normal([5, 5, 6, 16])), 'wd1': tf.Variable(tf.random_normal([400, 120])), 'wd2': tf.Variable(tf.random_normal([120, 84])), 'out': tf.Variable(tf.random_normal([84, 10]))} biases = { 'bc1': tf.Variable(tf.random_normal([6])), 'bc2': tf.Variable(tf.random_normal([16])), 'bd1': tf.Variable(tf.random_normal([120])), 'bd2': tf.Variable(tf.random_normal([84])), 'out': tf.Variable(tf.random_normal([10]))} # TODO: Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6. layer1 = tf.nn.conv2d(x, weights['wc1'], strides=[1, 1, 1, 1], padding="VALID") layer1 = tf.nn.bias_add(layer1, biases['bc1']) # TODO: Activation. layer1 = tf.nn.relu(layer1) # TODO: Pooling. Input = 28x28x6. Output = 14x14x6. layer1 = tf.nn.max_pool(layer1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME") # TODO: Layer 2: Convolutional. Output = 10x10x16. layer2 = tf.nn.conv2d(layer1, weights['wc2'], strides=[1, 1, 1, 1], padding="VALID") layer2 = tf.nn.bias_add(layer2, biases['bc2']) # TODO: Activation. layer2 = tf.nn.relu(layer2) # TODO: Pooling. Input = 10x10x16. Output = 5x5x16. layer2 = tf.nn.max_pool(layer2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME") # TODO: Flatten. Input = 5x5x16. Output = 400. flattenedLayer2 = tf.contrib.layers.flatten(layer2) # TODO: Layer 3: Fully Connected. Input = 400. Output = 120. layer3 = tf.add(tf.matmul(flattenedLayer2, weights['wd1']), biases['bd1']) # TODO: Activation. layer3 = tf.nn.relu(layer3) # TODO: Layer 4: Fully Connected. Input = 120. Output = 84. layer4 = tf.add(tf.matmul(layer3, weights['wd2']), biases['bd2']) # TODO: Activation. layer4 = tf.nn.relu(layer4) # TODO: Layer 5: Fully Connected. Input = 84. Output = 10. logits = tf.add(tf.matmul(layer4, weights['out']), biases['out']) return logits
_____no_output_____
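The layer shapes in the implementation above can be verified with the VALID-convolution and pooling size rules: 32x32 -> 28x28x6 -> 14x14x6 -> 10x10x16 -> 5x5x16, flattened to 400, then 120 -> 84 -> 10. A short sketch of the arithmetic:

```python
# Shape bookkeeping for the LeNet graph defined above (VALID convs, 2x2 pools)
size = 32
size = size - 5 + 1       # conv1, 5x5 filter, VALID padding -> 28
size = size // 2          # 2x2 max pool, stride 2           -> 14
size = size - 5 + 1       # conv2, 5x5 filter, VALID padding -> 10
size = size // 2          # 2x2 max pool, stride 2           -> 5
flat = size * size * 16   # flatten 5x5x16                   -> 400
print(size, flat)         # 5 400
```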
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
Features and Labels. Train LeNet to classify [MNIST](http://yann.lecun.com/exdb/mnist/) data. `x` is a placeholder for a batch of input images. `y` is a placeholder for a batch of output labels.
x = tf.placeholder(tf.float32, (None, 32, 32, 1)) y = tf.placeholder(tf.int32, (None)) one_hot_y = tf.one_hot(y, 10)
_____no_output_____
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
Training Pipeline. Create a training pipeline that uses the model to classify MNIST data.
rate = 0.001 logits = LeNet(x) cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits) loss_operation = tf.reduce_mean(cross_entropy) optimizer = tf.train.AdamOptimizer(learning_rate = rate) training_operation = optimizer.minimize(loss_operation)
_____no_output_____
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
Model Evaluation. Evaluate the loss and accuracy of the model for a given dataset.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1)) accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) saver = tf.train.Saver() def evaluate(X_data, y_data): num_examples = len(X_data) total_accuracy = 0 sess = tf.get_default_session() for offset in range(0, num_examples, BATCH_SIZE): batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE] accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y}) total_accuracy += (accuracy * len(batch_x)) return total_accuracy / num_examples
_____no_output_____
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
Train the Model. Run the training data through the training pipeline to train the model. Before each epoch, shuffle the training set. After each epoch, measure the loss and accuracy of the validation set. Save the model after training.
with tf.Session() as sess: sess.run(tf.global_variables_initializer()) num_examples = len(X_train) print("Training...") print() for i in range(EPOCHS): X_train, y_train = shuffle(X_train, y_train) for offset in range(0, num_examples, BATCH_SIZE): end = offset + BATCH_SIZE batch_x, batch_y = X_train[offset:end], y_train[offset:end] sess.run(training_operation, feed_dict={x: batch_x, y: batch_y}) validation_accuracy = evaluate(X_validation, y_validation) print("EPOCH {} ...".format(i+1)) print("Validation Accuracy = {:.3f}".format(validation_accuracy)) print() saver.save(sess, './lenet') print("Model saved")
_____no_output_____
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
Evaluate the Model (on the test set). Once you are completely satisfied with your model, evaluate its performance on the test set. Be sure to only do this once! If you were to measure the performance of your trained model on the test set, then improve your model, and then measure the performance of your model on the test set again, that would invalidate your test results. You wouldn't get a true measure of how well your model would perform against real data.
with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('.')) test_accuracy = evaluate(X_test, y_test) print("Test Accuracy = {:.3f}".format(test_accuracy))
_____no_output_____
MIT
TensorFlowIntro/.ipynb_checkpoints/TensorFlowIntroduction-checkpoint.ipynb
dschmoeller/03TrafficSignClassifierCNN
pip install pydicom # Import tensorflow import logging import tensorflow as tf import keras.backend as K # Helper libraries import math import numpy as np import pandas as pd import pydicom import os import sys import time # Imports for dataset manipulation from sklearn.model_selection import train_test_split from keras.preprocessing.image import ImageDataGenerator # Improve progress bar display import tqdm import tqdm.auto tqdm.tqdm = tqdm.auto.tqdm #tf.enable_eager_execution() #comment this out if causing errors logger = tf.get_logger() logger.setLevel(logging.DEBUG) ### SET MODEL CONFIGURATIONS ### # Data Loading CSV_PATH = 'label_data/CCC_clean.csv' IMAGE_BASE_PATH = './data/' test_size_percent = 0.15 # percent of total data reserved for testing print(IMAGE_BASE_PATH) # Data Augmentation mirror_im = False # Loss lambda_coord = 5 epsilon = 0.00001 # Learning step_size = 0.00001 BATCH_SIZE = 5 num_epochs = 1 # Saving shape_path = 'trained_model/model_shape.json' weight_path = 'trained_model/model_weights.h5' # TensorBoard tb_graph = False tb_update_freq = 'batch' ### GET THE DATASET AND PREPROCESS IT ### print("Loading and processing data\n") data_frame = pd.read_csv(CSV_PATH) """ Construct numpy ndarrays from the loaded csv to use as training and testing datasets. """ # zip all points for each image label together into a tuple points = zip(data_frame['start_x'], data_frame['start_y'], data_frame['end_x'], data_frame['end_y']) img_paths = data_frame['imgPath'] def path_to_image(path): """ Load a matrix of pixel values from the DICOM image stored at the input path. @param path - string, relative path (from IMAGE_BASE_PATH) to a DICOM file @return image - numpy ndarray (int), 2D matrix of pixel values of the image loaded from path """ # load image from path as numpy array image = pydicom.dcmread(os.path.join(IMAGE_BASE_PATH, path)).pixel_array return image # normalize dicom image pixel values to 0-1 range def normalize_image(img): """ Normalize the pixel values in img to be withing the range of 0 to 1. @param img - numpy ndarray, 2D matrix of pixel values @return img - numpy ndarray (float), 2D matrix of pixel values, every element is valued between 0 and 1 (inclusive) """ img = img.astype(np.float32) img += abs(np.amin(img)) # account for negatives img /= np.amax(img) return img # normalize the ground truth bounding box labels wrt image dimensions def normalize_points(points): """ Normalize values in points to be within the range of 0 to 1. @param points - 1x4 tuple, elements valued in the range of 0 512 (inclusive). This is known from the nature of the dataset used in this program @return - 1x4 numpy ndarray (float), elements valued in range 0 to 1 (inclusive) """ imDims = 512.0 # each image in our dataset is 512x512 points = list(points) for i in range(len(points)): points[i] /= imDims return np.array(points).astype(np.float32) """ Convert the numpy array of paths to the DICOM images to pixel matrices that have been normalized to a 0-1 range. Also normalize the bounding box labels to make it easier for the model to predict on them. """ # apply preprocessing functions points = map(normalize_points, points) imgs = map(path_to_image, img_paths) imgs = map(normalize_image, imgs) print(list(imgs)) # reshape input image data to 4D shape (as expected by the model) # and cast all data to np arrays (just in case) imgs = np.array(imgs) points = np.array(points) imgs = imgs.reshape((-1, 512, 512, 1))
_____no_output_____
Apache-2.0
ipynbs/reshape_demo.ipynb
zbytes/fsqs-tips-tricks-notes
20 Sept 2019. RULES: Date: Level 2 heading; Example Heading: Level 3 heading; Method Heading: Level 4 heading. References: 1. Forester W. Isen and J. Moura, DSP for MATLAB and LabVIEW, Volume II; 2. H. K. Dass, Advanced Engineering Mathematics; 3. Forester W. Isen and J. Moura, DSP for MATLAB and LabVIEW, Volume I; 4. John G. Proakis and Dimitris G. Manolakis, Digital Signal Processing. Imports
import numpy as np
from sympy import oo
import math
import sympy as sp
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
from IPython.display import display
from IPython.display import display_latex
from sympy import latex
from scipy import signal
from datetime import datetime
_____no_output_____
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
Setup
sp.init_printing(use_latex = True) z, f, i = sp.symbols('z f i') x, k = sp.symbols('x k')
_____no_output_____
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
Methods
# Usage: display_equation('u_x', x) def display_equation(idx, symObj): if(isinstance(idx, str)): eqn = '\\[' + idx + ' = ' + latex(symObj) + '\\]' display_latex(eqn, raw=True) else: eqn = '\\[' + latex(idx) + ' = ' + latex(symObj) + '\\]' display_latex(eqn, raw=True) return # Usage: display_full_latex('u_x') def display_full_latex(idx): if(isinstance(idx, str)): eqn = '\\[' + idx + '\\]' display_latex(eqn, raw=True) else: eqn = '\\[' + latex(idx) + '\\]' display_latex(eqn, raw=True) return # Usage: display_full_latex('u_x') def display_full_latex(idx): if(isinstance(idx, str)): eqn = '\\[' + idx + '\\]' display_latex(eqn, raw=True) else: eqn = '\\[' + latex(idx) + '\\]' display_latex(eqn, raw=True) return def ztrans(a, b): F = sp.summation(f/z**k, ( k, a, b )) return F def display_ztrans(f, k, limits = (-4, 4)): F = sp.summation(f/z**k, ( k, -oo, oo )) display_equation('f(k)', f) display_equation('F(k)_{\infty}', F) F = sp.summation(f/z**k, (k, limits[0], limits[1])) display_equation('F(k)_{'+ str(limits[0]) + ',' + str(limits[1]) + '}', F) return def sum_of_GP(a, r): return sp.simplify(a/(1-r)) # Credit: https://www.dsprelated.com/showcode/244.php def zplane(b,a,filename=None): """Plot the complex z-plane given a transfer function. """ # get a figure/plot ax = plt.subplot(111) # create the unit circle uc = patches.Circle((0,0), radius=1, fill=False, color='black', ls='dashed') ax.add_patch(uc) # The coefficients are less than 1, normalize the coeficients if np.max(b) > 1: kn = np.max(b) b = b/float(kn) else: kn = 1 if np.max(a) > 1: kd = np.max(a) a = a/float(kd) else: kd = 1 # Get the poles and zeros p = np.roots(a) z = np.roots(b) k = kn/float(kd) # Plot the zeros and set marker properties t1 = plt.plot(z.real, z.imag, 'go', ms=10) plt.setp( t1, markersize=10.0, markeredgewidth=1.0, markeredgecolor='k', markerfacecolor='g') # Plot the poles and set marker properties t2 = plt.plot(p.real, p.imag, 'rx', ms=10) plt.setp( t2, markersize=12.0, markeredgewidth=3.0, markeredgecolor='b', markerfacecolor='b') ax.spines['left'].set_position('center') ax.spines['bottom'].set_position('center') ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) # set the ticks r = 1.5; plt.axis('scaled'); plt.axis([-r, r, -r, r]) ticks = [-1, -.5, .5, 1]; plt.xticks(ticks); plt.yticks(ticks) if filename is None: plt.show() else: plt.savefig(filename) return z, p, k
_____no_output_____
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
Z Transform
display_full_latex('X(z) = \sum_{-\infty}^{\infty} x[n]z^{-n}')
_____no_output_____
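As a concrete instance of the definition above: for the causal sequence x[n] = a^n u[n], the sum is a geometric series that converges to 1/(1 - a z^-1) = z/(z - a) for |z| > |a|. A small SymPy sketch, independent of the helper functions defined earlier:

```python
import sympy as sp

n, z, a = sp.symbols('n z a', positive=True)

# X(z) = sum_{n=0}^{oo} a**n * z**(-n), a geometric series
X = sp.summation(a**n * z**(-n), (n, 0, sp.oo))
print(sp.simplify(X))   # Piecewise result: 1/(1 - a/z) = z/(z - a) when |a/z| < 1
```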
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
Tests Convert Symbolic to Numeric
f = x**2 f = sp.lambdify(x, f, 'numpy') f(2) display_equation('f(x)', sp.summation(3**k, ( k, -oo, oo ))) display_equation('F(z)', sp.summation(3**k/z**k, ( k, -oo, oo )))
_____no_output_____
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
Partial Fractions
f = 1/(x**2 + x - 6) display_equation('f(x)', f) f = sp.apart(f) display_equation('f(x)_{canonical}', f)
_____no_output_____
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
Piecewise
f1 = 5**k f2 = 3**k f = sp.Piecewise((f1, k < 0), (f2, k >= 0)) display_equation('f(k)', f)
_____no_output_____
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
21 Sept 2019 Positive Time / Causal
f1 = k **2 f2 = 3**k f = f1 * sp.Heaviside(k) # or #f = sp.Piecewise((0, k < 0), (f1, k >= 0)) display_equation('f(k)', f) sp.plot(f, (k, -10, 10))
_____no_output_____
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
Stem Plot
x = np.linspace(0.1, 2 * np.pi, 41) y = np.sin(x) plt.stem(x, y) plt.show()
_____no_output_____
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
zplane Plot
b = np.array([1, 1, 0, 0]) a = np.array([1, 1, 1]) zplane(b,a)
_____no_output_____
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
Filter
g = (1 + z**-2)/(1-1.2*z**-1+0.81*z**-2) display_equation('F(z)', g) b = np.array([1,1]) a = np.array([1,-1.2,0.81]) x = np.ones((1, 8)) # Response y = signal.lfilter(b, a, x) # Reverse signal.lfilter(a, b, y)
_____no_output_____
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
[1] Example 2.2
radFreq = np.arange(0, 2*np.pi, 2*np.pi/499) g = np.exp(1j*radFreq) Zxform= 1/(1-0.7*g**(-1)) plt.plot(radFreq/np.pi,abs(Zxform)) plt.title('Graph') plt.xlabel('Frequency, Units of Ο€') plt.ylabel('H(x)') plt.grid(True) plt.show()
_____no_output_____
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
[2] Chapter 19, Example 5
f = 3**(-k) display_ztrans(f, k, (-4, 3))
_____no_output_____
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
[2] Example 9
f1 = 5**k f2 = 3**k f = sp.Piecewise((f1, k < 0), (f2, k >= 0)) display_ztrans(f, k, (-3, 3)) p = sum_of_GP(z/5, z/5) q = sum_of_GP(1, 3/z) display_equation('F(z)', sp.ratsimp(q + p))
_____no_output_____
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
28 Sept, 2019. [3] Folding formula: f_perceived = f - f_s * NINT(f / f_s). 9 Oct, 2019. [3] Section 4.3 Equations
display_full_latex('F \\rightarrow analog') display_full_latex('f \\rightarrow discrete') display_full_latex('Nyquist frequency = F_s') display_full_latex('Folding frequency = \\frac{F_s}{2}') display_full_latex('F_{max} = \\frac{F_s}{2}') display_full_latex('T = \\frac{1}{F_s}') display_full_latex('f = \\frac{F}{F_s}') display_full_latex('f_k = \\frac{k}{N}') display_full_latex('F_k = F_0 + kF_s, k = \\pm 1, \\pm 2, ...') display_full_latex('x_a(t) = Asin(2\\pi Ft + \\theta)') display_full_latex('x(n) = Asin(\\frac{2\\pi nk}{N} + \\theta)') display_full_latex('x(n) = Asin(2\\pi fn + \\theta)') display_full_latex('x(n) = x_a (nT) = Acos(2\\pi \\frac{F_0 + kF_s}{F_s} n + \\theta)') display_full_latex('t = nT') display_full_latex('\\Omega = 2\\pi F') display_full_latex('\\omega = 2\\pi f') display_full_latex('\\omega = \\Omega T') display_full_latex('x_q(n) = Q[x(n)]') display_full_latex('e_q(n) = x_q(n) - x(n)') display_full_latex('Interpolation function, g(t) = \\frac{sin2\\pi Bt}{2\\pi Bt}') display_full_latex('x_a(t) = \\sum^\\infty _{n = - \\infty} x_a(\\frac{n}{F_s}).g(t - \\frac{n}{F_s})') display_full_latex('\\Delta = \\frac{x_{max} - x_{min}}{L-1}, where L = Number of quantization levels') display_full_latex('-\\frac{\\Delta}{2} \\leq e_q(n) \\leq \\frac{\\Delta}{2}') display_full_latex('b \\geq log_2 L') display_full_latex('SQNR = \\frac{3}{2}.2^{2b}') display_full_latex('SQNR(dB) = 10log_{10}SQNR = 1.76 + 6.02b') x = np.arange(0, 10, 1) y = np.power(0.9, x) * np.heaviside(np.power(0.9, x), 1) display_full_latex('x_a(t) = 0.9^t') display_full_latex('x(n) = 0.9^n') plt.stem(x, y) plt.plot(x, y, 'g-') plt.xticks(np.arange(0, 10, 1)) plt.yticks(np.arange(0, 1.2, 0.1)) plt.xlabel('n') plt.ylabel('x(n)') plt.grid(True) plt.show()
_____no_output_____
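A quick numeric illustration of the folding formula noted above: a 7 kHz analog tone sampled at F_s = 5 kHz is perceived at 7 - 5*NINT(7/5) = 2 kHz. A minimal sketch:

```python
import numpy as np

def perceived_frequency(f, fs):
    # f_perceived = f - fs * NINT(f / fs), NINT = nearest integer
    return f - fs * np.rint(f / fs)

print(perceived_frequency(7000, 5000))   # 2000.0 Hz
print(perceived_frequency(9000, 5000))   # -1000.0 Hz, i.e. aliased to 1 kHz
```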
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
14 Oct, 2019
n = sp.symbols('n') x = np.arange(0, 10, 1) y = x * np.heaviside(x, 1) f = sp.Piecewise((0, n < 0), (n, n >= 0)) display_equation('u_r(n)', f) plt.stem(x, y) plt.plot(x, y, 'g-') plt.xticks(np.arange(0, 10, 1)) plt.yticks(np.arange(0, 10, 1)) plt.xlabel('n') plt.ylabel('x(n)') plt.grid(True) plt.show() display_full_latex('E = \\sum^\\infty _{n = -\\infty} x|(n)|^2') display_full_latex('P = \\lim_{N \\rightarrow \\infty} \\frac{1}{2N + 1} \\sum^ N _{n = -N} x|(n)|^2')
_____no_output_____
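To make the two definitions above concrete, here is the energy and the finite-window average power of the ramp samples plotted above:

```python
import numpy as np

# Energy and finite-window average power of the unit-ramp samples above
x = np.arange(0, 10, 1).astype(float)

energy = np.sum(np.abs(x)**2)    # E = sum |x(n)|^2
power = energy / len(x)          # average power over this finite window
print(energy, power)             # 285.0 28.5
```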
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
16 Oct, 2019 General form of the input-output relationships
display_full_latex('y(n) = -\\sum^N _{k = 1}a_k y(n-k) + \\sum^M _{k = 0}b_k x(n-k)')
_____no_output_____
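This general difference equation maps directly onto `scipy.signal.lfilter(b, a, x)`, with `a = [1, a_1, ..., a_N]` and `b = [b_0, ..., b_M]`. A small sketch comparing `lfilter` against the recursion written out explicitly for y(n) = 0.5 y(n-1) + x(n):

```python
import numpy as np
from scipy import signal

b = [1.0]          # feed-forward coefficients b_k
a = [1.0, -0.5]    # feed-back coefficients [1, a_1]; note the sign convention

x = np.ones(8)
y_lfilter = signal.lfilter(b, a, x)

# The same recursion written out explicitly: y(n) = 0.5*y(n-1) + x(n)
y_manual = np.zeros_like(x)
for m in range(len(x)):
    y_manual[m] = (0.5 * y_manual[m - 1] if m > 0 else 0.0) + x[m]

print(np.allclose(y_lfilter, y_manual))   # True
```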
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
[4] Example 3.2
h = np.array([1, 2, 1, -1]) x = np.array([1, 2, 3, 1]) y = np.convolve(h, x, mode='full') #y = signal.convolve(h, x, mode='full', method='auto') print(y) fig, (ax_orig, ax_h, ax_x) = plt.subplots(3, 1, sharex=True) ax_orig.plot(h) ax_orig.set_title('Impulse Response') ax_orig.margins(0, 0.1) ax_h.plot(x) ax_h.set_title('Input Signal') ax_h.margins(0, 0.1) ax_x.plot(y) ax_x.set_title('Output') ax_x.margins(0, 0.1) fig.tight_layout() fig.show()
[ 1 4 8 8 3 -2 -1]
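A quick check of the result above: the full convolution of a length-4 impulse response with a length-4 input has length 4 + 4 - 1 = 7, and its end samples are simply the products of the end samples of h and x.

```python
import numpy as np

h = np.array([1, 2, 1, -1])
x = np.array([1, 2, 3, 1])
y = np.convolve(h, x)            # 'full' convolution by default

print(len(y) == len(h) + len(x) - 1)                 # True: length 7
print(y[0] == h[0] * x[0], y[-1] == h[-1] * x[-1])   # True True
```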
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
17 Oct, 2019 Sum of a GP (geometric progression) with common ratio r and first term a, starting from the zeroth term
a, r = sp.symbols('a r') s = sp.summation(a*r**k, ( k, 0, n )) display_equation('S_n', s)
_____no_output_____
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
Sum of positive powers of a
a = sp.symbols('a') s = sp.summation(a**k, ( k, 0, n )) display_equation('S_n', s)
_____no_output_____
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
[3] 4.12.3 Single Pole IIR
SR = 24 b = 1 p = 0.8 y = np.zeros((1, SR)).ravel() x = np.zeros((1, SR + 1)).ravel() x[0] = 1 y[0] = b * x[0] for n in range(1, SR): y[n] = b * x[n] + p * y[n - 1] plt.stem(y)
_____no_output_____
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
Copying the method above for [4] 4.1 Averaging
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) y[0] = b * x[0] for n in range(1, len(x)): y[n] = (n/(n + 1)) * y[n - 1] + (1/(n + 1)) * x[n] print(y[n], '\n')
5.5
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
My Recursive Averaging Implementation
def avg(x, n): if (n < 0): return 0 else: return (n/(n + 1)) * avg(x, n - 1) + (1/(n + 1)) * x[n] x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) average = avg(x, len(x) - 1) print(average)
5.5
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
Performance Comparison
from timeit import timeit code_rec = ''' import numpy as np def avg(x, n): if (n < 0): return 0 else: return (n/(n + 1)) * avg(x, n - 1) + (1/(n + 1)) * x[n] x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) average = avg(x, len(x) - 1) ''' code_py = ''' import numpy as np x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) average = sum(x, len(x) - 1) / len(x) ''' code_loop = ''' import numpy as np x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) sum = 0 for i in x: sum += i average = sum/len(x) ''' running_time_rec = timeit(code_rec, number = 100) / 100 running_time_py = timeit(code_py, number = 100) / 100 running_time_loop = timeit(code_loop, number = 100) / 100 print("Running time using my recursive average function: \n",running_time_rec, '\n') print("Running time using python sum function: \n",running_time_py) print("Running time using loop python function: \n",running_time_loop)
Running time using my recursive average function: 9.264100000000001e-05 Running time using python sum function: 4.1410000000000005e-05 Running time using loop python function: 7.479999999999987e-06
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
[4] Example 4.1
def rec_sqrt(x, n): if (n == -1): return 1 else: return (1/2) * (rec_sqrt(x, n - 1) + (x[n]/rec_sqrt(x, n - 1))) A = 2 x = np.ones((1, 5)).ravel() * A print(rec_sqrt(x, len(x) - 1)) b = np.array([1, 1, 1, 1, 1]) a = np.array([1, 0, 0]) zplane(b,a)
_____no_output_____
MIT
.ipynb_checkpoints/DSP-checkpoint.ipynb
Valentine-Efagene/Jupyter-Notebooks
Scripting languages – Python: Modules and packages. M1 Ingénierie Multilingue – INaLCO. clement.plancq@ens.fr Modules and packages let you add functionality to Python. A module is a file (```.py```) that contains functions and/or classes, and documentation of course. A package is a directory containing modules and subdirectories. It is as simple as that. Of course, once you get into the details it is a bit more complicated. A module
%%file operations.py
# -*- coding: utf-8 -*-
"""
Module for the lesson on modules
Arithmetic operations
"""

def addition(a, b):
    """ Simply an addition: a + b """
    return a + b

def soustraction(a, b):
    """ A subtraction: a - b """
    return a - b
_____no_output_____
MIT
modules.ipynb
LoicGrobol/python-im
To use it, you can: * import it by its name
import operations operations.addition(2, 4)
_____no_output_____
MIT
modules.ipynb
LoicGrobol/python-im
* import it and change its name
import operations as op op.addition(2, 4)
_____no_output_____
MIT
modules.ipynb
LoicGrobol/python-im
* import part of the module
from operations import addition addition(2, 4)
_____no_output_____
MIT
modules.ipynb
LoicGrobol/python-im
* import the whole module
from operations import * addition(2, 4) soustraction(4, 2)
_____no_output_____
MIT
modules.ipynb
LoicGrobol/python-im
In reality, only the functions and/or classes whose names do not start with '_' are imported. Using `import *` is not recommended, because, as you know, "*explicit is better than implicit*", and by adding the functions to the script's namespace you can overwrite existing functions. Add a `print` function to your module to see for yourself (beware: a module is only loaded once, so you will have to restart the kernel or work in a console). Another definition of a module: it is an object of type ``module``.
import operations type(operations)
_____no_output_____
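A small illustration of the point above, assuming a hypothetical helper `_division` were added to `operations.py` (it is not part of the module shown earlier): names starting with '_' are skipped by `import *` but stay reachable through the module object.

```python
# Hypothetical addition to operations.py:
#
#     def _division(a, b):
#         """ A division: a / b """
#         return a / b

from operations import *
import operations

print(hasattr(operations, '_division'))   # True: reachable through the module object
print('_division' in globals())           # False: import * skips names starting with '_'
```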
MIT
modules.ipynb
LoicGrobol/python-im
``import`` adds attributes to the module
import operations print(f"name : {operations.__name__}") print(f"file : {operations.__file__}") print(f"doc :Β {operations.__doc__}")
_____no_output_____
MIT
modules.ipynb
LoicGrobol/python-im
A package
! tree operations_pack
_____no_output_____
MIT
modules.ipynb
LoicGrobol/python-im
A Python package can contain modules, directories and subdirectories, and very often non-Python material: HTML documentation, test data, etc. The top-level directory and every directory containing Python modules must contain an `__init__.py` file. `__init__.py` can be empty, contain initialization code, or define the `__all__` variable.
import operations_pack.simple operations_pack.simple.addition(2, 4) from operations_pack import simple simple.soustraction(4, 2)
_____no_output_____
MIT
modules.ipynb
LoicGrobol/python-im
``__all__`` in ``__init__.py`` defines which modules are imported with ``import *``
from operations_pack.avance import * multi.multiplication(2,4)
_____no_output_____
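For reference, a sketch of what the `__init__.py` of the `avance` sub-package might contain for the `import *` above to expose `multi`; the actual contents of the course package are not shown here, so this is an assumption.

```python
# operations_pack/avance/__init__.py (assumed contents)
# Only the modules listed in __all__ are pulled in by "from operations_pack.avance import *"
__all__ = ['multi']
```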
MIT
modules.ipynb
LoicGrobol/python-im
Where are the modules and packages? For ``import`` to work, the modules must be on the PATH.
import sys sys.path
_____no_output_____
MIT
modules.ipynb
LoicGrobol/python-im
``sys.path`` is a list; you can modify it
sys.path.append("[...]") # the path to the operations_pack directory
sys.path
_____no_output_____
MIT
modules.ipynb
LoicGrobol/python-im
DSPT6 - Adding Data Science to a Web Application. The purpose of this notebook is to demonstrate: - Simple online analysis of data from a user of the Twitoff app or an API - Training a more complicated offline model, and serializing the results for online use
import sqlite3
import pickle
import pandas as pd

# Connect to sqlite database
conn = sqlite3.connect('../twitoff/twitoff.sqlite')

def get_data(query, conn):
    '''Function to get data from SQLite DB'''
    cursor = conn.cursor()
    result = cursor.execute(query).fetchall()

    # Get columns from cursor object
    columns = list(map(lambda x: x[0], cursor.description))

    # Assign to DataFrame
    df = pd.DataFrame(data=result, columns=columns)
    return df

query = '''
SELECT tweet.id, tweet.text, tweet.embedding, user.username
FROM tweet
JOIN user ON tweet.user_id = user.id;
'''

df = get_data(query, conn)
df['embedding_decoded'] = df.embedding.apply(lambda x: pickle.loads(x[2:]))
print(df.shape)
df.head()

df.username.value_counts()

import numpy as np

user1_embeddings = df.embedding_decoded[df.username=='elonmusk']
user2_embeddings = df.embedding_decoded[df.username=='nasa']
embeddings = pd.concat([user1_embeddings, user2_embeddings])
embeddings_df = pd.DataFrame(embeddings.tolist(), columns=[f'dim{i}' for i in range(768)])
labels = np.concatenate([np.ones(len(user1_embeddings)), np.zeros(len(user2_embeddings))])
print(embeddings_df.shape, labels.shape)

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    embeddings_df, labels, test_size=0.25, random_state=42)
print(X_train.shape, X_test.shape)

from sklearn.linear_model import LogisticRegression

lr = LogisticRegression(max_iter=1000)
lr.fit(X_train, y_train)

import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, plot_confusion_matrix

y_pred = lr.predict(X_test)
print(classification_report(y_test, y_pred))
plot_confusion_matrix(lr, X_test, y_test, cmap='Blues')
plt.title('LogReg Confusion Matrix');

pickle.dump(lr, open("../models/logreg.pkl", "wb"))

lr_unpickled = pickle.load(open("../models/logreg.pkl", "rb"))
lr_unpickled

import basilica

BASILICA_KEY=''
BASILICA = basilica.Connection(BASILICA_KEY)

example_embedding = BASILICA.embed_sentence('The MARS rover just reported new and interesting data!', model='twitter')

lr_unpickled.predict([example_embedding])[0]
_____no_output_____
MIT
notebooks/LS333_DSPT6_Model_Demo.ipynb
DrewRust/DSPT6-Twitoff
Working with Watson Machine Learning. This notebook should be run in a Watson Studio project, using **Default Python 3.7.x** runtime environment. **If you are viewing this in Watson Studio and do not see Python 3.7.x in the upper right corner of your screen, please update the runtime now.** It requires service credentials for the following Cloud services: Watson OpenScale and Watson Machine Learning. If you have a paid Cloud account, you may also provision a **Databases for PostgreSQL** or **Db2 Warehouse** service to take full advantage of integration with Watson Studio and continuous learning services. If you choose not to provision this paid service, you can use the free internal PostgreSQL storage with OpenScale, but will not be able to configure continuous learning for your model. The notebook will train, create and deploy a House Price regression model, and configure OpenScale to monitor that deployment in the OpenScale Insights dashboard. Contents: [Setup](setup), [Model building and deployment](model), [OpenScale configuration](openscale), [Quality monitor and feedback logging](quality), [Fairness, drift monitoring and explanations](fairness). Setup: Package installation
import warnings
warnings.filterwarnings('ignore')

!rm -rf /home/spark/shared/user-libs/python3.7*
!pip install --upgrade pandas==1.2.3 --no-cache | tail -n 1
!pip install --upgrade requests==2.23 --no-cache | tail -n 1
!pip install --upgrade numpy==1.20.3 --user --no-cache | tail -n 1
!pip install SciPy --no-cache | tail -n 1
!pip install lime --no-cache | tail -n 1
!pip install --upgrade ibm-watson-machine-learning --user | tail -n 1
!pip install --upgrade ibm-watson-openscale --no-cache | tail -n 1
!pip install --upgrade xgboost==1.3.3 --no-cache | tail -n 1
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Provision services and configure credentials

If you have not already, provision an instance of IBM Watson OpenScale using the [OpenScale link in the Cloud catalog](https://cloud.ibm.com/catalog/services/watson-openscale). Your Cloud API key can be generated by going to the [**Users** section of the Cloud console](https://cloud.ibm.com/iam/users). From that page, click your name, scroll down to the **API Keys** section, and click **Create an IBM Cloud API key**. Give your key a name and click **Create**, then copy the created key and paste it below.

**NOTE:** You can also get the OpenScale `API_KEY` using the IBM Cloud CLI.

How to install the IBM Cloud (Bluemix) CLI: [instruction](https://console.bluemix.net/docs/cli/reference/ibmcloud/download_cli.htmlinstall_use)

How to get an API key using the CLI:
```
bx login --sso
bx iam api-key-create 'my_key'
```
CLOUD_API_KEY = "***"
IAM_URL="https://iam.ng.bluemix.net/oidc/token"
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
If you have not already, provision an instance of IBM Watson OpenScale using the [OpenScale link in the Cloud catalog](https://cloud.ibm.com/catalog/services/watson-openscale).Your Cloud API key can be generated by going to the [**Users** section of the Cloud console](https://cloud.ibm.com/iam/users). From that page, click your name, scroll down to the **API Keys** section, and click **Create an IBM Cloud API key**. Give your key a name and click **Create**, then copy the created key, generate an IAM token using that key and paste it below. WML credentials example with API key
WML_CREDENTIALS = {
    "url": "https://us-south.ml.cloud.ibm.com",
    "apikey": CLOUD_API_KEY
}
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
WML credentials example using IAM_token

**NOTE**: If IAM_TOKEN is used for authentication and you receive an unauthorized/expired token error at any step, please create a new token and re-initiate client authentication.
# # uncomment this cell if you want to use IAM_TOKEN
# import requests
# from requests.auth import HTTPBasicAuth  # needed for the auth object below

# def generate_access_token():
#     headers = {}
#     headers["Content-Type"] = "application/x-www-form-urlencoded"
#     headers["Accept"] = "application/json"
#     auth = HTTPBasicAuth("bx", "bx")
#     data = {
#         "grant_type": "urn:ibm:params:oauth:grant-type:apikey",
#         "apikey": CLOUD_API_KEY
#     }
#     response = requests.post(IAM_URL, data=data, headers=headers, auth=auth)
#     json_data = response.json()
#     iam_access_token = json_data['access_token']
#     return iam_access_token

# # uncomment this cell if you want to use IAM_TOKEN
# IAM_TOKEN = generate_access_token()
# WML_CREDENTIALS = {
#     "url": "https://us-south.ml.cloud.ibm.com",
#     "token": IAM_TOKEN
# }
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Cloud object storage details

In the next cells, you will need to paste some credentials for Cloud Object Storage. If you haven't worked with COS yet, please visit the [getting started with COS tutorial](https://cloud.ibm.com/docs/cloud-object-storage?topic=cloud-object-storage-getting-started). You can find the `COS_API_KEY_ID` and `COS_RESOURCE_CRN` variables in **_Service Credentials_** in the menu of your COS instance. The COS service credentials used must be created with the _Role_ parameter set to Writer. Later, the training data file will be loaded to the bucket of your instance and used as the training reference in the subscription. The `COS_ENDPOINT` variable can be found in the **_Endpoint_** field of the menu.
COS_API_KEY_ID = "***"
COS_RESOURCE_CRN = "***"  # eg "crn:v1:bluemix:public:cloud-object-storage:global:a/3bf0d9003abfb5d29761c3e97696b71c:d6f04d83-6c4f-4a62-a165-696756d63903::"
COS_ENDPOINT = "***"  # Current list available at https://control.cloud-object-storage.cloud.ibm.com/v2/endpoints
BUCKET_NAME = "***"
training_data_file_name = "house_price_regression.csv"
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
This tutorial can use Databases for PostgreSQL, Db2 Warehouse, or a free internal version of PostgreSQL to create a datamart for OpenScale.

If you have previously configured OpenScale, it will use your existing datamart and will not interfere with any models you are currently monitoring. Do not update the cell below.

If you do not have a paid Cloud account or would prefer not to provision this paid service, you may use the free internal PostgreSQL service with OpenScale. Do not update the cell below.

To provision a new instance of Db2 Warehouse, locate [Db2 Warehouse in the Cloud catalog](https://cloud.ibm.com/catalog/services/db2-warehouse), give your service a name, and click **Create**. Once your instance is created, click the **Service Credentials** link on the left side of the screen. Click the **New credential** button, give your credentials a name, and click **Add**. Your new credentials can be accessed by clicking the **View credentials** button. Copy and paste your Db2 Warehouse credentials into the cell below.

To provision a new instance of Databases for PostgreSQL, locate [Databases for PostgreSQL in the Cloud catalog](https://cloud.ibm.com/catalog/services/databases-for-postgresql), give your service a name, and click **Create**. Once your instance is created, click the **Service Credentials** link on the left side of the screen. Click the **New credential** button, give your credentials a name, and click **Add**. Your new credentials can be accessed by clicking the **View credentials** button. Copy and paste your Databases for PostgreSQL credentials into the cell below.
DB_CREDENTIALS = None
# DB_CREDENTIALS = {"hostname":"","username":"","password":"","database":"","port":"","ssl":True,"sslmode":"","certificate_base64":""}

KEEP_MY_INTERNAL_POSTGRES = True
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Run the notebook

At this point, the notebook is ready to run. You can either run the cells one at a time, or click the **Kernel** option above and select **Restart and Run All** to run all the cells.

Model building and deployment

In this section you will learn how to train an XGBoost regression model (wrapped in a scikit-learn pipeline) and then deploy it as a web service using the Watson Machine Learning service.

Load the training data from github
!rm house_price_regression.csv
!wget https://raw.githubusercontent.com/IBM/watson-openscale-samples/main/IBM%20Cloud/WML/assets/data/house_price/house_price_regression.csv

import pandas as pd
import numpy as np

pd_data = pd.read_csv("house_price_regression.csv")
pd_data.head()
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Explore data Save training data to Cloud Object Storage
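The "Explore data" step itself is not shown in this excerpt; a minimal, optional exploration sketch on the `pd_data` frame loaded above (purely illustrative, not the notebook's original code) could be:

```python
# Quick, optional look at the training data loaded above (illustrative only).
print(pd_data.shape)
print(pd_data.dtypes.value_counts())
pd_data['SalePrice'].describe()
```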
import ibm_boto3
from ibm_botocore.client import Config, ClientError

cos_client = ibm_boto3.resource("s3",
    ibm_api_key_id=COS_API_KEY_ID,
    ibm_service_instance_id=COS_RESOURCE_CRN,
    ibm_auth_endpoint="https://iam.bluemix.net/oidc/token",
    config=Config(signature_version="oauth"),
    endpoint_url=COS_ENDPOINT
)

with open(training_data_file_name, "rb") as file_data:
    cos_client.Object(BUCKET_NAME, training_data_file_name).upload_fileobj(
        Fileobj=file_data
    )
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Create a model
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer

pd_data.dropna(axis=0, subset=['SalePrice'], inplace=True)
label = pd_data.SalePrice
feature_data = pd_data.drop(['SalePrice'], axis=1).select_dtypes(exclude=['object'])

train_X, test_X, train_y, test_y = train_test_split(feature_data.values, label.values, test_size=0.25)

my_imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
train_X = my_imputer.fit_transform(train_X)
test_X = my_imputer.transform(test_X)

from xgboost import XGBRegressor
from sklearn.compose import ColumnTransformer

model = XGBRegressor()
model.fit(train_X, train_y, eval_metric=['error'], eval_set=[(test_X, test_y)], verbose=False)

# make predictions
predictions = model.predict(test_X)

from sklearn.metrics import mean_absolute_error
print("Mean Absolute Error : " + str(mean_absolute_error(predictions, test_y)))
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Wrap XGBoost with a scikit-learn pipeline
from sklearn.pipeline import Pipeline

xgb_model_imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
pipeline = Pipeline(steps=[('Imputer', xgb_model_imputer), ('xgb', model)])

model_xgb = pipeline.fit(train_X, train_y)

# make predictions
predictions = model_xgb.predict(test_X)

from sklearn.metrics import mean_absolute_error
print("Mean Absolute Error : " + str(mean_absolute_error(predictions, test_y)))
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Publish the model
import json
from ibm_watson_machine_learning import APIClient

wml_client = APIClient(WML_CREDENTIALS)
wml_client.version
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Listing all the available spaces
wml_client.spaces.list(limit=10)

WML_SPACE_ID = '***'  # use space id here
wml_client.set.default_space(WML_SPACE_ID)
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Remove existing model and deployment
MODEL_NAME = "house_price_xgbregression"
DEPLOYMENT_NAME = "house_price_xgbregression_deployment"

deployments_list = wml_client.deployments.get_details()
for deployment in deployments_list["resources"]:
    model_id = deployment["entity"]["asset"]["id"]
    deployment_id = deployment["metadata"]["id"]
    if deployment["metadata"]["name"] == DEPLOYMENT_NAME:
        print("Deleting deployment id", deployment_id)
        wml_client.deployments.delete(deployment_id)
        print("Deleting model id", model_id)
        wml_client.repository.delete(model_id)

wml_client.repository.list_models()

training_data_references = [
    {
        "id": "product line",
        "type": "s3",
        "connection": {
            "access_key_id": COS_API_KEY_ID,
            "endpoint_url": COS_ENDPOINT,
            "resource_instance_id": COS_RESOURCE_CRN
        },
        "location": {
            "bucket": BUCKET_NAME,
            "path": training_data_file_name,
        }
    }
]

# Note: if there is a specification-related exception or the specification ID is None,
# then use "default_py3.8" instead of "default_py3.7_opence"
software_spec_uid = wml_client.software_specifications.get_id_by_name("default_py3.7_opence")
print("Software Specification ID: {}".format(software_spec_uid))

model_props = {
    wml_client._models.ConfigurationMetaNames.NAME: "{}".format(MODEL_NAME),
    wml_client._models.ConfigurationMetaNames.TYPE: "scikit-learn_0.23",
    wml_client._models.ConfigurationMetaNames.SOFTWARE_SPEC_UID: software_spec_uid,
    wml_client._models.ConfigurationMetaNames.TRAINING_DATA_REFERENCES: training_data_references,
    wml_client._models.ConfigurationMetaNames.LABEL_FIELD: "SalePrice",
}

print("Storing model ...")
published_model_details = wml_client.repository.store_model(
    model=model_xgb,
    meta_props=model_props,
    training_data=feature_data,
    training_target=label
)
model_uid = wml_client.repository.get_model_uid(published_model_details)
print("Done")
print("Model ID: {}".format(model_uid))
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Deploy the model The next section of the notebook deploys the model as a RESTful web service in Watson Machine Learning. The deployed model will have a scoring URL you can use to send data to the model for predictions.
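Besides the Python client used below, the scoring URL produced by the next cell can also be called directly over REST. The following is only a hedged sketch: the exact `version` query-parameter date and token handling may differ for your account, and `scoring_url`, `fields`, and `values` are assumed to come from the surrounding cells.

```python
# Hedged sketch: direct REST call to the deployment's scoring endpoint.
# Assumes `scoring_url` from the deployment cell below and an IAM access token
# obtained for CLOUD_API_KEY; the version date is an assumption and may differ.
import requests

token_resp = requests.post(
    "https://iam.cloud.ibm.com/identity/token",
    data={"grant_type": "urn:ibm:params:oauth:grant-type:apikey", "apikey": CLOUD_API_KEY},
    headers={"Content-Type": "application/x-www-form-urlencoded"},
)
iam_token = token_resp.json()["access_token"]

payload = {"input_data": [{"fields": fields, "values": values}]}
response = requests.post(
    scoring_url,
    params={"version": "2021-06-24"},  # assumed API version date
    json=payload,
    headers={"Authorization": "Bearer " + iam_token},
)
print(response.json())
```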
deployment_details = wml_client.deployments.create(
    model_uid,
    meta_props={
        wml_client.deployments.ConfigurationMetaNames.NAME: "{}".format(DEPLOYMENT_NAME),
        wml_client.deployments.ConfigurationMetaNames.ONLINE: {}
    }
)
scoring_url = wml_client.deployments.get_scoring_href(deployment_details)
deployment_uid = wml_client.deployments.get_uid(deployment_details)

print("Scoring URL:" + scoring_url)
print("Model id: {}".format(model_uid))
print("Deployment id: {}".format(deployment_uid))
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Sample scoring
fields = feature_data.columns.tolist()
values = [test_X[0].tolist()]

scoring_payload = {"input_data": [{"fields": fields, "values": values}]}
scoring_payload

scoring_response = wml_client.deployments.score(deployment_uid, scoring_payload)
scoring_response
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Configure OpenScale The notebook will now import the necessary libraries and set up a Python OpenScale client.
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator, BearerTokenAuthenticator
from ibm_watson_openscale import *
from ibm_watson_openscale.supporting_classes.enums import *
from ibm_watson_openscale.supporting_classes import *

authenticator = IAMAuthenticator(apikey=CLOUD_API_KEY)
wos_client = APIClient(authenticator=authenticator)
wos_client.version
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Create schema and datamart

Set up datamart

Watson OpenScale uses a database to store payload logs and calculated metrics. If database credentials were **not** supplied above, the notebook will use the free, internal lite database. If database credentials were supplied, the datamart will be created there **unless** there is an existing datamart **and** the **KEEP_MY_INTERNAL_POSTGRES** variable is set to **True**. If an OpenScale datamart exists in Db2 or PostgreSQL, the existing datamart will be used and no data will be overwritten.

Prior instances of the House price model will be removed from OpenScale monitoring.
wos_client.data_marts.show()

data_marts = wos_client.data_marts.list().result.data_marts
if len(data_marts) == 0:
    if DB_CREDENTIALS is not None:
        if SCHEMA_NAME is None:
            print("Please specify the SCHEMA_NAME and rerun the cell")

        print('Setting up external datamart')
        added_data_mart_result = wos_client.data_marts.add(
            background_mode=False,
            name="WOS Data Mart",
            description="Data Mart created by WOS tutorial notebook",
            database_configuration=DatabaseConfigurationRequest(
                database_type=DatabaseType.POSTGRESQL,
                credentials=PrimaryStorageCredentialsLong(
                    hostname=DB_CREDENTIALS['hostname'],
                    username=DB_CREDENTIALS['username'],
                    password=DB_CREDENTIALS['password'],
                    db=DB_CREDENTIALS['database'],
                    port=DB_CREDENTIALS['port'],
                    ssl=True,
                    sslmode=DB_CREDENTIALS['sslmode'],
                    certificate_base64=DB_CREDENTIALS['certificate_base64']
                ),
                location=LocationSchemaName(
                    schema_name=SCHEMA_NAME
                )
            )
        ).result
    else:
        print('Setting up internal datamart')
        added_data_mart_result = wos_client.data_marts.add(
            background_mode=False,
            name="WOS Data Mart",
            description="Data Mart created by WOS tutorial notebook",
            internal_database=True).result

    data_mart_id = added_data_mart_result.metadata.id
else:
    data_mart_id = data_marts[0].metadata.id
    print('Using existing datamart {}'.format(data_mart_id))
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Remove existing service provider connected with the used WML instance

Multiple service providers for the same engine instance are available in Watson OpenScale. To avoid having multiple service providers for the WML instance used in this tutorial notebook, the following code deletes the existing service provider(s) and then adds a new one.
SERVICE_PROVIDER_NAME = "xgboost_WML V2"
SERVICE_PROVIDER_DESCRIPTION = "Added by tutorial WOS notebook."

service_providers = wos_client.service_providers.list().result.service_providers
for service_provider in service_providers:
    service_instance_name = service_provider.entity.name
    if service_instance_name == SERVICE_PROVIDER_NAME:
        service_provider_id = service_provider.metadata.id
        wos_client.service_providers.delete(service_provider_id)
        print("Deleted existing service_provider for WML instance: {}".format(service_provider_id))
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Add service provider

Watson OpenScale needs to be bound to the Watson Machine Learning instance to capture payload data into and out of the model.

**Note:** You can bind more than one engine instance if needed by calling the `wos_client.service_providers.add` method. Then you can refer to a particular service provider using its `service_provider_id`.
added_service_provider_result = wos_client.service_providers.add(
    name=SERVICE_PROVIDER_NAME,
    description=SERVICE_PROVIDER_DESCRIPTION,
    service_type=ServiceTypes.WATSON_MACHINE_LEARNING,
    deployment_space_id=WML_SPACE_ID,
    operational_space_id="production",
    credentials=WMLCredentialsCloud(
        apikey=CLOUD_API_KEY,  # use `apikey=IAM_TOKEN` if using IAM_TOKEN to initiate client
        url=WML_CREDENTIALS["url"],
        instance_id=None
    ),
    background_mode=False
).result
service_provider_id = added_service_provider_result.metadata.id

wos_client.service_providers.show()

asset_deployment_details = wos_client.service_providers.list_assets(data_mart_id=data_mart_id, service_provider_id=service_provider_id, deployment_id=deployment_uid, deployment_space_id=WML_SPACE_ID).result['resources'][0]
asset_deployment_details

model_asset_details_from_deployment = wos_client.service_providers.get_deployment_asset(data_mart_id=data_mart_id, service_provider_id=service_provider_id, deployment_id=deployment_uid, deployment_space_id=WML_SPACE_ID)
model_asset_details_from_deployment
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Subscriptions Remove existing House price model subscriptions This code removes previous subscriptions to the House price model to refresh the monitors with the new model and new data.
wos_client.subscriptions.show()
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
This code removes previous subscriptions to the House price model to refresh the monitors with the new model and new data.
subscriptions = wos_client.subscriptions.list().result.subscriptions
for subscription in subscriptions:
    sub_model_id = subscription.entity.asset.asset_id
    if sub_model_id == model_uid:
        wos_client.subscriptions.delete(subscription.metadata.id)
        print('Deleted existing subscription for model', sub_model_id)
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
This code creates the model subscription in OpenScale using the Python client API. Note that we need to provide the model unique identifier, and some information about the model itself.
feature_cols = feature_data.columns.tolist()
#categorical_cols = X.select_dtypes(include=['object']).columns

from ibm_watson_openscale.base_classes.watson_open_scale_v2 import ScoringEndpointRequest

subscription_details = wos_client.subscriptions.add(
    data_mart_id=data_mart_id,
    service_provider_id=service_provider_id,
    asset=Asset(
        asset_id=model_asset_details_from_deployment["entity"]["asset"]["asset_id"],
        name=model_asset_details_from_deployment["entity"]["asset"]["name"],
        url=model_asset_details_from_deployment["entity"]["asset"]["url"],
        asset_type=AssetTypes.MODEL,
        input_data_type=InputDataType.STRUCTURED,
        problem_type=ProblemType.REGRESSION
    ),
    deployment=AssetDeploymentRequest(
        deployment_id=asset_deployment_details['metadata']['guid'],
        name=asset_deployment_details['entity']['name'],
        deployment_type=DeploymentTypes.ONLINE,
        url=asset_deployment_details['metadata']['url'],
        scoring_endpoint=ScoringEndpointRequest(url=scoring_url)  # scoring model without shadow deployment
    ),
    asset_properties=AssetPropertiesRequest(
        label_column='SalePrice',
        prediction_field='prediction',
        feature_fields=feature_cols,
        #categorical_fields=categorical_cols,
        training_data_reference=TrainingDataReference(
            type='cos',
            location=COSTrainingDataReferenceLocation(
                bucket=BUCKET_NAME,
                file_name=training_data_file_name),
            connection=COSTrainingDataReferenceConnection.from_dict({
                "resource_instance_id": COS_RESOURCE_CRN,
                "url": COS_ENDPOINT,
                "api_key": COS_API_KEY_ID,
                "iam_url": IAM_URL}))
    ),
    background_mode=False
).result
subscription_id = subscription_details.metadata.id
subscription_id

import time

time.sleep(5)
payload_data_set_id = None
payload_data_set_id = wos_client.data_sets.list(type=DataSetTypes.PAYLOAD_LOGGING,
                                                target_target_id=subscription_id,
                                                target_target_type=TargetTypes.SUBSCRIPTION).result.data_sets[0].metadata.id
if payload_data_set_id is None:
    print("Payload data set not found. Please check subscription status.")
else:
    print("Payload data set id: ", payload_data_set_id)

wos_client.data_sets.show()
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Get subscription list
wos_client.subscriptions.show()
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Score the model so we can configure monitors
import random

fields = feature_data.columns.tolist()
values = random.sample(test_X.tolist(), 2)

scoring_payload = {"input_data": [{"fields": fields, "values": values}]}
predictions = wml_client.deployments.score(deployment_uid, scoring_payload)

print("Single record scoring result:", "\n fields:", predictions["predictions"][0]["fields"], "\n values: ", predictions["predictions"][0]["values"][0])
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Check if WML payload logging worked; otherwise, manually store the payload records
import uuid
from ibm_watson_openscale.supporting_classes.payload_record import PayloadRecord

time.sleep(5)
pl_records_count = wos_client.data_sets.get_records_count(payload_data_set_id)
print("Number of records in the payload logging table: {}".format(pl_records_count))
if pl_records_count == 0:
    print("Payload logging did not happen, performing explicit payload logging.")
    wos_client.data_sets.store_records(data_set_id=payload_data_set_id, request_body=[PayloadRecord(
        scoring_id=str(uuid.uuid4()),
        request=scoring_payload,
        response={"fields": predictions['predictions'][0]['fields'], "values": predictions['predictions'][0]['values']},
        response_time=460
    )], background_mode=False)
    time.sleep(5)
    pl_records_count = wos_client.data_sets.get_records_count(payload_data_set_id)
    print("Number of records in the payload logging table: {}".format(pl_records_count))

wos_client.data_sets.show_records(payload_data_set_id)
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Quality monitoring and feedback logging Enable quality monitoring
import time

time.sleep(10)
target = Target(
    target_type=TargetTypes.SUBSCRIPTION,
    target_id=subscription_id
)
parameters = {
    "min_feedback_data_size": 50
}
quality_monitor_details = wos_client.monitor_instances.create(
    data_mart_id=data_mart_id,
    background_mode=False,
    monitor_definition_id=wos_client.monitor_definitions.MONITORS.QUALITY.ID,
    target=target,
    parameters=parameters
).result
quality_monitor_instance_id = quality_monitor_details.metadata.id
quality_monitor_instance_id
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Feedback logging The code below downloads and stores enough feedback data to meet the minimum threshold so that OpenScale can calculate a new accuracy measurement. It then kicks off the accuracy monitor. The monitors run hourly, or can be initiated via the Python API, the REST API, or the graphical user interface. Get feedback logging dataset ID
feedback_dataset_id = None
feedback_dataset = wos_client.data_sets.list(type=DataSetTypes.FEEDBACK,
                                             target_target_id=subscription_id,
                                             target_target_type=TargetTypes.SUBSCRIPTION).result
print(feedback_dataset)
feedback_dataset_id = feedback_dataset.data_sets[0].metadata.id
if feedback_dataset_id is None:
    print("Feedback data set not found. Please check quality monitor status.")

!rm custom_feedback_50_regression.json
!wget https://raw.githubusercontent.com/IBM/watson-openscale-samples/main/IBM%20Cloud/WML/assets/data/house_price/custom_feedback_50_regression.json

with open('custom_feedback_50_regression.json') as file:
    feedback_data = json.load(file)

wos_client.data_sets.store_records(feedback_dataset_id, request_body=feedback_data, background_mode=False)

wos_client.data_sets.get_records_count(data_set_id=feedback_dataset_id)

run_details = wos_client.monitor_instances.run(monitor_instance_id=quality_monitor_instance_id, background_mode=False).result

wos_client.monitor_instances.show_metrics(monitor_instance_id=quality_monitor_instance_id)
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Fairness, drift monitoring and explanations

Fairness configuration

The code below configures fairness monitoring for our model. It turns on monitoring for one feature, MSSubClass. In each case, we must specify:
* Which model feature to monitor
* One or more **majority** groups, which are values of that feature that we expect to receive a higher percentage of favorable outcomes
* One or more **minority** groups, which are values of that feature that we expect to receive a higher percentage of unfavorable outcomes
* The threshold below which OpenScale should display an alert if the fairness measurement falls (in this case, 80%)

Additionally, we must specify which outcomes from the model are favorable, and which are unfavorable. We must also provide the number of records OpenScale will use to calculate the fairness score. In this case, OpenScale's fairness monitor will run hourly, but will not calculate a new fairness rating until at least 50 records have been added. Finally, to calculate fairness, OpenScale must perform some calculations on the training data, so we provide the dataframe containing the data.
wos_client.monitor_instances.show()

#wos_client.monitor_instances.delete(drift_monitor_instance_id, background_mode=False)

target = Target(
    target_type=TargetTypes.SUBSCRIPTION,
    target_id=subscription_id
)
parameters = {
    "features": [
        {
            "feature": "MSSubClass",
            "majority": [[50, 70]],
            "threshold": 0.8,
            "minority": [[80, 100]]
        }
    ],
    "favourable_class": [[200000, 500000]],
    "unfavourable_class": [[35000, 100000]],
    "min_records": 50
}

fairness_monitor_details = wos_client.monitor_instances.create(
    data_mart_id=data_mart_id,
    background_mode=False,
    monitor_definition_id=wos_client.monitor_definitions.MONITORS.FAIRNESS.ID,
    target=target,
    parameters=parameters).result
fairness_monitor_instance_id = fairness_monitor_details.metadata.id
fairness_monitor_instance_id
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Drift configuration Note: you can choose to enable/disable (True or False) model or data drift within config
monitor_instances = wos_client.monitor_instances.list().result.monitor_instances
for monitor_instance in monitor_instances:
    monitor_def_id = monitor_instance.entity.monitor_definition_id
    if monitor_def_id == "drift" and monitor_instance.entity.target.target_id == subscription_id:
        wos_client.monitor_instances.delete(monitor_instance.metadata.id)
        print('Deleted existing drift monitor instance with id: ', monitor_instance.metadata.id)

target = Target(
    target_type=TargetTypes.SUBSCRIPTION,
    target_id=subscription_id
)
parameters = {
    "min_samples": 50,
    "drift_threshold": 0.1,
    "train_drift_model": True,
    "enable_model_drift": True,
    "enable_data_drift": True
}

drift_monitor_details = wos_client.monitor_instances.create(
    data_mart_id=data_mart_id,
    background_mode=False,
    monitor_definition_id=wos_client.monitor_definitions.MONITORS.DRIFT.ID,
    target=target,
    parameters=parameters
).result
drift_monitor_instance_id = drift_monitor_details.metadata.id
drift_monitor_instance_id
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Score the model again now that monitoring is configured

This next section sends a batch of sample records from the data feed to the model for predictions. This is enough to exceed the minimum threshold for records set in the previous section, which allows OpenScale to begin calculating fairness.
!wget https://raw.githubusercontent.com/IBM/watson-openscale-samples/main/IBM%20Cloud/WML/assets/data/house_price/custom_scoring_payloads_50_regression.json

with open('custom_scoring_payloads_50_regression.json', 'r') as scoring_file:
    scoring_data = json.load(scoring_file)

import random

with open('custom_scoring_payloads_50_regression.json', 'r') as scoring_file:
    scoring_data = json.load(scoring_file)

fields = scoring_data[0]['request']['fields']
values = scoring_data[0]['request']['values']
payload_scoring = {"input_data": [{"fields": fields, "values": values}]}

scoring_response = wml_client.deployments.score(deployment_uid, payload_scoring)

time.sleep(5)
pl_records_count = wos_client.data_sets.get_records_count(payload_data_set_id)
if pl_records_count == 2:
    print("Payload logging did not happen, performing explicit payload logging.")
    wos_client.data_sets.store_records(data_set_id=payload_data_set_id, request_body=[PayloadRecord(
        scoring_id=str(uuid.uuid4()),
        request=payload_scoring,
        response={"fields": scoring_response['predictions'][0]['fields'], "values": scoring_response['predictions'][0]['values']},
        response_time=460
    )])
    time.sleep(5)
    pl_records_count = wos_client.data_sets.get_records_count(payload_data_set_id)
    print("Number of records in the payload logging table: {}".format(pl_records_count))

print('Number of records in payload table: ', wos_client.data_sets.get_records_count(data_set_id=payload_data_set_id))
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Run fairness monitor Kick off a fairness monitor run on current data. The monitor runs hourly, but can be manually initiated using the Python client, the REST API, or the graphical user interface.
run_details = wos_client.monitor_instances.run(monitor_instance_id=fairness_monitor_instance_id, background_mode=False)

time.sleep(10)

wos_client.monitor_instances.show_metrics(monitor_instance_id=fairness_monitor_instance_id)
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Run drift monitor

Kick off a drift monitor run on current data. The monitor runs every hour, but can be manually initiated using the Python client or the REST API.
drift_run_details = wos_client.monitor_instances.run(monitor_instance_id=drift_monitor_instance_id, background_mode=False)

time.sleep(5)

wos_client.monitor_instances.show_metrics(monitor_instance_id=drift_monitor_instance_id)
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Configure Explainability Finally, we provide OpenScale with the training data to enable and configure the explainability features.
target = Target(
    target_type=TargetTypes.SUBSCRIPTION,
    target_id=subscription_id
)
parameters = {
    "enabled": True
}
explainability_details = wos_client.monitor_instances.create(
    data_mart_id=data_mart_id,
    background_mode=False,
    monitor_definition_id=wos_client.monitor_definitions.MONITORS.EXPLAINABILITY.ID,
    target=target,
    parameters=parameters
).result
explainability_monitor_id = explainability_details.metadata.id
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Run explanation for sample record
pl_records_resp = wos_client.data_sets.get_list_of_records(data_set_id=payload_data_set_id, limit=1, offset=0).result
scoring_ids = [pl_records_resp["records"][0]["entity"]["values"]["scoring_id"]]
print("Running explanations on scoring IDs: {}".format(scoring_ids))

explanation_types = ["lime", "contrastive"]
result = wos_client.monitor_instances.explanation_tasks(scoring_ids=scoring_ids, explanation_types=explanation_types).result
print(result)

explanation_task_id = result.to_dict()['metadata']['explanation_task_ids'][0]
explanation_task_id

wos_client.monitor_instances.get_explanation_tasks(explanation_task_id=explanation_task_id).result.to_dict()
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Additional data to help debugging
print('Datamart:', data_mart_id)
print('Model:', model_uid)
print('Deployment:', deployment_uid)
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
Identify transactions for Explainability Transaction IDs identified by the cells below can be copied and pasted into the Explainability tab of the OpenScale dashboard.
wos_client.data_sets.show_records(payload_data_set_id, limit=5)
_____no_output_____
Apache-2.0
IBM Cloud/WML/notebooks/regression/xgboost_scikit_wrapper/Watson OpenScale and Watson ML Engine Regression.ipynb
arsuryan/watson-openscale-samples
This notebook uses a session event dataset from an e-commerce website (https://www.kaggle.com/mkechinov/ecommerce-behavior-data-from-multi-category-store and https://rees46.com/) to build an outlier detector based on an autoencoder.
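The autoencoder itself is built in later cells that are not part of this excerpt. Purely as a hedged sketch of the general technique (reconstruction-error thresholding on per-event features), the following shows one way it could look; the layer sizes, feature count, threshold choice, and the `X_train` matrix are assumptions, not the notebook's actual model:

```python
# Hedged sketch of autoencoder-based outlier detection (not the notebook's actual model).
# Assumes X_train is a float32 matrix of per-event features, e.g. the 50 text-embedding
# dimensions plus the standardized price prepared below.
import numpy as np
from tensorflow.keras import layers, models

n_features = 51  # assumed: 50 embedding dims + price_standardized

autoencoder = models.Sequential([
    layers.Input(shape=(n_features,)),
    layers.Dense(32, activation='relu'),
    layers.Dense(8, activation='relu'),   # bottleneck
    layers.Dense(32, activation='relu'),
    layers.Dense(n_features, activation=None),
])
autoencoder.compile(optimizer='adam', loss='mse')

# autoencoder.fit(X_train, X_train, epochs=10, batch_size=256, validation_split=0.1)

def outlier_scores(model, X):
    """Reconstruction error per row; high values indicate outliers."""
    reconstruction = model.predict(X)
    return np.mean(np.square(X - reconstruction), axis=1)

# threshold = np.quantile(outlier_scores(autoencoder, X_train), 0.99)  # assumed cutoff
```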
import mlflow
import numpy as np
import os
import shutil
import pandas as pd
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow_hub as hub
from itertools import product

# enable gpu growth if gpu is available
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
for device in gpu_devices:
    tf.config.experimental.set_memory_growth(device, True)
# tf.keras.mixed_precision.set_global_policy('mixed_float16')
tf.config.optimizer.set_jit(True)

%load_ext watermark
%watermark -v -iv
INFO:tensorflow:Mixed precision compatibility check (mixed_float16): OK Your GPU will likely run quickly with dtype policy mixed_float16 as it has compute capability of at least 7.0. Your GPU: GeForce RTX 2070 SUPER, compute capability 7.5 numpy 1.19.4 mlflow 1.14.1 tensorflow 2.4.0 tensorflow_hub 0.11.0 pandas 1.1.5 tensorflow.keras 2.4.0 CPython 3.6.9 IPython 7.16.1
MIT
outlier_detection/training_outlier_detection.ipynb
felix-exel/kfserving-advanced
Setting Registry and Tracking URI for MLflow
# Use this registry uri when mlflow is created by docker container with a mysql db backend
#registry_uri = os.path.expandvars('mysql+pymysql://${MYSQL_USER}:${MYSQL_PASSWORD}@localhost:3306/${MYSQL_DATABASE}')

# Use this registry uri when mlflow is running locally by the command:
# "mlflow server --backend-store-uri sqlite:///mlflow.db --default-artifact-root ./mlruns --host 0.0.0.0"
registry_uri = 'sqlite:///mlflow.db'

tracking_uri = 'http://localhost:5000'

mlflow.tracking.set_registry_uri(registry_uri)
mlflow.tracking.set_tracking_uri(tracking_uri)
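The actual experiment logging happens in later cells that are not shown in this excerpt. A minimal hedged sketch of how a training run might be logged against the URIs set above — the experiment name, parameters, and metric values are placeholders, not the notebook's real values:

```python
# Hedged sketch only: illustrative MLflow logging against the URIs set above.
# Experiment name, parameters, and metric values are placeholders.
mlflow.set_experiment('outlier-detection-autoencoder')

with mlflow.start_run():
    mlflow.log_param('batch_size', 256)       # placeholder
    mlflow.log_param('window_length', 10)     # placeholder
    mlflow.log_metric('val_loss', 0.0123)     # placeholder
    # mlflow.keras.log_model(autoencoder, 'model')  # once the model exists
```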
_____no_output_____
MIT
outlier_detection/training_outlier_detection.ipynb
felix-exel/kfserving-advanced
The data is taken from https://www.kaggle.com/mkechinov/ecommerce-behavior-data-from-multi-category-store and https://rees46.com/

Each record/line in the file has the following fields:
1. event_time: When the event happened (UTC)
2. event_type: Event type: one of [view, cart, remove_from_cart, purchase]
3. product_id
4. category_id
5. category_code: Category meaningful name (if present)
6. brand: Brand name in lower case (if present)
7. price
8. user_id: Permanent user ID
9. user_session: User session ID
# Read first 500.000 Rows
for chunk in pd.read_table("2019-Dec.csv",
                           sep=",", header=0,
                           infer_datetime_format=True, low_memory=False, chunksize=500000):

    # Filter out other event types than 'view'
    chunk = chunk[chunk['event_type'] == 'view']
    # Filter out missing 'category_code' rows
    chunk = chunk[chunk['category_code'].isna() == False]
    chunk.reset_index(drop=True, inplace=True)

    # Filter out all Sessions of length 1
    count_sessions = chunk.groupby('user_session').count()
    window_length = count_sessions.max()[0]
    unique_sessions = [count_sessions.index[i] for i in range(
        count_sessions.shape[0]) if count_sessions.iloc[i, 0] == 1]
    chunk = chunk[~chunk['user_session'].isin(unique_sessions)]
    chunk.reset_index(drop=True, inplace=True)

    # Text embedding based on https://tfhub.dev/google/nnlm-en-dim50/2
    last_category = []
    for i, el in enumerate(chunk['category_code']):
        last_category.append(el.split('.')[-1])
    chunk['Product'] = last_category

    embed = hub.load("https://tfhub.dev/google/nnlm-en-dim50/2")
    embeddings = embed(chunk['Product'].tolist())

    for dim in range(embeddings.shape[1]):
        chunk['embedding_'+str(dim)] = embeddings[:, dim]

    # Standardization
    mean = chunk['price'].mean(axis=0)
    print('Mean:', mean)
    std = chunk['price'].std(axis=0)
    print('Std:', std)
    chunk['price_standardized'] = (chunk['price'] - mean) / std

    chunk.sort_values(by=['user_session', 'event_time'], inplace=True)
    chunk['price_standardized'] = chunk['price_standardized'].astype('float32')
    chunk['product_id'] = chunk['product_id'].astype('int32')
    chunk.reset_index(drop=True, inplace=True)

    print('Sessions:', pd.unique(chunk['user_session']).shape)
    print('Unique Products:', pd.unique(chunk['product_id']).shape)
    print('Unique category_code:', pd.unique(chunk['category_code']).shape)

    columns = ['embedding_'+str(i) for i in range(embeddings.shape[1])]
    columns.append('price_standardized')
    columns.append('user_session')
    columns.append('Product')
    columns.append('product_id')
    columns.append('category_code')

    df = chunk[columns]
    # Only process the first chunk of 500,000 rows
    break

df
Mean: 284.7710546866538 Std: 349.46740231584886 Sessions: (61296,) Unique Products: (38515,) Unique category_code: (134,)
MIT
outlier_detection/training_outlier_detection.ipynb
felix-exel/kfserving-advanced
Delete Rows with 6 or Fewer Product Occurrences
count_product_id_mapped = df.groupby('product_id').count()
products_to_delete = count_product_id_mapped.loc[count_product_id_mapped['embedding_0'] <= 6].index
products_to_delete
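The cell above only identifies the affected product IDs; the actual row removal is presumably done in a later cell not included in this excerpt. A hedged sketch of that follow-up step:

```python
# Hedged sketch (assumed follow-up step): drop the rows whose product_id
# occurs 6 times or fewer, then reset the index.
df = df[~df['product_id'].isin(products_to_delete)]
df.reset_index(drop=True, inplace=True)
print('Remaining rows:', df.shape[0])
```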
_____no_output_____
MIT
outlier_detection/training_outlier_detection.ipynb
felix-exel/kfserving-advanced